+ LOGGER.error (MessageEnum.LOAD_PROPERTIES_FAIL, "Unknown. Global issue while reloading", "", "", MsoLogger.ErrorCode.DataError, "Exception - Global issue while reloading", e);
+ initLogger.error(MessageEnum.RA_CONFIG_EXC, msoPropDecoded[0] + ". MSO Properties failed due to conversion error (in web.xml file)", "", "", MsoLogger.ErrorCode.DataError, "MSO Properties failed due to conversion error (in web.xml file)", ne);
+ LOGGER.error (MessageEnum.RA_CONFIG_EXC, "No retries. Exception in parsing retry code in config:" + rCode, "", "", MsoLogger.ErrorCode.SchemaError, "Exception in parsing retry code in config");
+ throw e;
+ }
+ }
+ }
+ if (retry)
+ {
+ try {
+ Thread.sleep (retryDelay * 1000L);
+ } catch (InterruptedException e1) {
+ LOGGER.debug ("Thread interrupted while sleeping", e1);
+ }
+ }
+ else
+ throw e; // exceeded retryCount or code is not retryable
+ LOGGER.error (MessageEnum.LOAD_PROPERTIES_FAIL, "Unknown. Mso Properties ID not found in cache: " + msoPropID, "", "", MsoLogger.ErrorCode.DataError, "Exception - Mso Properties ID not found in cache", e);
+ LOGGER.debug("Create Stack error - unable to query for stack status - attempting to delete stack: " + canonicalName + " - This will likely fail and/or we won't be able to query to see if delete worked");
+ LOGGER.debug("Thread interrupted while sleeping", ie);
+ }
+ deletePollTimeout -= deletePollInterval;
+ }
+ } else if ("DELETE_COMPLETE".equals(heatStack.getStackStatus())){
+ LOGGER.debug("DELETE_COMPLETE for " + canonicalName);
+ deleted = true;
+ continue;
+ } else {
+ //got a status other than DELETE_IN_PROGRESS or DELETE_COMPLETE - so break and evaluate
+ break;
+ }
+ } else {
+ // assume if we can't find it - it's deleted
+ LOGGER.debug("heatStack returned null - assume the stack " + canonicalName + " has been deleted");
+ deleted = true;
+ continue;
+ }
+
+ } catch (Exception e3) {
+ // Just log this one. We will report the original exception.
+ LOGGER.error (MessageEnum.RA_CREATE_STACK_ERR, "Create Stack: Nested exception rolling back stack: " + e3, "", "", MsoLogger.ErrorCode.BusinessProcesssError, "Create Stack: Nested exception rolling back stack on error on query");
+
+ }
+ }
+ } catch (Exception e2) {
+ // Just log this one. We will report the original exception.
+ LOGGER.error (MessageEnum.RA_CREATE_STACK_ERR, "Create Stack: Nested exception rolling back stack: " + e2, "", "", MsoLogger.ErrorCode.BusinessProcesssError, "Create Stack: Nested exception rolling back stack");
+ }
+ }
+
+ // Propagate the original exception from Stack Query.
+ me.addContext (CREATE_STACK);
+ throw me;
+ }
+ }
+
+ if (!"CREATE_COMPLETE".equals (heatStack.getStackStatus ())) {
+ LOGGER.error (MessageEnum.RA_CREATE_STACK_ERR, "Create Stack: Nested exception rolling back stack: " + e2, "", "", MsoLogger.ErrorCode.BusinessProcesssError, "Exception in Create Stack: rolling back stack");
+ }
+ }
+ MsoOpenstackException me = new MsoOpenstackException(0, "", stackErrorStatusReason.toString());
+ LOGGER.error (MessageEnum.LOAD_PROPERTIES_FAIL, "Unknown. Mso Properties ID not found in cache: " + msoPropID, "", "", MsoLogger.ErrorCode.AvailabilityError, "Exception Mso Properties ID not found in cache: " + msoPropID, e);
+ }
+ }
+
+ /*
+ * Keep these methods around for backward compatibility
+ */
+
+ public StackInfo updateStack (String cloudSiteId,
+ String tenantId,
+ String stackName,
+ String heatTemplate,
+ Map <String, Object> stackInputs,
+ boolean pollForCompletion,
+ int timeoutMinutes) throws MsoException {
+ // Kept for backward compatibility with callers that send no environment or files arguments.
+ // In this case, simply delegate to the full method with those arguments set to null.
+ return this.updateStack (cloudSiteId,
+ tenantId,
+ stackName,
+ heatTemplate,
+ stackInputs,
+ pollForCompletion,
+ timeoutMinutes,
+ null,
+ null,
+ null);
+ }
+
+ public StackInfo updateStack (String cloudSiteId,
+ String tenantId,
+ String stackName,
+ String heatTemplate,
+ Map <String, Object> stackInputs,
+ boolean pollForCompletion,
+ int timeoutMinutes,
+ String environment) throws MsoException {
+ // Kept for backward compatibility with callers that send no files arguments.
+ // In this case, simply delegate to the full method with the files arguments set to null.
+ return this.updateStack (cloudSiteId,
+ tenantId,
+ stackName,
+ heatTemplate,
+ stackInputs,
+ pollForCompletion,
+ timeoutMinutes,
+ environment,
+ null,
+ null);
+ }
+
+ public StackInfo updateStack (String cloudSiteId,
+ // Just log this one. We will report the original exception.
+ LOGGER.error (MessageEnum.RA_CREATE_TENANT_ERR, "Nested exception rolling back tenant", "Openstack", "", MsoLogger.ErrorCode.DataError, "Create Tenant error, Nested exception rolling back tenant", e2);
+ }
+ }
+
+
+ // Propagate the original exception on user/role/tenant mapping
+ if (e instanceof OpenStackBaseException) {
+ // Convert Keystone Exception to MsoOpenstackException
+ throw keystoneErrorToMsoException ((OpenStackBaseException) e, "CreateTenantUser");
+ } else {
+ MsoAdapterException me = new MsoAdapterException (e.getMessage (), e);
+ me.addContext ("CreateTenantUser");
+ throw me;
+ }
+ }
+ return tenant.getId ();
+ }
+
+ /**
+ * Query for a tenant by ID in the given cloud. If the tenant exists,
+ * return an MsoTenant object. If not, return null.
+ * <p>
+ * For the AIC Cloud (DCP/LCP): it is not clear that cloudId is needed, as all admin
+ * requests go to the centralized identity service in DCP. However, if some artifact
+ * must exist in each local LCP instance as well, then it will be needed to access the
+ * correct region.
+ * <p>
+ *
+ * @param tenantId The Openstack ID of the tenant to query
+ * @param cloudSiteId The cloud identifier (may be a region) in which to query the tenant.
+ * @return the tenant properties of the queried tenant, or null if not found
+ * @throws MsoOpenstackException Thrown if the Openstack API call returns an exception
+ LOGGER.error(MessageEnum.RA_CONNECTION_EXCEPTION, "Openstack Error, GET Tenant by Id (" + tenantId + "): " + e, "Openstack", "", MsoLogger.ErrorCode.DataError, "Exception in Openstack GET tenant by Id");
+ throw e;
+ }
+ }
+ }
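+
+ /*
+ * Hedged standalone sketch of the "missing resource maps to null" convention described in the
+ * javadoc above: a 404 from the cloud is treated as "not found" and returned as null, while any
+ * other error is rethrown for the caller to convert and report. StatusExceptionSketch and
+ * lookup() are hypothetical stand-ins, not the real Keystone client API.
+ */
+ class QueryOrNullSketch {
+     static class StatusExceptionSketch extends RuntimeException {
+         private final int status;
+         StatusExceptionSketch (int status) { this.status = status; }
+         int getStatus () { return status; }
+     }
+
+     public Object queryOrNull (String resourceId) {
+         try {
+             return lookup (resourceId);          // hypothetical low-level query
+         } catch (StatusExceptionSketch e) {
+             if (e.getStatus () == 404) {
+                 return null;                     // not found -> null, per the javadoc contract
+             }
+             throw e;                             // any other status is a real error
+         }
+     }
+
+     private Object lookup (String resourceId) {
+         // hypothetical stand-in; the real code queries Keystone here
+         throw new StatusExceptionSketch (404);
+     }
+ }
+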
+
+ /*
+ * Find a tenant (or query its existence) by its Name. This method avoids an
+ * initial lookup by ID when it's known that we have the tenant Name.
+ *
+ * @param adminClient an authenticated Keystone object
+ *
+ * @param tenantName the tenant name to query
+ *
+ * @return a Tenant object or null if not found
+ */
+ public Tenant findTenantByName (Keystone adminClient, String tenantName) {
+ LOGGER.error (MessageEnum.RA_CONNECTION_EXCEPTION, "Openstack Error, GET Tenant By Name (" + tenantName + "): " + e, "Openstack", "", MsoLogger.ErrorCode.DataError, "Exception in Openstack GET Tenant By Name");
+ throw e;
+ }
+ }
+ }
+
+ /*
+ * Look up an Openstack User by Name or Openstack ID. Check the ID first, and if that
+ * fails, try the Name.
+ *
+ * @param adminClient an authenticated Keystone object
+ *
+ * @param userName the user name or ID to query
+ *
+ * @return a User object or null if not found
+ */
+ private User findUserByNameOrId (Keystone adminClient, String userNameOrId) {
+ LOGGER.error (MessageEnum.RA_CONNECTION_EXCEPTION, "Openstack Error, GET User (" + userNameOrId + "): " + e, "Openstack", "", MsoLogger.ErrorCode.DataError, "Exception in Openstack GET User");
+ throw e;
+ }
+ }
+ }
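+
+ /*
+ * Hedged standalone sketch (not the real Keystone client API) of the lookup order described
+ * above: treat the argument as an Openstack ID first, and only if that misses, treat it as a
+ * name. The two maps are hypothetical stand-ins for the "by ID" and "by name" queries.
+ */
+ class UserLookupSketch {
+     private final java.util.Map<String, String> usersById = new java.util.HashMap<String, String>();
+     private final java.util.Map<String, String> usersByName = new java.util.HashMap<String, String>();
+
+     public String findUserByNameOrId (String userNameOrId) {
+         String user = usersById.get (userNameOrId);      // 1. assume the argument is an Openstack ID
+         if (user == null) {
+             user = usersByName.get (userNameOrId);       // 2. otherwise try it as a user name
+         }
+         return user;                                     // null means "not found", matching the contract
+     }
+ }
+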
+
+ /*
+ * Look up an Openstack User by Name. This avoids initial Openstack query by ID
+ * if we know we have the User Name.
+ *
+ * @param adminClient an authenticated Keystone object
+ *
+ * @param userName the user name to query
+ *
+ * @return a User object or null if not found
+ */
+ public User findUserByName (Keystone adminClient, String userName) {
+ LOGGER.error (MessageEnum.RA_CONNECTION_EXCEPTION, "Openstack Error, GET User By Name (" + userName + "): " + e, "Openstack", "", MsoLogger.ErrorCode.DataError, "Exception in Openstack GET User By Name");
+ throw e;
+ }
+ }
+ }
+
+ /*
+ * Look up an Openstack Role by Name or Id. There is no direct query for Roles, so
+ * we need to retrieve the full list from Openstack and look for a match. By default,
+ * Openstack should have a "_member_" role for normal VM-level privileges and an
+ * "admin" role for expanded privileges (e.g. administer tenants, users, and roles).
+ * <p>
+ *
+ * @param adminClient an authenticated Keystone object
+ *
+ * @param roleNameOrId the Role name or ID to look up
+ *
+ * @return a Role object
+ */
+ private Role findRoleByNameOrId (Keystone adminClient, String roleNameOrId) {
+ LOGGER.error (MessageEnum.RA_CONNECTION_EXCEPTION, "OpenStack", "Openstack Error, GET Role (" + roleNameOrId + "): " + e, "Openstack", "", MsoLogger.ErrorCode.DataError, "Exception in Openstack");
+ throw e;
+ }
+ }
+ }
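+
+ /*
+ * Hedged standalone sketch of the strategy described above: with no direct role query,
+ * fetch the full role list and scan it for a match on either the ID or the name.
+ * RoleSketch and the list parameter are hypothetical stand-ins, not the Keystone client API.
+ */
+ class RoleLookupSketch {
+     static class RoleSketch {
+         final String id;
+         final String name;
+         RoleSketch (String id, String name) { this.id = id; this.name = name; }
+     }
+
+     public RoleSketch findRoleByNameOrId (java.util.List<RoleSketch> allRoles, String roleNameOrId) {
+         for (RoleSketch role : allRoles) {
+             if (roleNameOrId.equals (role.id) || roleNameOrId.equals (role.name)) {
+                 return role;          // first match wins; "_member_" and "admin" are the usual defaults
+             }
+         }
+         return null;                  // not found
+     }
+ }
+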
+
+ /*
+ * Find a network (or query its existence) by its Name. This method avoids an
+ * initial lookup by ID when it's known that we have the network Name.
+ *
+ * Neutron does not support 'name=*' query parameter for Network query (show).
+ * The only way to query by name is to retrieve all networks and look for the
+ * match. While inefficient, this capability will be provided as it is needed
+ * by MSO, but should be avoided in favor of ID whenever possible.
+ *
+ * TODO:
+ * Network names are not required to be unique, though MSO will attempt to enforce
+ * uniqueness. This call probably needs to return an error (instead of returning
+ * the first match).
+ *
+ * @param neutronClient an authenticated Quantum object
+ * @param networkName the network name to query
+ * @return a Network object or null if not found
+ */
+ public Network findNetworkByName (Quantum neutronClient, String networkName)
+ LOGGER.debug ("Found match on network name: " + networkName);
+ return network;
+ }
+ }
+ LOGGER.debug ("findNetworkByName - no match found for " + networkName);
+ return null;
+ }
+ catch (OpenStackResponseException e) {
+ if (e.getStatus() == 404) {
+ return null;
+ } else {
+ LOGGER.error (MessageEnum.RA_CONNECTION_EXCEPTION, "OpenStack", "Openstack Error, GET Network By Name (" + networkName + "): " + e, "OpenStack", "", MsoLogger.ErrorCode.DataError, "Exception in OpenStack");
+ throw e;
+ }
+ }
+ }
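+
+ /*
+ * Hedged standalone sketch of the name lookup described above: Neutron offers no query-by-name,
+ * so all networks are listed and the first name match is returned (see the TODO above about
+ * non-unique names). NetworkSketch and the list parameter are hypothetical stand-ins, not the
+ * Quantum client API.
+ */
+ class NetworkLookupSketch {
+     static class NetworkSketch {
+         final String id;
+         final String name;
+         NetworkSketch (String id, String name) { this.id = id; this.name = name; }
+     }
+
+     public NetworkSketch findNetworkByName (java.util.List<NetworkSketch> allNetworks, String networkName) {
+         for (NetworkSketch network : allNetworks) {
+             if (networkName.equals (network.name)) {
+                 return network;       // first match only; duplicate names are silently ignored
+             }
+         }
+         return null;                  // not found
+     }
+ }
+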
+
+
+ /*
+ * An entry in the Neutron Client Cache. It saves the Neutron endpoint URL and
+ * authentication token along with the token expiration. After the token expires,
+ * this cache item will no longer be used.
+ */
+ private static class NeutronCacheEntry implements Serializable
+ {
+ private static final long serialVersionUID = 1L;
+
+ private String neutronUrl;
+ private String token;
+ private Calendar expires;
+
+ public NeutronCacheEntry (String neutronUrl, String token, Calendar expires) {
+ this.neutronUrl = neutronUrl;
+ this.token = token;
+ this.expires = expires;
+ }
+
+ public Quantum getNeutronClient () {
+ Quantum neutronClient = new Quantum(neutronUrl);
+ neutronClient.token(token);
+ return neutronClient;
+ }
+
+ public boolean isExpired() {
+ if (expires == null) {
+ return true;
+ }
+
+ Calendar now = Calendar.getInstance();
+ if (now.after(expires)) {
+ return true;
+ }
+
+ return false;
+ }
+ }
+
+ /**
+ * Clean up the Neutron client cache to remove expired entries.
+ */
+ public static void neutronCacheCleanup () {
+ // Remove via entrySet().removeIf to avoid a ConcurrentModificationException while iterating
+ neutronClientCache.entrySet().removeIf(entry -> {
+ if (entry.getValue().isExpired()) {
+ LOGGER.debug ("Cleaned Up Cached Neutron Client for " + entry.getKey());
+ return true;
+ }
+ return false;
+ });
+ }
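+
+ /*
+ * Hedged usage sketch of the cache entry above: reuse a cached Neutron URL/token while the
+ * token is still valid, and rebuild the entry when it is missing or expired. The
+ * reauthenticate(...) helper is a hypothetical stand-in for the real re-authentication logic.
+ */
+ private static Quantum getOrRefreshNeutronClient (String cacheKey) {
+     NeutronCacheEntry entry = neutronClientCache.get (cacheKey);
+     if (entry == null || entry.isExpired ()) {
+         entry = reauthenticate (cacheKey);            // hypothetical: obtain a fresh URL/token/expiry
+         neutronClientCache.put (cacheKey, entry);
+         LOGGER.debug ("Refreshed Cached Neutron Client for " + cacheKey);
+     }
+     return entry.getNeutronClient ();
+ }
+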
+
+ /**
+ * Reset the Neutron client cache.
+ * This may be useful if cached credentials get out of sync.
+ */
+ public static void neutronCacheReset () {
+ neutronClientCache = new HashMap<String,NeutronCacheEntry>();
+ LOGGER.error (MessageEnum.LOAD_PROPERTIES_FAIL, "Unknown. Mso Properties ID not found in cache: " + msoPropID, "", "", MsoLogger.ErrorCode.DataError, "Exception - Mso Properties ID not found in cache", e);
+ LOGGER.error(MessageEnum.RA_SEND_VNF_NOTIF_ERR, "Could not deliver response to BPEL after "+totalretries+" tries: "+toBpelStr, "Camunda", "", MsoLogger.ErrorCode.DataError, "Could not deliver response to BPEL");
+ return false;
+ }
+ totalretries++;
+ int sleepinterval = retryint;
+ if (retryint < 0) {
+ // if the retry interval is negative, sleep its absolute value and double it on each pass (exponential backoff)
+ sleepinterval = -retryint;
+ retryint *= 2;
+ }
+ debug("Sleeping for " + sleepinterval + " seconds.");
+ try {
+ Thread.sleep(sleepinterval * 1000L);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+ }
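+
+ /*
+ * Hedged standalone sketch of the retry-interval rule used above: a negative configured
+ * interval means "sleep its absolute value and double it on every pass" (exponential backoff),
+ * while a positive value is a fixed delay. Illustration only, not taken from the adapter code.
+ */
+ static int[] backoffScheduleSketch (int retryint, int attempts) {
+     int[] delays = new int[attempts];
+     for (int i = 0; i < attempts; i++) {
+         if (retryint < 0) {
+             delays[i] = -retryint;     // sleep the absolute value ...
+             retryint *= 2;             // ... and double the (negative) interval for the next pass
+         } else {
+             delays[i] = retryint;      // fixed delay
+         }
+     }
+     return delays;                     // e.g. retryint = -5, attempts = 3 -> [5, 10, 20]
+ }
+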
+ private void debug(String m) {
+ LOGGER.debug(m);
+// System.err.println(m);
+ }
+ private void sendOne(final String toBpelStr, final String bpelUrl, final boolean isxml) {
+ LOGGER.debug("Sending to BPEL server: "+bpelUrl);
+ LOGGER.debug("Content is: "+toBpelStr);
+
+ //POST
+ HttpPost post = new HttpPost(bpelUrl);
+ if (credentials != null && !credentials.isEmpty())
+ LOGGER.recordMetricEvent (queryNetworkStarttime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "QueryNetwork", null);
+ // Does not exist. Throw an exception (can't update a non-existent network)
+ throw new NetworkException(error, MsoExceptionCategory.USERDATA);
+ }
+ long updateNetworkStarttime = System.currentTimeMillis ();
+ try {
+ netInfo = neutron.updateNetwork (cloudSiteId,
+ tenantId,
+ networkId,
+ neutronNetworkType,
+ physicalNetworkName,
+ vlans);
+ LOGGER.recordMetricEvent (updateNetworkStarttime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "UpdateNetwork", null);
+ LOGGER.recordMetricEvent (queryStackStarttime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "QueryStack", null);
+ // Ignore MsoStackNotFound exception because we already checked.
+ long updateStackStarttime = System.currentTimeMillis ();
+ try {
+ heatStack = heat.updateStack (cloudSiteId,
+ tenantId,
+ networkId,
+ template,
+ stackParams,
+ true,
+ heatTemplate.getTimeoutMinutes ());
+ LOGGER.recordMetricEvent (updateStackStarttime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "UpdateStack", null);
+ LOGGER.recordMetricEvent (queryStackStarttime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "QueryStack", null);
+ LOGGER.recordMetricEvent (queryNetworkStarttime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "QueryNetwork", null);
+ LOGGER.recordMetricEvent (deleteNetworkStarttime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "DeleteNetwork", null);
+ LOGGER.recordMetricEvent (deleteStackStarttime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "DeleteStack", null);
+ LOGGER.recordMetricEvent (deleteNetworkStarttime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "DeleteNetwork", null);
+ LOGGER.recordMetricEvent (deleteStackStarttime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "DeleteStack", null);
+ * A list of {@link javax.xml.ws.WebServiceFeature} to configure on the proxy. Supported features not in the <code>features</code> parameter will have their default values.
+ * @return
+ * returns SDNCAdapterPortType
+ */
+ @WebEndpoint(name = "SDNCAdapterSoapHttpPort")
+ public SDNCAdapterPortType getSDNCAdapterSoapHttpPort(WebServiceFeature... features) {
+ public final static QName SERVICE = new QName("http://domain2.att.com/workflow/sdnc/adapter/callback/wsdl/v1", "SDNCCallbackAdapterService");
+ public final static QName SDNCCallbackAdapterSoapHttpPort = new QName("http://domain2.att.com/workflow/sdnc/adapter/callback/wsdl/v1", "SDNCCallbackAdapterSoapHttpPort");
+ * A list of {@link javax.xml.ws.WebServiceFeature} to configure on the proxy. Supported features not in the <code>features</code> parameter will have their default values.
+ else if ("put".equals(action) || "restdelete".equals(action)) { //due to variable format for operation eg services/layer3-service-list/8fe4ba4f-35cf-4d9b-a04a-fd3f5d4c5cc9
+ value = msoPropertiesFactory.getMsoJavaProperties(MSO_PROP_SDNC_ADAPTER).getProperty(key, "");
+ } catch (MsoPropertiesException e) {
+ msoLogger.error (MessageEnum.LOAD_PROPERTIES_FAIL, "Unknown. Mso Properties ID not found in cache: " + MSO_PROP_SDNC_ADAPTER, "SDNC", "", MsoLogger.ErrorCode.DataError, "Exception - Mso Properties ID not found in cache", e);
+ value="";
+ }
+
+ if (value != null && value.length() > 0) {
+
+ String[] parts = value.split("\\|"); //escape pipe
+ value = msoPropertiesFactoryp.getMsoJavaProperties(MSO_PROP_SDNC_ADAPTER).getProperty(key, defaultValue);
+ } catch (MsoPropertiesException e) {
+ msoLogger.error (MessageEnum.NO_PROPERTIES, "Unknown. Mso Properties ID not found in cache: " + MSO_PROP_SDNC_ADAPTER, "SDNC", "", MsoLogger.ErrorCode.DataError, "Exception - Mso Properties ID not found in cache", e);
+ error = "Unable to set authorization in callback request " + e2.getMessage();
+ msoLogger.error(MessageEnum.RA_SET_CALLBACK_AUTH_EXC, "Camunda", "", MsoLogger.ErrorCode.BusinessProcesssError, "Exception - Unable to set authorization in callback request", e2);
+ // Add private constructor to prevent instance creation.
+ private SDNCRequestIdUtil () {}
+
+ public static String getSDNCOriginalRequestId (String newRequestId) {
+
+ // Camunda scripts add a postfix such as -1, -2 to the original requestId to keep it unique when sending requests to SDNC.
+ // To log against the original requestId, we need to strip the postfix added by the Camunda scripts.
+ // Verify whether the requestId is a valid UUID with a postfix (-1, -2): if so, it contains five '-' characters, since a valid UUID contains four.
+ // If the requestId is not a valid UUID with postfix, we do nothing
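+ // Hedged sketch of that rule (not the original implementation): a valid UUID contains four
+ // '-' characters, so a UUID plus a "-1"/"-2" style postfix contains five; only in that case
+ // is the trailing "-N" stripped, otherwise the requestId is returned unchanged.
+ if (newRequestId == null) {
+     return null;
+ }
+ long dashCount = newRequestId.chars ().filter (c -> c == '-').count ();
+ if (dashCount == 5) {
+     String candidate = newRequestId.substring (0, newRequestId.lastIndexOf ('-'));
+     try {
+         java.util.UUID.fromString (candidate);    // confirm the remainder really is a UUID
+         return candidate;
+     } catch (IllegalArgumentException ignored) {
+         // not a UUID with a postfix; fall through and return the id unchanged
+     }
+ }
+ return newRequestId;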
+ LOGGER.recordMetricEvent (queryTenantStartTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "QueryTenant", null);
+ } catch (MsoException me) {
+ LOGGER.recordMetricEvent (queryTenantStartTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.CommunicationError, "Exception while communicate with Open Stack", "OpenStack", "QueryTenant", null);
+ LOGGER.error (MessageEnum.RA_CREATE_TENANT_ERR, me.getMessage(), "OpenStack", "createTenant", MsoLogger.ErrorCode.DataError, "Exception while communicate with Open Stack", me);
+ LOGGER.recordMetricEvent (createTenantStartTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "CreateTenant", null);
+ } catch (MsoException me) {
+ LOGGER.recordMetricEvent (createTenantStartTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.CommunicationError, "Exception while communicate with Open Stack", "OpenStack", "CreateTenant", null);
+ LOGGER.error (MessageEnum.RA_CREATE_TENANT_ERR, me.getMessage(), "OpenStack", "createTenant", MsoLogger.ErrorCode.DataError, "Exception while communicate with Open Stack", me);
+ LOGGER.recordMetricEvent (subStartTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "QueryTenant", null);
+ LOGGER.error(MessageEnum.RA_SEND_VNF_NOTIF_ERR, "Could not deliver response to BPEL after "+totalretries+" tries: "+toBpelStr, "Camunda", "", MsoLogger.ErrorCode.BusinessProcesssError, "Could not deliver response to BPEL");
+ return false;
+ }
+ totalretries++;
+ int sleepinterval = retryint;
+ if (retryint < 0) {
+ // if the retry interval is negative, sleep its absolute value and double it on each pass (exponential backoff)
+ sleepinterval = -retryint;
+ retryint *= 2;
+ }
+ debug("Sleeping for " + sleepinterval + " seconds.");
+ try {
+ Thread.sleep(sleepinterval * 1000L);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+ }
+ private void debug(String m) {
+ LOGGER.debug(m);
+// System.err.println(m);
+ }
+ private void sendOne(final String toBpelStr, final String bpelUrl, final boolean isxml) {
+ LOGGER.debug("Sending to BPEL server: "+bpelUrl);
+ LOGGER.debug("Content is: "+toBpelStr);
+
+ //POST
+ HttpPost post = new HttpPost(bpelUrl);
+ if (credentials != null && !credentials.isEmpty())
+ LOGGER.recordMetricEvent (queryStackStarttime1, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "QueryStack", vnfName);
+ } catch (MsoException me) {
+ // Failed to query the Stack due to an openstack exception.
+ LOGGER.recordMetricEvent (queryStackStarttime2, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "QueryStack", vnfName);
+ } catch (MsoException me) {
+ // Failed to query the Stack due to an openstack exception.
+ // Convert to a generic VnfException
+ me.addContext ("UpdateVNF");
+ String error = "Update VNF: Attached heatStack ID Query " + nestedStackId + " in " + cloudSiteId + "/" + tenantId + ": " + me ;
+ // Ignore MsoTenantNotFound and MsoStackAlreadyExists exceptions
+ // because we already checked for those.
+ long updateStackStarttime = System.currentTimeMillis ();
+ try {
+ heatStack = heatU.updateStack (cloudSiteId,
+ tenantId,
+ vnfName,
+ template,
+ copyStringInputs (inputs),
+ true,
+ heatTemplate.getTimeoutMinutes (),
+ newEnvironmentString,
+ //heatEnvironmentString,
+ nestedTemplatesChecked,
+ heatFilesObjects);
+ LOGGER.recordMetricEvent (updateStackStarttime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "UpdateStack", vnfName);
+ LOGGER.recordMetricEvent (subStartTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "QueryStack", vnfName);
+ } catch (MsoException me) {
+ me.addContext ("QueryVNF");
+ // Failed to query the Stack due to an openstack exception.
+ LOGGER.recordMetricEvent (subStartTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "DeleteStack", vnfName);
+ } catch (MsoException me) {
+ me.addContext ("DeleteVNF");
+ // Failed to query the Stack due to an openstack exception.
+ LOGGER.recordMetricEvent (subStartTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "DeleteStack", null);
+ } catch (MsoException me) {
+ // Failed to rollback the Stack due to an openstack exception.
+ LOGGER.recordMetricEvent (subStartTime1, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "QueryStack", vfModuleName);
+ LOGGER.recordMetricEvent (subStartTime2, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "QueryStack", vfModuleName);
+ } catch (MsoException me) {
+ // Failed to query the Stack due to an openstack exception.
+ // Convert to a generic VnfException
+ me.addContext ("CreateVFModule");
+ String error = "Create VFModule: Attached heatStack ID Query " + nestedStackId + " in " + cloudSiteId + "/" + tenantId + ": " + me ;
+ LOGGER.recordMetricEvent (subStartTime3, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "QueryStack", vfModuleName);
+ } catch (MsoException me) {
+ // Failed to query the Stack due to an openstack exception.
+ // Convert to a generic VnfException
+ me.addContext ("CreateVFModule");
+ String error = "Create VFModule: Attached baseHeatStack ID Query " + nestedBaseStackId + " in " + cloudSiteId + "/" + tenantId + ": " + me ;
+ if (nestedBaseHeatStack == null || nestedBaseHeatStack.getStatus() == HeatStatus.NOTFOUND) {
+ String error = "Create VFModule: Attached base heatStack ID DOES NOT EXIST " + nestedBaseStackId + " in " + cloudSiteId + "/" + tenantId + " USER ERROR" ;
+ LOGGER.error (MessageEnum.RA_QUERY_VNF_ERR, vfModuleName, cloudSiteId, tenantId, error, "OpenStack", "QueryStack", MsoLogger.ErrorCode.BusinessProcesssError, "Create VFModule: Attached base heatStack ID DOES NOT EXIST");
+ // By the time we get here - heatTemplateId and heatEnvtId should be populated (or null)
+ HeatTemplate heatTemplate = null;
+ if (heatTemplateId == null) {
+ String error = "Create: No Heat Template ID defined in catalog database for " + vnfType + ", reqType=" + requestTypeString;
+ LOGGER.error(MessageEnum.RA_VNF_UNKNOWN_PARAM, "Heat Template ID", vnfType, "OpenStack", "", MsoLogger.ErrorCode.DataError, "Create: No Heat Template ID defined in catalog database");
+ String error = "Create VF/VNF: no entry found for heat template ID = " + heatTemplateId;
+ LOGGER.error(MessageEnum.RA_VNF_UNKNOWN_PARAM,
+ "Heat Template ID",
+ String.valueOf(heatTemplateId), "OpenStack", "", MsoLogger.ErrorCode.BusinessProcesssError, "Create VF/VNF: no entry found for heat template ID = " + heatTemplateId);
+ String error = "Create: No HEAT_FILES entry in catalog database for " + vfModuleType + " at HEAT_FILES index=" + heatFileId;
+ LOGGER.debug(error);
+ LOGGER.error (MessageEnum.RA_VNF_UNKNOWN_PARAM, "HEAT_FILES entry not found at " + heatFileId, vfModuleType, "OpenStack", "", MsoLogger.ErrorCode.BusinessProcesssError, "HEAT_FILES entry not found");
+ // Part 1: parse the environment entries to see if the required parameter is there (previously a simple grep was used)
+ // Part 2: only submit to openstack the parameters in the environment that are also in the heat template
+ // Note this also removes any comments
+ MsoHeatEnvironmentEntry mhee = null;
+ if (heatEnvironmentString != null && heatEnvironmentString.contains ("parameters:")) {
+ //LOGGER.debug ("Have an Environment argument with a parameters: section - will bypass checking for valid params - but will still check for aliases");
+ LOGGER.recordMetricEvent (createStackStarttime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "CreateStack", vfModuleName);
+ } else {
+ heatStack = heat.createStack (cloudSiteId,
+ tenantId,
+ vfModuleName,
+ template,
+ inputsTwo,
+ true,
+ heatTemplate.getTimeoutMinutes (),
+ newEnvironmentString,
+ //heatEnvironmentString,
+ nestedTemplatesChecked,
+ heatFilesObjects,
+ backout.booleanValue());
+
+ }
+ LOGGER.recordMetricEvent (createStackStarttime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "CreateStack", vfModuleName);
+ LOGGER.recordMetricEvent (subStartTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully received response from Open Stack", "OpenStack", "DeleteStack", vnfName);
+ } catch (MsoException me) {
+ me.addContext ("DeleteVNF");
+ // Failed to query the Stack due to an openstack exception.
+ "Unexpected exception caught during the notification processing", "ASDC", "treatNotification", MsoLogger.ErrorCode.SchemaError, "RuntimeException in treatNotification",
+ "Exception caught during Installation of artifact", "ASDC", "processResourceNotification", MsoLogger.ErrorCode.BusinessProcesssError, "Exception in processResourceNotification", e);
+ }
+ }
+
+ private static final String UNKNOWN="Unknown";
+
+ /**
+ * @return the address of the ASDC we are connected to.
+ */
+ public String getAddress () {
+ if (asdcConfig != null) {
+ return asdcConfig.getAsdcAddress ();
+ }
+ return UNKNOWN;
+ }
+
+ /**
+ * @return the environment name of the ASDC we are connected to.
+ public DistributionStatusMessage (final String artifactUrl, final String consumerId,final String distributionId, final DistributionStatusEnum distributionStatusEnum, final long timestampL) {
+ private static final String NOT_FOUND = "<!DOCTYPE html><html><head><meta charset=\"ISO-8859-1\"><title>Application Not Ready</title></head><body>Application Not Ready. Properties file missing or invalid or database Connection failed</body></html>";
+ for (int i = 1; i < aggregatedElements.size(); ++i) {
+ sb.append (", ");
+ sb.append(aggregatedElements.get(i));
+ }
+ }
+ return sb.toString();
+ }
+
+ /**
+ * The type that was defined at creation time. This is typically VNF_RESOURCE, VF_MODULE_METADATA, HEAT_ENV, etc.
+ * @return The type of this element information type. This will usually be either an ArtifactTypeEnum entry name or an ASDCElementTypeEnum entry name.
+ * @see ASDCElementInfo.ASDCElementTypeEnum
+ */
+ public String getType() {
+ return type;
+ }
+
+ /**
+ * Provides the map of all element information entries for this type.
+ * @return A map of all element information entries which will be used by the toString() method.
+ throw new ArtifactInstallerException("Artifact (UUID:"+artifactUUID+ ") referenced in the VFModule UUID list has not been downloaded, cancelling the Resource deployment");
+ for (IArtifactInfo unknownArtifact:vfModuleStructure.getArtifactsMap().get(ASDCConfiguration.HEAT_VOL).get(0).getArtifactInfo().getRelatedArtifacts()) {
+ if (heatNestedArtifact.getArtifactInfo().getArtifactUUID().equals(unknownArtifact.getArtifactUUID())) {
+ * This class tests the ASDC Controller by using the ASDC Mock Client
+ *
+ *
+ */
+public class ASDCConfigurationTest {
+
+ public static MsoPropertiesFactory msoPropertiesFactory = new MsoPropertiesFactory();
+ public final String ASDC_PROP = ASDCConfigurationTest.class.getClassLoader().getResource("mso.json").toString().substring(5);
+ public final String ASDC_PROP2 = ASDCConfigurationTest.class.getClassLoader().getResource("mso2.json").toString().substring(5);
+ public final String ASDC_PROP3 = ASDCConfigurationTest.class.getClassLoader().getResource("mso3.json").toString().substring(5);
+ public final String ASDC_PROP_BAD = ASDCConfigurationTest.class.getClassLoader().getResource("mso-bad.json").toString().substring(5);
+ public final String ASDC_PROP_WITH_NULL = ASDCConfigurationTest.class.getClassLoader().getResource("mso-with-NULL.json").toString().substring(5);
+ public final String ASDC_PROP_DOUBLE_CONFIG = ASDCConfigurationTest.class.getClassLoader().getResource("mso-two-configs.json").toString().substring(5);
+ public final String ASDC_PROP4_WITH_TLS = ASDCConfigurationTest.class.getClassLoader().getResource("mso4-with-TLS.json").toString().substring(5);
+
+ @BeforeClass
+ public static final void prepareBeforeAllTests() {
+ msoPropertiesFactory.removeAllMsoProperties();
+ }
+
+ @Before
+ public final void prepareBeforeEachTest () throws MsoPropertiesException {
+ public static final String ASDC_PROP = MsoJavaProperties.class.getClassLoader().getResource("mso.json").toString().substring(5);
+ public static final String ASDC_PROP2 = MsoJavaProperties.class.getClassLoader().getResource("mso2.json").toString().substring(5);
+ public static final String ASDC_PROP3 = MsoJavaProperties.class.getClassLoader().getResource("mso3.json").toString().substring(5);
+ public static final String ASDC_PROP_BAD = MsoJavaProperties.class.getClassLoader().getResource("mso-bad.json").toString().substring(5);
+ public static final String ASDC_PROP_WITH_NULL = MsoJavaProperties.class.getClassLoader().getResource("mso-with-NULL.json").toString().substring(5);
+
+ @BeforeClass
+ public static final void prepareMockNotification() throws MsoPropertiesException, IOException, URISyntaxException, NoSuchAlgorithmException, ArtifactInstallerException {
+
+ heatExample = new String(Files.readAllBytes(Paths.get(ASDCControllerTest.class.getClassLoader().getResource("resource-examples/autoscaling.yaml").toURI())));
+ public static final String ASDC_PROP = MsoJavaProperties.class.getClassLoader().getResource("mso.json").toString().substring(5);
+ public static final String ASDC_PROP2 = MsoJavaProperties.class.getClassLoader().getResource("mso2.json").toString().substring(5);
+ public static final String ASDC_PROP3 = MsoJavaProperties.class.getClassLoader().getResource("mso3.json").toString().substring(5);
+ public static final String ASDC_PROP_BAD = MsoJavaProperties.class.getClassLoader().getResource("mso-bad.json").toString().substring(5);
+ public static final String ASDC_PROP_WITH_NULL = MsoJavaProperties.class.getClassLoader().getResource("mso-with-NULL.json").toString().substring(5);
+ public static final String ASDC_PROP_WITH_DOUBLE = MsoJavaProperties.class.getClassLoader().getResource("mso-two-configs.json").toString().substring(5);
+ public static final String ASDC_PROP_WITH_DOUBLE2 = MsoJavaProperties.class.getClassLoader().getResource("mso-two-configs2.json").toString().substring(5);
+
+ @BeforeClass
+ public static final void prepareMockNotification() throws MsoPropertiesException, IOException, URISyntaxException, NoSuchAlgorithmException, ArtifactInstallerException {
+
+ heatExample = new String(Files.readAllBytes(Paths.get(ASDCGlobalControllerTest.class.getClassLoader().getResource("resource-examples/autoscaling.yaml").toURI())));
+ public void addParameterListTest() throws Exception {
+
+ InputStream input = new FileInputStream(new File("src/test/resources/resource-examples/simpleTest.yaml"));
+ YamlEditor decoder = new YamlEditor (IOUtils.toByteArray(input));
+
+ Set <HeatTemplateParam> newParamSet = new HashSet <HeatTemplateParam> ();
+
+ HeatTemplateParam heatParam1 = new HeatTemplateParam();
+ heatParam1.setId(1);
+ heatParam1.setParamName("testos1");
+ heatParam1.setParamType("string");
+
+ HeatTemplateParam heatParam2 = new HeatTemplateParam();
+ heatParam2.setId(2);
+ heatParam2.setParamName("testos2");
+ heatParam2.setParamType("number");
+
+ newParamSet.add(heatParam1);
+ newParamSet.add(heatParam2);
+
+ decoder.addParameterList(newParamSet);
+
+ Set <HeatTemplateParam> paramSet = decoder.getParameterList();
+
+ assertTrue(paramSet.size() == 7);
+
+ Boolean check1 = Boolean.FALSE;
+ Boolean check2 = Boolean.FALSE;
+
+ for (HeatTemplateParam param : paramSet) {
+ if ("ip_port_snmp_manager".equals(param.getParamName()) || "cor_direct_net_name".equals(param.getParamName()) || "cor_direct_net_RT".equals(param.getParamName())) {
+ assertFalse(param.isRequired());
+ } else {
+ assertTrue(param.isRequired());
+ }
+
+ if ("testos1".equals(param.getParamName()) && "string".equals(param.getParamType())) {
+ check1=Boolean.TRUE;
+ }
+
+ if ("testos2".equals(param.getParamName()) && "number".equals(param.getParamType())) {
+ check2=Boolean.TRUE;
+ }
+
+ }
+
+ assertTrue(check1);
+ assertTrue(check2);
+ }
+
+ @Test
+ public void VfResourceInstallerTest() throws Exception {
+
+ assertTrue("mon ami toto, est dans le bois: toto ici".equals(VfResourceInstaller.verifyTheFilePrefixInString("mon ami toto, est dans le bois: toto ici","toto")));
+ assertTrue("mon ami toto, est dans le bois: toto ici".equals(VfResourceInstaller.verifyTheFilePrefixInString("mon ami file:///toto, est dans le bois: file:///toto ici","toto")));
+ assertTrue("mon ami toto, est dans le bois: toto ici".equals(VfResourceInstaller.verifyTheFilePrefixInString("mon ami file:///toto, est dans le bois: toto ici","toto")));
+ assertTrue("mon ami toto, est dans le bois: toto ici".equals(VfResourceInstaller.verifyTheFilePrefixInString("mon ami toto, est dans le bois: file:///toto ici","toto")));
+ private static final String NOT_FOUND = "<!DOCTYPE html><html><head><meta charset=\"ISO-8859-1\"><title>Application Not Started</title></head><body>Application not started. Properties file missing or invalid or database Connection failed</body></html>";
+ private static final String NOT_HEALTHY = "<!DOCTYPE html><html><head><meta charset=\"ISO-8859-1\"><title>Application Not Started</title></head><body>Application not available or at least one of the sub-modules is not available.</body></html>";
+ public static final Response HEALTH_CHECK_RESPONSE = Response.status (HttpStatus.SC_OK)
+ .entity (CHECK_HTML)
+ .build ();
+ public static final Response HEALTH_CHECK_NOK_RESPONSE = Response.status (HttpStatus.SC_SERVICE_UNAVAILABLE)
+ .entity (NOT_HEALTHY)
+ . build ();
+ public static final Response NOT_STARTED_RESPONSE = Response.status (HttpStatus.SC_SERVICE_UNAVAILABLE)
+ .entity (NOT_FOUND)
+ .build ();
+
+ @HEAD
+ @GET
+ @Path("/healthcheck")
+ @Produces("text/html")
+ public Response healthcheck (@QueryParam("requestId") String requestId) {
+ * Returns the String cloud region id (i.e., cloud-region-id)
+ * @param execution
+ * @param url - url for AAI get cloud region
+ * @param backend - "PO" - real region, or "SDNC" - v2.5 (fake region).
+ */
+
+ //TODO: We should refactor this method to return WorkflowException instead of Error. Also to throw MSOWorkflowException which the calling flow will then catch.
+
+ public String getAAICloudReqion(Execution execution, String url, String backend, inputCloudRegion){
+ //Check request_id for the incoming request type
+ //For INFRA_ACTIVE_REQUESTS the payload request-id IS optional (unclear why, since the request id is the primary key; a SOAP UI test confirmed the MSO code does not treat a null id like an auto-generated sequence)
+ //For ACTIVE_REQUESTS payload request-id is NOT optional
+ execution.setVariable("UnexpectedError", "Caught a Java Lang Exception") // Adding this line temporarily until this flows error handling gets updated
+ exceptionUtil.buildAndThrowWorkflowException(execution, 500, "Caught a Java Lang Exception")
+ utils.log("DEBUG", "Caught Exception during processJavaException Method: " + e, isDebugEnabled)
+ execution.setVariable("UnexpectedError", "Exception in processJavaException method") // Adding this line temporarily until this flows error handling gets updated
+ exceptionUtil.buildAndThrowWorkflowException(execution, 500, "Exception in processJavaException method")
+ execution.setVariable("CRESI_unexpectedError", "Caught a Java Lang Exception") // Adding this line temporarily until this flows error handling gets updated
+ exceptionUtil.buildAndThrowWorkflowException(execution, 500, "Caught a Java Lang Exception")
+ utils.log("DEBUG", "Caught Exception during processJavaException Method: " + e, isDebugEnabled)
+ execution.setVariable("CRESI_unexpectedError", "Exception in processJavaException method") // Adding this line temporarily until this flows error handling gets updated
+ exceptionUtil.buildAndThrowWorkflowException(execution, 500, "Exception in processJavaException method")
+ execution.setVariable("UnexpectedError", "Caught a Java Lang Exception") // Adding this line temporarily until this flows error handling gets updated
+ exceptionUtil.buildAndThrowWorkflowException(execution, 500, "Caught a Java Lang Exception")
+ utils.log("DEBUG", "Caught Exception during processJavaException Method: " + e, isDebugEnabled)
+ execution.setVariable("UnexpectedError", "Exception in processJavaException method") // Adding this line temporarily until this flows error handling gets updated
+ exceptionUtil.buildAndThrowWorkflowException(execution, 500, "Exception in processJavaException method")
+ execution.setVariable("DELSI_unexpectedError", "Caught a Java Lang Exception") // Adding this line temporarily until this flows error handling gets updated
+ exceptionUtil.buildAndThrowWorkflowException(execution, 500, "Caught a Java Lang Exception")
+ utils.log("DEBUG", "Caught Exception during processJavaException Method: " + e, isDebugEnabled)
+ execution.setVariable("DELSI_unexpectedError", "Exception in processJavaException method") // Adding this line temporarily until this flows error handling gets updated
+ exceptionUtil.buildAndThrowWorkflowException(execution, 500, "Exception in processJavaException method")
+ utils.log("DEBUG", "Could not find Tenant Id element in Volume Group with Volume Group Id ${volumeGroupId}", isDebugLogEnabled)
+ exceptionUtil.buildAndThrowWorkflowException(execution, 2500, "Could not find Tenant Id element in Volume Group with Volume Group Id ${volumeGroupId}")
+ 5000 | Received error from A&AI ($A&AI_ERROR) | Asynchronous | During orchestration of the recipe, A&AI returned an error. The error returned by A&AI is passed through in $A&AI_ERROR.
+ 5010 | Could not communicate with A&AI | Asynchronous | During orchestration of the recipe, a connection with A&AI could not be established.
+ 5020 | No response from A&AI | Asynchronous | During orchestration of the recipe, communication was established with A&AI, but no response was received within the configured timeout.
+ //Check request_id for the incoming request type
+ //For INFRA_ACTIVE_REQUESTS the payload request-id IS optional (unclear why, since the request id is the primary key; a SOAP UI test confirmed the MSO code does not treat a null id like an auto-generated sequence)
+ //For ACTIVE_REQUESTS payload request-id is NOT optional
+ //Check if the ErrorCode node exists. If yes, initialize it from the request xml; if not, it keeps the default value already set in the initializeProcessVariables() method above.
+ //Check if the ErrorMessage node exists. If yes, initialize it from the request xml; if not, it keeps the default value already set in the initializeProcessVariables() method above.
+ exceptionUtil.buildAndThrowWorkflowException(execution, 500, "Incoming Type is Invalid. Please Specify Type as service-instance or service-subscription")
+ utils.log("DEBUG", "SI Resource Version is: " + resourceVersion, isDebugEnabled)
+ }else{
+ utils.log("DEBUG", "GET Vnf for Resource Version Response Does NOT Contain a resource-version", isDebugEnabled)
+ exceptionUtil.buildAndThrowWorkflowException(execution, 400, "Unable to obtain Vnf resource-version. GET Vnf Response Does NOT Contain a resource-version")
+ }
+
+ }else if(responseCode == 404){
+ utils.log("DEBUG", "GET Vnf Received a Not Found (404) Response", isDebugEnabled)
+ utils.log("DEBUG", "Incoming serviceInstanceId and serviceInstanceName are null. ServiceInstanceId or ServiceInstanceName is required to Get a service-instance.", isDebugEnabled)
+ exceptionUtil.buildAndThrowWorkflowException(execution, 500, "Incoming serviceInstanceId and serviceInstanceName are null. ServiceInstanceId or ServiceInstanceName is required to Get a service-instance.")
+ }else{
+ utils.log("DEBUG", "Incoming Service Instance Id is: " + serviceInstanceId, isDebugEnabled)
+ utils.log("DEBUG", "Incoming Service Instance Name is: " + serviceInstanceName, isDebugEnabled)
+ utils.log("DEBUG", "Incoming ServiceType or GlobalCustomerId is null. These variables are required to Get a service-subscription.", isDebugEnabled)
+ exceptionUtil.buildAndThrowWorkflowException(execution, 500, "Incoming ServiceType or GlobalCustomerId is null. These variables are required to Get a service-subscription.")
+ }else{
+ utils.log("DEBUG", "Incoming Service Type is: " + serviceType, isDebugEnabled)
+ utils.log("DEBUG", "Incoming Global Customer Id is: " + globalCustomerId, isDebugEnabled)
+ }
+ }else{
+ exceptionUtil.buildAndThrowWorkflowException(execution, 500, "Incoming Type is Invalid. Please Specify Type as service-instance or service-subscription")
+ }
+ }else{
+ exceptionUtil.buildAndThrowWorkflowException(execution, 2500, "Incoming GENGS_type is null. Variable is Required.")
+ exceptionUtil.buildWorkflowException(execution, 5000, "SDNCAdapter Encountered an Internal Error") // TODO: Not sure what message and error code we want here.....
+ }else{
+ execution.setVariable("WorkflowException", wf)
+ }
+
+ utils.log("DEBUG","Outgoing WorkflowException is: " + execution.getVariable("WorkflowException"), isDebugEnabled)
+ utils.log("DEBUG","=========== End Assign Error ===========", isDebugEnabled)
+ execution.setVariable("UnexpectedError", "Caught a Java Lang Exception") // Adding this line temporarily until this flows error handling gets updated
+ exceptionUtil.buildAndThrowWorkflowException(execution, 500, "Caught a Java Lang Exception")
+ utils.log("DEBUG", "Caught Exception during processJavaException Method: " + e, isDebugEnabled)
+ execution.setVariable("UnexpectedError", "Exception in processJavaException method") // Adding this line temporarily until this flows error handling gets updated
+ exceptionUtil.buildAndThrowWorkflowException(execution, 500, "Exception in processJavaException method")
+ exceptionUtil.buildAndThrowWorkflowException(execution, 2500, "Could not find Tenant Id element in Volume Group with Volume Group Id " + volumeGroupId)
+ logDebug("Received Tenant Id " + volumeGroupTenantId + " from AAI for Volume Group with Volume Group Id " + volumeGroupId + ", AIC Cloud Region " + aicCloudRegion, isDebugLogEnabled)
+ * Java content interface and Java element interface
+ * generated in the com.att.domain2.workflow.sdnc.adapter.schema.v1 package.
+ * <p>An ObjectFactory allows you to programmatically
+ * construct new instances of the Java representation
+ * for XML content. The Java representation of XML
+ * content can consist of schema derived interfaces
+ * and classes representing the binding of schema
+ * type definitions, element declarations and model
+ * groups. Factory methods for each of these are
+ * provided in this class.
+ *
+ */
+@XmlRegistry
+public class ObjectFactory {
+
+
+ /**
+ * Create a new ObjectFactory that can be used to create new instances of schema derived classes for package: com.att.domain2.workflow.sdnc.adapter.schema.v1
+ *
+ */
+ public ObjectFactory() {
+ }
+
+ /**
+ * Create an instance of {@link RequestHeader }
+ *
+ */
+ public RequestHeader createRequestHeader() {
+ return new RequestHeader();
+ }
+
+ /**
+ * Create an instance of {@link SDNCAdapterResponse }
+ *
+ */
+ public SDNCAdapterResponse createSDNCAdapterResponse() {
+ return new SDNCAdapterResponse();
+ }
+
+ /**
+ * Create an instance of {@link SDNCAdapterCallbackRequest }
+ *
+ */
+ public SDNCAdapterCallbackRequest createSDNCAdapterCallbackRequest() {
+ return new SDNCAdapterCallbackRequest();
+ }
+
+ /**
+ * Create an instance of {@link CallbackHeader }
+ *
+ */
+ public CallbackHeader createCallbackHeader() {
+ return new CallbackHeader();
+ }
+
+ /**
+ * Create an instance of {@link SDNCAdapterRequest }
+ *
+ */
+ public SDNCAdapterRequest createSDNCAdapterRequest() {
+ * Java content interface and Java element interface
+ * generated in the org.openecomp.mso.adapters.vnf.async.client package.
+ * <p>An ObjectFactory allows you to programmatically
+ * construct new instances of the Java representation
+ * for XML content. The Java representation of XML
+ * content can consist of schema derived interfaces
+ * and classes representing the binding of schema
+ * type definitions, element declarations and model
+ * groups. Factory methods for each of these are
+ * provided in this class.
+ *
+ */
+@XmlRegistry
+public class ObjectFactory {
+
+ private final static QName _RollbackVnfNotification_QNAME = new QName("http://com.att.mso/vnfNotify", "rollbackVnfNotification");
+ private final static QName _DeleteVnfNotification_QNAME = new QName("http://com.att.mso/vnfNotify", "deleteVnfNotification");
+ private final static QName _CreateVnfNotification_QNAME = new QName("http://com.att.mso/vnfNotify", "createVnfNotification");
+ private final static QName _UpdateVnfNotification_QNAME = new QName("http://com.att.mso/vnfNotify", "updateVnfNotification");
+ private final static QName _QueryVnfNotification_QNAME = new QName("http://com.att.mso/vnfNotify", "queryVnfNotification");
+
+ /**
+ * Create a new ObjectFactory that can be used to create new instances of schema derived classes for package: org.openecomp.mso.adapters.vnf.async.client
+ *
+ */
+ public ObjectFactory() {
+ }
+
+ /**
+ * Create an instance of {@link UpdateVnfNotification }
+ *
+ */
+ public UpdateVnfNotification createUpdateVnfNotification() {
+ return new UpdateVnfNotification();
+ }
+
+ /**
+ * Create an instance of {@link UpdateVnfNotification.Outputs }
+ *
+ */
+ public UpdateVnfNotification.Outputs createUpdateVnfNotificationOutputs() {
+ return new UpdateVnfNotification.Outputs();
+ }
+
+ /**
+ * Create an instance of {@link QueryVnfNotification }
+ *
+ */
+ public QueryVnfNotification createQueryVnfNotification() {
+ return new QueryVnfNotification();
+ }
+
+ /**
+ * Create an instance of {@link QueryVnfNotification.Outputs }
+ *
+ */
+ public QueryVnfNotification.Outputs createQueryVnfNotificationOutputs() {
+ return new QueryVnfNotification.Outputs();
+ }
+
+ /**
+ * Create an instance of {@link CreateVnfNotification }
+ *
+ */
+ public CreateVnfNotification createCreateVnfNotification() {
+ return new CreateVnfNotification();
+ }
+
+ /**
+ * Create an instance of {@link CreateVnfNotification.Outputs }
+ *
+ */
+ public CreateVnfNotification.Outputs createCreateVnfNotificationOutputs() {
+ return new CreateVnfNotification.Outputs();
+ }
+
+ /**
+ * Create an instance of {@link DeleteVnfNotification }
+ *
+ */
+ public DeleteVnfNotification createDeleteVnfNotification() {
+ return new DeleteVnfNotification();
+ }
+
+ /**
+ * Create an instance of {@link RollbackVnfNotification }
+ *
+ */
+ public RollbackVnfNotification createRollbackVnfNotification() {
+ return new RollbackVnfNotification();
+ }
+
+ /**
+ * Create an instance of {@link MsoRequest }
+ *
+ */
+ public MsoRequest createMsoRequest() {
+ return new MsoRequest();
+ }
+
+ /**
+ * Create an instance of {@link VnfRollback }
+ *
+ */
+ public VnfRollback createVnfRollback() {
+ return new VnfRollback();
+ }
+
+ /**
+ * Create an instance of {@link UpdateVnfNotification.Outputs.Entry }
+ *
+ */
+ public UpdateVnfNotification.Outputs.Entry createUpdateVnfNotificationOutputsEntry() {
+ return new UpdateVnfNotification.Outputs.Entry();
+ }
+
+ /**
+ * Create an instance of {@link QueryVnfNotification.Outputs.Entry }
+ *
+ */
+ public QueryVnfNotification.Outputs.Entry createQueryVnfNotificationOutputsEntry() {
+ return new QueryVnfNotification.Outputs.Entry();
+ }
+
+ /**
+ * Create an instance of {@link CreateVnfNotification.Outputs.Entry }
+ *
+ */
+ public CreateVnfNotification.Outputs.Entry createCreateVnfNotificationOutputsEntry() {
+ return new CreateVnfNotification.Outputs.Entry();
+ }
+
+ /**
+ * Create an instance of {@link JAXBElement }{@code <}{@link RollbackVnfNotification }{@code >}}
+ *
+ */
+ @XmlElementDecl(namespace = "http://com.att.mso/vnfNotify", name = "rollbackVnfNotification")
+ public JAXBElement<RollbackVnfNotification> createRollbackVnfNotification(RollbackVnfNotification value) {
+ return new JAXBElement<RollbackVnfNotification>(_RollbackVnfNotification_QNAME, RollbackVnfNotification.class, null, value);
+ }
+
+ /**
+ * Create an instance of {@link JAXBElement }{@code <}{@link DeleteVnfNotification }{@code >}}
+ *
+ */
+ @XmlElementDecl(namespace = "http://com.att.mso/vnfNotify", name = "deleteVnfNotification")
+ public JAXBElement<DeleteVnfNotification> createDeleteVnfNotification(DeleteVnfNotification value) {
+ return new JAXBElement<DeleteVnfNotification>(_DeleteVnfNotification_QNAME, DeleteVnfNotification.class, null, value);
+ }
+
+ /**
+ * Create an instance of {@link JAXBElement }{@code <}{@link CreateVnfNotification }{@code >}}
+ *
+ */
+ @XmlElementDecl(namespace = "http://com.att.mso/vnfNotify", name = "createVnfNotification")
+ public JAXBElement<CreateVnfNotification> createCreateVnfNotification(CreateVnfNotification value) {
+ return new JAXBElement<CreateVnfNotification>(_CreateVnfNotification_QNAME, CreateVnfNotification.class, null, value);
+ }
+
+ /**
+ * Create an instance of {@link JAXBElement }{@code <}{@link UpdateVnfNotification }{@code >}}
+ *
+ */
+ @XmlElementDecl(namespace = "http://com.att.mso/vnfNotify", name = "updateVnfNotification")
+ public JAXBElement<UpdateVnfNotification> createUpdateVnfNotification(UpdateVnfNotification value) {
+ return new JAXBElement<UpdateVnfNotification>(_UpdateVnfNotification_QNAME, UpdateVnfNotification.class, null, value);
+ }
+
+ /**
+ * Create an instance of {@link JAXBElement }{@code <}{@link QueryVnfNotification }{@code >}}
+ *
+ */
+ @XmlElementDecl(namespace = "http://com.att.mso/vnfNotify", name = "queryVnfNotification")
+ public JAXBElement<QueryVnfNotification> createQueryVnfNotification(QueryVnfNotification value) {
+ return new JAXBElement<QueryVnfNotification>(_QueryVnfNotification_QNAME, QueryVnfNotification.class, null, value);
+ * A list of {@link javax.xml.ws.WebServiceFeature} to configure on the proxy. Supported features not in the <code>features</code> parameter will have their default values.
+def createVfModuleVolumeInfraV1 = new CreateVfModuleVolumeInfraV1()
+createVfModuleVolumeInfraV1.executeMethod('buildWorkflowException', execution, 2500, "Volume group name not present in request.", isDebugLogEnabled)]]></bpmn2:script>\r
+def createVfModuleVolumeInfraV1 = new CreateVfModuleVolumeInfraV1()
+createVfModuleVolumeInfraV1.executeMethod('buildWorkflowException', execution, 2500, "Service instance id not found in AAI: $CVMVINFRAV1_serviceInstanceId.", isDebugLogEnabled)]]></bpmn2:script>\r
+def doCreateVfModuleVolumeV1 = new DoCreateVfModuleVolumeV1()
+doCreateVfModuleVolumeV1.executeMethod('buildWorkflowException', execution, 2500, "Volume group $DCVFMODVOLV1_volumeGroupName already exists in the system.", isDebugLogEnabled)]]></bpmn2:script>\r
+ <bpmn2:text>Catch MsoException triggered by Plugin. The vnfAdapterRest subflow is currently not directly throwing the BPMNError (MSOException).</bpmn2:text>\r
+ <bpmn2:sequenceFlow id="SequenceFlow_28" name="Generic Vnf does not exist
 and Vnf Id != null or
Generic Vnf does exist
and Vnf Id == null" sourceRef="ExclusiveGateway_5" targetRef="CreateGenericVnfFailure">\r
+ <bpmn2:documentation>This flow expects its incoming request to be in the variable 'CreateAAIVfModuleVolumeGroupRequest'. This flow produces no output.</bpmn2:documentation>\r
+ <bpmn2:scriptTask id="QueryAAIForVfModule" name="Query AAI for VF Module" scriptFormat="groovy">\r
+ <bpmn2:documentation>This flow expects its incoming request to be in the variable 'DoUpdateVfModuleRequest'. This flow produces no output.</bpmn2:documentation>\r
+ <bpmn2:documentation><![CDATA[This flow expects its incoming request to be in the variable 'PrepareUpdateAAIVfModuleRequest'. This flow has 2 outputs: The heat stack ID is placed in the variable 'PUAAIVfMod_heatStackId'.
+and the updated VF Module (type=VfModule) is placed in the variable 'PUAAIVfMod_outVfModule'.]]></bpmn2:documentation>\r
+ <bpmn2:scriptTask id="QueryAAIForGenericVnf" name="Query AAI for Generic Vnf" scriptFormat="groovy">\r
+ <bpmn2:documentation>This flow expects its incoming request to be in the variable 'UpdateAAIGenericVnfRequest'. This flow produces no output.</bpmn2:documentation>\r
+ <bpmn2:scriptTask id="QueryAAIForGenericVNF" name="Query AAI for Generic VNF" scriptFormat="groovy">\r
+ <bpmn2:documentation>This flow expects its incoming request to be in the variable 'UpdateAAIVfModuleRequest'. This flow produces no output.</bpmn2:documentation>\r
+ <bpmn2:scriptTask id="QueryAAIForVfModule" name="Query AAI for VF Module" scriptFormat="groovy">\r
+ <target name="deploy.jboss" depends="package.mvn, install.cockpit.plugin" description="Copies the cockpit plugin to the deployment directory defined in '${basedir}/build.properties' or '${user.home}/.camunda/build.properties'" />
+
+ <target name="deploy.tomcat" depends="package.mvn" description="Copies the cockpit plugin to the deployment directory defined in '${basedir}/build.properties' or '${user.home}/.camunda/build.properties'">
+ <fail unless="deploy.tomcat.dir" message="No deployment folder has been configured. Please copy the file '${basedir}/build.properties.example' to '${basedir}/build.properties' or '${user.home}/.camunda/build.properties' and change it according to your environment." />
+ <fail unless="deploy.jboss.dir" message="No deployment folder has been configured. Please copy the file '${basedir}/build.properties.example' to '${basedir}/build.properties' or '${user.home}/.camunda/build.properties' and change it according to your environment." />
+ <target name="undeploy.jboss" description="Deletes the cockpit plugin from the deployment directory defined in '${basedir}/build.properties' or '${user.home}/.camunda/build.properties'">
+ <target name="undeploy.tomcat" description="Deletes the cockpit plugin from the deployment directory defined in '${basedir}/build.properties' or '${user.home}/.camunda/build.properties'">
+public class MyBatisExtendedSessionFactory extends StandaloneProcessEngineConfiguration {
+
+ private String resourceName;
+
+ protected void init() {
+ throw new IllegalArgumentException(
+ "Normal 'init' on process engine only used for extended MyBatis mappings is not allowed, please use 'initFromProcessEngineConfiguration'. You cannot construct a process engine with this configuration.");
+ }
+
+ /**
+ * Initialize the {@link ProcessEngineConfiguration} from an existing one,
+ * reusing only its database settings, and initialize the database / MyBatis
+ * infrastructure.
+ */
+ public void initFromProcessEngineConfiguration(ProcessEngineConfigurationImpl processEngineConfiguration, String resourceName) {
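A minimal usage sketch of the method declared above, assuming an already-running engine is available and that the resource name points to an extra MyBatis mapping file; this is not taken from the plugin itself:

import org.camunda.bpm.engine.ProcessEngine;
import org.camunda.bpm.engine.impl.cfg.ProcessEngineConfigurationImpl;

// Hedged usage sketch; the mapping resource name is a placeholder.
public class ExtendedMappingsWiring {
    public static MyBatisExtendedSessionFactory wire(ProcessEngine processEngine) {
        // Reuse the running engine's configuration, and therefore its database settings.
        ProcessEngineConfigurationImpl existingConfig =
                (ProcessEngineConfigurationImpl) processEngine.getProcessEngineConfiguration();
        MyBatisExtendedSessionFactory factory = new MyBatisExtendedSessionFactory();
        factory.initFromProcessEngineConfiguration(existingConfig, "mappings/extended-mappings.xml");
        return factory;
    }
}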
+ return Response.status (500).entity ("Not able to find the Debug appender. Please verify that the needed logback file is present in the config folder.").build ();
+ throw new MsoPropertiesException("Unable to load the MSO properties file because format is not recognized (only .json or .properties): " + propertiesFilePath);
+ Assert.assertTrue(stringArray[0].contains("|reqId|servId|main||testRecordMetricEvent||VNF|createVNF|COMPLETE|0|Successful|Test UUID as JBoss not found|INFO|0|"));
+ Assert.assertTrue(stringArray[1].contains("|reqId|servId|main||testRecordMetricEvent|testUser|SDNC|removeSDNC|ERROR|501|Exception|Test UUID as JBoss not found|INFO|0|") && stringArray[1].contains("|127.0.0.1||||testVNF|||||"));
+ Assert.assertTrue (stringArray[1].contains("|reqId|servId|main||testRecordAuditEvent|testUser|ERROR|501|Exception|Test UUID as JBoss not found|INFO|0|") && stringArray[1].contains("|127.0.0.1||||||||"));
+ fail ("MsoPropertiesException should have been raised");
+ } catch (MsoPropertiesException ep) {
+ assertTrue(("Unable to load the MSO properties file because format is not recognized (only .json or .properties): new_file.toto").equals(ep.getMessage()));
+ MsoPropertiesFactory msoPropertiesFactory0 = new MsoPropertiesFactory();
+ // Undeclared exception!
+ try {
+ msoPropertiesFactory0.changeMsoPropertiesFilePath((String) null, "Unable to load the MSO properties file because format is not recognized (only .json or .properties): ");
+ errors.put(ErrorNumbers.RECIPE_DOES_NOT_EXIST, "Recipe for %s-type and action specified does not exist in catalog %s");
+ errors.put(ErrorNumbers.SERVICE_PARAMETERS_FAILED_SCHEMA_VALIDATION, "Service specific parameters passed in request FAILED schema validation. %s");
+
+ errors.put(ErrorNumbers.LOCKED_CREATE_ON_THE_SAME_VNF_NAME_IN_PROGRESS, "%s-name (%s) is locked (status = %s) because already working on a CREATE request with same %s-name.");
+ errors.put(ErrorNumbers.LOCKED_SAME_ACTION_AND_VNF_ID, "%s-id (%s) is locked (status = %s) because already working on a request with same action (%s) for this %s-id.");
+ errors.put(ErrorNumbers.REQUEST_TIMED_OUT, "Service request timed out before completing");
+ errors.put(ErrorNumbers.ERROR_FROM_BPEL, "BPEL returned an error: %s");
+ errors.put(ErrorNumbers.NO_COMMUNICATION_TO_BPEL, "Could not communicate with BPEL %s");
+ errors.put(ErrorNumbers.NO_RESPONSE_FROM_BPEL, "No response from BPEL %s");
+ errors.put(ErrorNumbers.COULD_NOT_WRITE_TO_REQUESTS_DB, "Could not insert or update record in MSO_REQUESTS DB %s");
+ errors.put(ErrorNumbers.NO_COMMUNICATION_TO_REQUESTS_DB, "Could not communicate with MSO_REQUESTS DB %s");
+ errors.put(ErrorNumbers.NO_COMMUNICATION_TO_CATALOG_DB, "Could not communicate with MSO_CATALOG DB %s");
+ errors.put(ErrorNumbers.ERROR_FROM_CATALOG_DB, "Received error from MSO_CATALOG DB %s");
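Each entry above is a printf-style template whose %s placeholders are filled in when the error is reported. A self-contained illustration (the argument values are made up for the example) of how one of these templates renders:

// Illustration only: render the RECIPE_DOES_NOT_EXIST template with example arguments.
public class ErrorTemplateExample {
    public static void main(String[] args) {
        String template = "Recipe for %s-type and action specified does not exist in catalog %s";
        System.out.println(String.format(template, "network", "(version v1)"));
        // Prints: Recipe for network-type and action specified does not exist in catalog (version v1)
    }
}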
+ throw new ValidationException ("format for network request");
+ }
+
+ if (isWrongRootElement) {
+ msoLogger.error (MessageEnum.APIH_REQUEST_VALIDATION_ERROR_REASON, "root element is not correct", "", "", MsoLogger.ErrorCode.DataError, "root element <network-request> expected");
+ throw new ValidationException ("root element <network-request> expected");
+ }
+
+ if (networkReq == null) {
+ throw new ValidationException ("network-request");
+ }
+
+ this.rinfo = networkReq.getRequestInfo ();
+
+ if (this.rinfo == null) {
+ throw new ValidationException ("request-info");
+ }
+
+ action = this.rinfo.getAction ();
+ if (action == null) {
+ throw new ValidationException ("action");
+ }
+
+ if (!InfraUtils.isActionAllowed (props, "network", version, action.value ())) {
+ throw new ValidationException ("action allowable for version " + version + " of network request");
+ private static final String NOT_FOUND = "<!DOCTYPE html><html><head><meta charset=\"ISO-8859-1\"><title>Application Not Started</title></head><body>Application not started, properties file missing or invalid or Database Connection failed</body></html>";
+
+ private static final Response NOT_STARTED_RESPONSE = Response.status (HttpStatus.SC_SERVICE_UNAVAILABLE)
+ .entity (NOT_FOUND)
+ .build ();
+
+ @GET
+ public Response queryFilters (@QueryParam("network-type") String networkType,
+ "CREATE on the same Network Name is already progress", "", "", MsoLogger.ErrorCode.DataError, "Duplicates request - CREATE on the same Network Name is already progress");
+ msoRequest.createRequestRecord (Status.FAILED);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.Conflict, "Duplicates request - CREATE on the same Network Name is already progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ } else {
+ // Check if this request is a duplicate of the one with the same networkId
+ InfraActiveRequests dup = null;
+ msoLogger.debug ("Checking for a duplicate with the same network-id");
+ msoLogger.error (MessageEnum.APIH_DUPLICATE_CHECK_EXC_ATT, "network-id", "", "", MsoLogger.ErrorCode.DataError, "Exception while checking for a duplicate request with the same network-id", e);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.DBAccessError, "Exception while checking for a duplicate request with the same network-id");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ if (dup != null) {
+ // Found the duplicate record. Return the appropriate error.
+ + " on the same Network Id already in progress", "", "", MsoLogger.ErrorCode.DataError, "Duplicated request on the same Network Id already in progress");
+
+ msoRequest.createRequestRecord (Status.FAILED);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.Conflict, "Duplicated request on the same Network Id already in progress.");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ }
+
+ String orchestrationURI = "";
+
+ // Query MSO Catalog DB
+ try (CatalogDatabase db = new CatalogDatabase()) {
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC_REASON, "Exception while creating record in DB", "", "", MsoLogger.ErrorCode.DataError, "Exception while creating record in DB", e);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "BPMN accepted the request, the request is in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ "Response from BPEL engine is failed with HTTP Status=" + bpelStatus, "Camunda", "", MsoLogger.ErrorCode.BusinessProcesssError, "Response from BPEL engine is failed with HTTP Status=" + bpelStatus);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.InternalError, "Response from BPMN engine is with status Failed");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ msoLogger.error (MessageEnum.APIH_BPEL_RESPONSE_ERROR, "Response from BPEL engine is empty", "Camunda", "", MsoLogger.ErrorCode.BusinessProcesssError, "Response from BPEL engine is empty");
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.InternalError, "Response from BPEL engine is empty");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ return resp;
+ }
+ }
+ } catch (Exception e) {
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC, "", "", MsoLogger.ErrorCode.DataError, "Exception while communicating with Catalog DB", e);
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC, MSO_PROP_APIHANDLER_INFRA, "", "", MsoLogger.ErrorCode.AvailabilityError, "Exception while communicating with Request DB - Infra Request Lookup", e);
+ "Orchestration RequestId " + requestId + " is not found in DB",
+ ErrorNumbers.SVC_DETAILED_SERVICE_ERROR,
+ null);
+ msoLogger.error (MessageEnum.APIH_BPEL_COMMUNICATE_ERROR, MSO_PROP_APIHANDLER_INFRA, "", "", MsoLogger.ErrorCode.BusinessProcesssError, "Null response from RequestDB when searching by RequestId");
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.DataNotFound, "Null response from RequestDB when searching by RequestId");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ public final static String MSO_PROP_APIHANDLER_INFRA = "MSO_PROP_APIHANDLER_INFRA";
+
+ private static MsoPropertiesFactory msoPropertiesFactory = new MsoPropertiesFactory ();
+
+ @Context
+ private UriInfo uriInfo;
+
+ private static final String NOT_FOUND = "<!DOCTYPE html><html><head><meta charset=\"ISO-8859-1\"><title>Application Not Started</title></head><body>Application not started, properties file missing or invalid or Database Connection failed</body></html>";
+
+ private static final Response NOT_STARTED_RESPONSE = Response.status (HttpStatus.SC_SERVICE_UNAVAILABLE)
+ String dupMessage = "Error: Locked instance - This " + requestScope + " (" + instance + ") " + "already has a request being worked with a status of " + dup.getRequestStatus() + " (RequestId - " + dup.getRequestId() + "). The existing request must finish or be cleaned up before proceeding.";
+ //List<String> variables = new ArrayList<String>();
+ msoLogger.warn (MessageEnum.APIH_DUPLICATE_FOUND, dupMessage, "", "", MsoLogger.ErrorCode.SchemaError, "Duplicate request - Subscriber already has a request for this service");
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.DBAccessError, "Exception while querying Catalog DB");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ db.close();
+ return response;
+ }
+
+ if (recipeLookupResult == null) {
+ msoLogger.error (MessageEnum.APIH_DB_ATTRIBUTE_NOT_FOUND, MSO_PROP_APIHANDLER_INFRA, "", "", MsoLogger.ErrorCode.DataError, "No recipe found in DB");
+ else if (action == Action.createInstance || action == Action.updateInstance){
+ // There is no entry for this vfModuleType with this version, if specified, in VF_MODULE table in Catalog DB.
+ // This request cannot proceed
+ msoLogger.error (MessageEnum.APIH_DB_ATTRIBUTE_NOT_FOUND, MSO_PROP_APIHANDLER_INFRA, "VF Module Type", "", MsoLogger.ErrorCode.DataError, "No VfModuleType found in DB");
+ "VnfType " + msoRequest.getVnfType () + " and VF Module Model Name " + msoRequest.getVfModuleModelName() + serviceVersionText + " not found in MSO Catalog DB",
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC_REASON, "Exception while creating record in DB", "", "", MsoLogger.ErrorCode.SchemaError, "Exception while creating record in DB", e);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "BPMN accepted the request, the request is in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) camundaJSONResponseBody);
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC, MSO_PROP_APIHANDLER_INFRA, "", "", MsoLogger.ErrorCode.AvailabilityError, "Exception while communicating with Request DB - Infra Request Lookup", e);
+ "Orchestration RequestId " + requestId + " is not found in DB",
+ ErrorNumbers.SVC_DETAILED_SERVICE_ERROR,
+ null);
+ msoLogger.error (MessageEnum.APIH_BPEL_COMMUNICATE_ERROR, MSO_PROP_APIHANDLER_INFRA, "", "", MsoLogger.ErrorCode.BusinessProcesssError, "Null response from RequestDB when searching by RequestId");
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.DataNotFound, "Null response from RequestDB when searching by RequestId");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.ServiceNotAvailable, "Exiting the transaction: Infra API Handler not started, properties file missing or invalid");
+ return NOT_STARTED_RESPONSE;
+ }
+
+ uriInfo.getRequestUri ();
+
+ if (reqXML == null) {
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.BadRequest, "The content of the request is null");
+ msoLogger.warn (MessageEnum.APIH_DUPLICATE_FOUND, "CREATE on the same VNF Name is already in progress", "", "", MsoLogger.ErrorCode.DataError, "Duplicate request - CREATE on the same VNF Name is already in progress");
+ msoRequest.createRequestRecord (Status.FAILED);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.Conflict, "Duplicate request - CREATE on the same VNF Name is already in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ } else {
+ // Check if this request is a duplicate of the one with the same vnfId
+ InfraActiveRequests dup = null;
+ msoLogger.debug ("Checking for a duplicate with the same vnf-id");
+ msoLogger.error (MessageEnum.APIH_DUPLICATE_CHECK_EXC_ATT, "vnf-id", "", "", MsoLogger.ErrorCode.DataError, "Exception while checking for a duplicate request with the same vnf-id", e);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.DBAccessError, "Exception while checking for a duplicate request with the same vnf-id");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ if (dup != null) {
+ // Found the duplicate record. Return the appropriate error.
+ + " on the same VNF Id already in progress", "", "", MsoLogger.ErrorCode.DataError, "Duplicated request on the same VNF Id already in progress");
+
+ msoRequest.createRequestRecord (Status.FAILED);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.Conflict, "Duplicated request on the same VNF Id already in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ }
+
+ String orchestrationURI = "";
+
+ try (CatalogDatabase db = new CatalogDatabase()) {
+
+ Recipe recipe = null;
+
+ if (version.equals(Constants.SCHEMA_VERSION_V1)) {
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC_REASON, "Exception while creating record in DB", "", "", MsoLogger.ErrorCode.SchemaError, "Exception while creating record in DB", e);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "BPMN accepted the request, the request is in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ "Response from BPEL engine is failed with HTTP Status=" + bpelStatus, "Camunda", "", MsoLogger.ErrorCode.BusinessProcesssError, "Response from BPEL engine is failed with HTTP Status=" + bpelStatus);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.InternalError, "Response from BPMN engine is failed");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ msoLogger.error (MessageEnum.APIH_BPEL_RESPONSE_ERROR, "Response from BPEL engine is empty", "Camunda", "", MsoLogger.ErrorCode.BusinessProcesssError, "Response from BPEL engine is empty");
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.InternalError, "Response from BPEL engine is empty");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ return resp;
+ }
+ }
+ } catch (Exception e) {
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC, "", "", MsoLogger.ErrorCode.AvailabilityError, "Exception while communicating with Catalog DB", e);
+ "CREATE on the same Network Name is already in progress", "", "", MsoLogger.ErrorCode.DataError, "Duplicate request - CREATE on the same Network Name is already in progress");
+ msoRequest.createRequestRecord (Status.FAILED);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.Conflict, "Duplicate request - CREATE on the same Network Name is already in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ } else {
+ // Check if this request is a duplicate of the one with the same networkId
+ InfraActiveRequests dup = null;
+ msoLogger.debug ("Checking for a duplicate with the same network-id");
+ msoLogger.error (MessageEnum.APIH_DUPLICATE_CHECK_EXC_ATT, "network-id", "", "", MsoLogger.ErrorCode.DataError, "Exception while checking for a duplicate request with the same network-id", e);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.DBAccessError, "Exception while checking for a duplicate request with the same network-id");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ if (dup != null) {
+ // Found the duplicate record. Return the appropriate error.
+ + " on the same Network Id already in progress", "", "", MsoLogger.ErrorCode.DataError, "Duplicated request on the same Network Id already in progress");
+
+ msoRequest.createRequestRecord (Status.FAILED);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.Conflict, "Duplicated request on the same Network Id already in progress.");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ }
+
+ String orchestrationURI = "";
+
+ // Query MSO Catalog DB
+ try (CatalogDatabase db = new CatalogDatabase()) {
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC_REASON, "Exception while creating record in DB", "", "", MsoLogger.ErrorCode.DataError, "Exception while creating record in DB", e);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "BPMN accepted the request, the request is in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ "Response from BPEL engine is failed with HTTP Status=" + bpelStatus, "Camunda", "", MsoLogger.ErrorCode.BusinessProcesssError, "Response from BPEL engine is failed with HTTP Status=" + bpelStatus);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.InternalError, "Response from BPMN engine is with status Failed");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ msoLogger.error (MessageEnum.APIH_BPEL_RESPONSE_ERROR, "Response from BPEL engine is empty", "Camunda", "", MsoLogger.ErrorCode.BusinessProcesssError, "Response from BPEL engine is empty");
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.InternalError, "Response from BPEL engine is empty");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ return resp;
+ }
+ }
+ } catch (Exception e) {
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC, "", "", MsoLogger.ErrorCode.DataError, "Exception while communicating with Catalog DB", e);
+ "CREATE on the same Volume Group Name is already in progress", "", "", MsoLogger.ErrorCode.DataError, "Duplicate request - CREATE on the same Volume Group Name is already in progress");
+ msoRequest.createRequestRecord (Status.FAILED);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.Conflict, "Duplicate request - CREATE on the same Volume Group Name is already in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ } else {
+ // Check if this request is a duplicate of the one with the same volumeGroupId
+ InfraActiveRequests dup = null;
+ msoLogger.debug ("Checking for a duplicate with the same volume-group-id");
+ msoLogger.error (MessageEnum.APIH_DUPLICATE_CHECK_EXC_ATT, "volume-group-id", "", "", MsoLogger.ErrorCode.DataError, "Exception while checking for a duplicate request with the same volume-group-id", e);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.DBAccessError, "Exception while checking for a duplicate request with the same volume-group-id");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ if (dup != null) {
+ // Found the duplicate record. Return the appropriate error.
+ + " on the same Volume Group Id already in progress", "", "", MsoLogger.ErrorCode.DataError, "Duplicated request on the same Volume Group Id already in progress");
+ msoRequest.createRequestRecord (Status.FAILED);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.Conflict, "Duplicated request on the same Volume Group Id already in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ }
+
+ String orchestrationURI = "";
+
+ // Query MSO Catalog DB
+ try (CatalogDatabase db = new CatalogDatabase()) {
+
+ Recipe recipe = null;
+
+ if (version.equals(Constants.SCHEMA_VERSION_V1)) {
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC_REASON, "Exception while creating record in DB", "", "", MsoLogger.ErrorCode.AvailabilityError, "Exception in createRequestRecord", e);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "BPMN accepted the request, the request is in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ "Response from BPEL engine is failed with HTTP Status=" + bpelStatus, "Camunda", "", MsoLogger.ErrorCode.DataError, "Response from BPEL engine is failed with HTTP Status=" + bpelStatus);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.InternalError, "Response from BPMN engine is with status Failed");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ msoLogger.error (MessageEnum.APIH_BPEL_RESPONSE_ERROR, "Response from BPEL engine is empty", "Camunda", "", MsoLogger.ErrorCode.DataError, "Response from BPEL engine is empty");
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.InternalError, "Response from BPMN engine is empty");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ return resp;
+ }
+ }
+ } catch (Exception e) {
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC, "", "", MsoLogger.ErrorCode.DataError, "Exception while communicating with Catalog DB", e);
+ String dupMessage = "Error: Locked instance - This " + requestScope + " (" + instance + ") " + "already has a request being worked with a status of " + dup.getRequestStatus() + " (RequestId - " + dup.getRequestId() + "). The existing request must finish or be cleaned up before proceeding.";
+ //List<String> variables = new ArrayList<String>();
+ msoLogger.warn (MessageEnum.APIH_DUPLICATE_FOUND, dupMessage, "", "", MsoLogger.ErrorCode.SchemaError, "Duplicate request - Subscriber already has a request for this service");
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.DBAccessError, "Exception while querying Catalog DB");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ db.close();
+ return response;
+ }
+
+ if (recipeLookupResult == null) {
+ msoLogger.error (MessageEnum.APIH_DB_ATTRIBUTE_NOT_FOUND, MSO_PROP_APIHANDLER_INFRA, "", "", MsoLogger.ErrorCode.DataError, "No recipe found in DB");
+ else if (action == Action.createInstance || action == Action.updateInstance){
+ // There is no entry for this vfModuleType with this version, if specified, in VF_MODULE table in Catalog DB.
+ // This request cannot proceed
+ msoLogger.error (MessageEnum.APIH_DB_ATTRIBUTE_NOT_FOUND, MSO_PROP_APIHANDLER_INFRA, "VF Module Type", "", MsoLogger.ErrorCode.DataError, "No VfModuleType found in DB");
+ "VnfType " + msoRequest.getVnfType () + " and VF Module Model Name " + msoRequest.getVfModuleModelName() + serviceVersionText + " not found in MSO Catalog DB",
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC_REASON, "Exception while creating record in DB", "", "", MsoLogger.ErrorCode.SchemaError, "Exception while creating record in DB", e);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "BPMN accepted the request, the request is in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) camundaJSONResponseBody);
+ msoLogger.error (MessageEnum.APIH_VNFREQUEST_VALIDATION_ERROR, "", "", MsoLogger.ErrorCode.DataError, "Exception in format for vnf request ", e);
+ throw new ValidationException ("format for vnf request");
+ }
+
+ if (isWrongRootElement) {
+ msoLogger.error (MessageEnum.APIH_REQUEST_VALIDATION_ERROR_REASON, "root element is not correct", "", "", MsoLogger.ErrorCode.DataError, "root element is not correct");
+ throw new ValidationException ("root element <vnf-request> expected");
+ private static final String NOT_FOUND = "<!DOCTYPE html><html><head><meta charset=\"ISO-8859-1\"><title>Application Not Started</title></head><body>Application not started, properties file missing or invalid or Database Connection failed</body></html>";
+
+ private static final Response NOT_STARTED_RESPONSE = Response.status (HttpStatus.SC_SERVICE_UNAVAILABLE)
+ .entity (NOT_FOUND)
+ .build ();
+
+ @GET
+ public Response queryFilters (@QueryParam("vnf-type") String vnfType,
+ if (MsoPropertiesUtils.getNoPropertiesState()) {
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.ServiceNotAvailable, "Exiting the transaction: Infra API Handler not started, properties file missing or invalid");
+ return NOT_STARTED_RESPONSE;
+ }
+
+ uriInfo.getRequestUri ();
+
+ if (reqXML == null) {
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.BadRequest, "The content of the request is null");
+ msoLogger.warn (MessageEnum.APIH_DUPLICATE_FOUND, "CREATE on the same VNF Name is already in progress", "", "", MsoLogger.ErrorCode.DataError, "Duplicate request - CREATE on the same VNF Name is already in progress");
+ msoRequest.createRequestRecord (Status.FAILED);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.Conflict, "Duplicate request - CREATE on the same VNF Name is already in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ } else {
+ // Check if this request is a duplicate of the one with the same vnfId
+ InfraActiveRequests dup = null;
+ msoLogger.debug ("Checking for a duplicate with the same vnf-id");
+ msoLogger.error (MessageEnum.APIH_DUPLICATE_CHECK_EXC_ATT, "vnf-id", "", "", MsoLogger.ErrorCode.DataError, "Exception while checking for a duplicate request with the same vnf-id", e);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.DBAccessError, "Exception while checking for a duplicate request with the same vnf-id");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ if (dup != null) {
+ // Found the duplicate record. Return the appropriate error.
+ + " on the same VNF Id already in progress", "", "", MsoLogger.ErrorCode.DataError, "Duplicated request on the same VNF Id already in progress");
+
+ msoRequest.createRequestRecord (Status.FAILED);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.Conflict, "Duplicated request on the same VNF Id already in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ }
+
+ String orchestrationURI = "";
+
+ // Query MSO Catalog DB
+ try (CatalogDatabase db = new CatalogDatabase()){
+
+ Recipe recipe = null;
+
+ if (version.equals(Constants.SCHEMA_VERSION_V1)) {
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC_REASON, "Exception while creating record in DB", "", "", MsoLogger.ErrorCode.SchemaError, "Exception while creating record in DB", e);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "BPMN accepted the request, the request is in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ "Response from BPEL engine is failed with HTTP Status=" + bpelStatus, "Camunda", "", MsoLogger.ErrorCode.BusinessProcesssError, "Response from BPEL engine is failed with HTTP Status=" + bpelStatus);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.InternalError, "Response from BPMN engine is failed");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ msoLogger.error (MessageEnum.APIH_BPEL_RESPONSE_ERROR, "Response from BPEL engine is empty", "Camunda", "", MsoLogger.ErrorCode.BusinessProcesssError, "Response from BPEL engine is empty");
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.InternalError, "Response from BPEL engine is empty");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ return resp;
+ }
+ }
+ } catch (Exception e) {
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC, "", "", MsoLogger.ErrorCode.AvailabilityError, "Exception while communicating with Catalog DB", e);
+ } else if (response.getStatus() == HttpStatus.SC_NOT_FOUND) {
+ returnMsg = "Record not found. StatusCode=" + HttpStatus.SC_NOT_FOUND;
+ } else if (response.getStatus() == HttpStatus.SC_BAD_REQUEST) {
+ returnMsg = "Bad request: one of the following attributes serviceType, aicNodeClli, tenantId, volumeGroupId, volumeGroupName should be defined. StatusCode=" + HttpStatus.SC_BAD_REQUEST;
+ throw new ValidationException ("format for volume request");
+ }
+
+ if (isWrongRootElement) {
+ msoLogger.error (MessageEnum.APIH_REQUEST_VALIDATION_ERROR_REASON, "root element is not correct", "", "", MsoLogger.ErrorCode.SchemaError, "root element <volume-request> expected");
+ throw new ValidationException ("root element <volume-request> expected");
+ }
+
+ if (volumeReq == null) {
+ throw new ValidationException ("volume-request");
+ private static final String NOT_FOUND = "<!DOCTYPE html><html><head><meta charset=\"ISO-8859-1\"><title>Application Not Started</title></head><body>Application not started, properties file missing or invalid or Database Connection failed</body></html>";
+
+ private static final Response NOT_STARTED_RESPONSE = Response.status (HttpStatus.SC_SERVICE_UNAVAILABLE)
+ .entity (NOT_FOUND)
+ .build ();
+
+ @GET
+ public Response queryFilters (@QueryParam("vnf-type") String vnfType,
+ } else if (response.getStatus() == HttpStatus.SC_NOT_FOUND) {
+ returnMsg = "Record not found . StatusCode=" + HttpStatus.SC_NOT_FOUND;
+ } else if (response.getStatus() == HttpStatus.SC_BAD_REQUEST) {
+ returnMsg = "Bad request: one of the following attribute serviceType, aicNodeClli, tenantId, volumeGroupId, volumeGroupName should be defined. StatusCode=" + HttpStatus.SC_BAD_REQUEST;
+ }
+ return returnMsg;
+ }
+
+ @POST
+ @Path("/")
+ @Produces(MediaType.APPLICATION_XML)
+ public Response manageVolumeRequest (String reqXML, @PathParam("version") String version) {
+ "CREATE on the same Volume Group Name is already progress", "", "", MsoLogger.ErrorCode.DataError, "Duplicates request - CREATE on the same Volume Group Name is already progress");
+ msoRequest.createRequestRecord (Status.FAILED);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.Conflict, "Duplicates request - CREATE on the same Volume Group Name is already progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ } else {
+ // Check if this request is a duplicate of the one with the same volumeGroupId
+ InfraActiveRequests dup = null;
+ msoLogger.debug ("Checking for a duplicate with the same volume-group-id");
+ msoLogger.error (MessageEnum.APIH_DUPLICATE_CHECK_EXC_ATT, "volume-group-id", "", "", MsoLogger.ErrorCode.DataError, "Exception while checking for a duplicate request with the same volume-group-id", e);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.DBAccessError, "Exception while checking for a duplicate request with the same volume-group-id");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ if (dup != null) {
+ // Found the duplicate record. Return the appropriate error.
+ + " on the same Volume Group Id already in progress", "", "", MsoLogger.ErrorCode.DataError, "Duplicated request on the same Volume Group Id already in progress");
+ msoRequest.createRequestRecord (Status.FAILED);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.Conflict, "Duplicated request on the same Volume Group Id already in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) response.getEntity ());
+ return response;
+ }
+ }
+
+ String orchestrationURI = "";
+
+ // Query MSO Catalog DB
+ try(CatalogDatabase db = new CatalogDatabase ()) {
+ Recipe recipe = null;
+
+ if (version.equals(Constants.SCHEMA_VERSION_V1)) {
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC_REASON, "Exception while creating record in DB", "", "", MsoLogger.ErrorCode.AvailabilityError, "Exception in createRequestRecord", e);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "BPMN accepted the request, the request is in progress");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ "Response from BPEL engine is failed with HTTP Status=" + bpelStatus, "Camunda", "", MsoLogger.ErrorCode.DataError, "Response from BPEL engine is failed with HTTP Status=" + bpelStatus);
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.InternalError, "Response from BPMN engine is with status Failed");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ msoLogger.error (MessageEnum.APIH_BPEL_RESPONSE_ERROR, "Response from BPEL engine is empty", "Camunda", "", MsoLogger.ErrorCode.DataError, "Response from BPEL engine is empty");
+ msoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.InternalError, "Response from BPMN engine is empty");
+ msoLogger.debug ("End of the transaction, the final response is: " + (String) resp.getEntity ());
+ return resp;
+ }
+ }
+
+ } catch (Exception e) {
+ msoLogger.error (MessageEnum.APIH_DB_ACCESS_EXC, "", "", MsoLogger.ErrorCode.DataError, "Exception while communicating with Catalog DB", e);
+ * Java content interface and Java element interface
+ * generated in the org.openecomp.mso.apihandlerinfra.vnfbeans1 package.
+ * <p>An ObjectFactory allows you to programmatically
+ * construct new instances of the Java representation
+ * for XML content. The Java representation of XML
+ * content can consist of schema derived interfaces
+ * and classes representing the binding of schema
+ * type definitions, element declarations and model
+ * groups. Factory methods for each of these are
+ * provided in this class.
+ *
+ */
+@XmlRegistry
+public class ObjectFactory {
+
+ private final static QName _NetworkParams_QNAME = new QName("http://ecomp.att.com/mso/infra/network-request/v1", "network-params");
+
+ /**
+ * Create a new ObjectFactory that can be used to create new instances of schema derived classes for package: org.openecomp.mso.apihandlerinfra.vnfbeans1
+ *
+ */
+ public ObjectFactory() {
+ }
+
+ /**
+ * Create an instance of {@link RequestInfo }
+ *
+ */
+ public RequestInfo createRequestInfo() {
+ return new RequestInfo();
+ }
+
+ /**
+ * Create an instance of {@link NetworkRequest }
+ *
+ */
+ public NetworkRequest createNetworkRequest() {
+ return new NetworkRequest();
+ }
+
+ /**
+ * Create an instance of {@link NetworkInputs }
+ *
+ */
+ public NetworkInputs createNetworkInputs() {
+ return new NetworkInputs();
+ }
+
+ /**
+ * Create an instance of {@link NetworkOutputs }
+ *
+ */
+ public NetworkOutputs createNetworkOutputs() {
+ return new NetworkOutputs();
+ }
+
+ /**
+ * Create an instance of {@link NetworkRequests }
+ *
+ */
+ public NetworkRequests createNetworkRequests() {
+ return new NetworkRequests();
+ }
+
+ /**
+ * Create an instance of {@link NetworkTypes }
+ *
+ */
+ public NetworkTypes createNetworkTypes() {
+ return new NetworkTypes();
+ }
+
+ /**
+ * Create an instance of {@link NetworkType }
+ *
+ */
+ public NetworkType createNetworkType() {
+ return new NetworkType();
+ }
+
+ /**
+ * Create an instance of {@link JAXBElement }{@code <}{@link Object }{@code >}}
+ *
+ */
+ @XmlElementDecl(namespace = "http://ecomp.att.com/mso/infra/network-request/v1", name = "network-params")
+ public JAXBElement<Object> createNetworkParams(Object value) {
+ return new JAXBElement<Object>(_NetworkParams_QNAME, Object.class, null, value);
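The three generated ObjectFactory classes in this change (network-request above, vnf-request and volume-request below) are used the same way: build the beans through the factory and hand them to JAXB. A hedged usage sketch, assuming NetworkRequest is a root element and exposes a setRequestInfo setter (both typical for these generated beans, but not shown in this diff):

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;

// Hedged sketch of marshalling a network-request built via the generated factory.
public class NetworkRequestMarshalExample {
    public static void main(String[] args) throws Exception {
        ObjectFactory factory = new ObjectFactory();
        NetworkRequest request = factory.createNetworkRequest();
        request.setRequestInfo(factory.createRequestInfo()); // assumed setter on the generated bean

        JAXBContext context = JAXBContext.newInstance(NetworkRequest.class);
        Marshaller marshaller = context.createMarshaller();
        marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
        marshaller.marshal(request, System.out); // assumes NetworkRequest is annotated @XmlRootElement
    }
}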
+ * Java content interface and Java element interface
+ * generated in the org.openecomp.mso.apihandlerinfra.vnfbeans1 package.
+ * <p>An ObjectFactory allows you to programmatically
+ * construct new instances of the Java representation
+ * for XML content. The Java representation of XML
+ * content can consist of schema derived interfaces
+ * and classes representing the binding of schema
+ * type definitions, element declarations and model
+ * groups. Factory methods for each of these are
+ * provided in this class.
+ *
+ */
+@XmlRegistry
+public class ObjectFactory {
+
+ private final static QName _VnfParams_QNAME = new QName("http://ecomp.att.com/mso/infra/vnf-request/v1", "vnf-params");
+ private final static QName _NetworkParams_QNAME = new QName("http://ecomp.att.com/mso/infra/vnf-request/v1", "network-params");
+
+ /**
+ * Create a new ObjectFactory that can be used to create new instances of schema derived classes for package: org.openecomp.mso.apihandlerinfra.vnfbeans1
+ *
+ */
+ public ObjectFactory() {
+ }
+
+ /**
+ * Create an instance of {@link VnfInputs }
+ *
+ */
+ public VnfInputs createVnfInputs() {
+ return new VnfInputs();
+ }
+
+ /**
+ * Create an instance of {@link RequestInfo }
+ *
+ */
+ public RequestInfo createRequestInfo() {
+ return new RequestInfo();
+ }
+
+ /**
+ * Create an instance of {@link VnfOutputs }
+ *
+ */
+ public VnfOutputs createVnfOutputs() {
+ return new VnfOutputs();
+ }
+
+ /**
+ * Create an instance of {@link VnfType }
+ *
+ */
+ public VnfType createVnfType() {
+ return new VnfType();
+ }
+
+ /**
+ * Create an instance of {@link VnfRequest }
+ *
+ */
+ public VnfRequest createVnfRequest() {
+ return new VnfRequest();
+ }
+
+
+ /**
+ * Create an instance of {@link VnfTypes }
+ *
+ */
+ public VnfTypes createVnfTypes() {
+ return new VnfTypes();
+ }
+
+ /**
+ * Create an instance of {@link VnfRequests }
+ *
+ */
+ public VnfRequests createVnfRequests() {
+ return new VnfRequests();
+ }
+
+ /**
+ * Create an instance of {@link VfModuleModelName }
+ *
+ */
+ public VfModuleModelName createVfModuleModelName() {
+ return new VfModuleModelName();
+ }
+
+ /**
+ * Create an instance of {@link VfModuleModelNames }
+ *
+ */
+ public VfModuleModelNames createVfModuleModelNames() {
+ return new VfModuleModelNames();
+ }
+
+
+
+
+ /**
+ * Create an instance of {@link JAXBElement }{@code <}{@link Object }{@code >}}
+ *
+ */
+ @XmlElementDecl(namespace = "http://ecomp.att.com/mso/infra/vnf-request/v1", name = "vnf-params")
+ public JAXBElement<Object> createVnfParams(Object value) {
+ return new JAXBElement<Object>(_VnfParams_QNAME, Object.class, null, value);
+ * Java content interface and Java element interface
+ * generated in the org.openecomp.mso.apihandlerinfra.vnfbeans1 package.
+ * <p>An ObjectFactory allows you to programmatically
+ * construct new instances of the Java representation
+ * for XML content. The Java representation of XML
+ * content can consist of schema derived interfaces
+ * and classes representing the binding of schema
+ * type definitions, element declarations and model
+ * groups. Factory methods for each of these are
+ * provided in this class.
+ *
+ */
+@XmlRegistry
+public class ObjectFactory {
+
+ private final static QName _VolumeParams_QNAME = new QName("http://ecomp.att.com/mso/infra/volume-request/v1", "volume-params");
+
+ /**
+ * Create a new ObjectFactory that can be used to create new instances of schema derived classes for package: org.openecomp.mso.apihandlerinfra.vnfbeans1
+ *
+ */
+ public ObjectFactory() {
+ }
+
+ /**
+ * Create an instance of {@link RequestInfo }
+ *
+ */
+ public RequestInfo createRequestInfo() {
+ return new RequestInfo();
+ }
+
+ /**
+ * Create an instance of {@link VolumeRequest }
+ *
+ */
+ public VolumeRequest createVolumeRequest() {
+ return new VolumeRequest();
+ }
+
+ /**
+ * Create an instance of {@link VolumeInputs }
+ *
+ */
+ public VolumeInputs createVolumeInputs() {
+ return new VolumeInputs();
+ }
+
+ /**
+ * Create an instance of {@link VolumeOutputs }
+ *
+ */
+ public VolumeOutputs createVolumeOutputs() {
+ return new VolumeOutputs();
+ }
+
+ /**
+ * Create an instance of {@link VolumeRequests }
+ *
+ */
+ public VolumeRequests createVolumeRequests() {
+ return new VolumeRequests();
+ }
+
+
+
+ /**
+ * Create an instance of {@link JAXBElement }{@code <}{@link Object }{@code >}}
+ *
+ */
+ @XmlElementDecl(namespace = "http://ecomp.att.com/mso/infra/volume-request/v1", name = "volume-params")
+ public JAXBElement<Object> createVolumeParams(Object value) {
+ return new JAXBElement<Object>(_VolumeParams_QNAME, Object.class, null, value);
+ private static final MsoLogger LOGGER = MsoLogger.getMsoLogger (MsoLogger.Catalog.APIH);
+
+ private static SessionFactory SESSION_FACTORY;
+
+
+ static {
+ try {
+
+ if ("MYSQL".equals (System.getProperty ("mso.db")) || "MARIADB".equals(System.getProperty("mso.db"))) {
+ SESSION_FACTORY = new Configuration ().configure ("hibernate-mysql.cfg.xml").buildSessionFactory ();
+ } else {
+ LOGGER.error (MessageEnum.APIH_DB_ACCESS_EXC_REASON, "DB Connection not specified to the JVM,choose either:-Dmso.db=MARIADB, -Dmso.db=MYSQL or -Dmso.container=AJSC", "", "", MsoLogger.ErrorCode.DataError , "DB Connection not specified to the JVM,choose either:-Dmso.db=MARIADB, -Dmso.db=MYSQL or -Dmso.container=AJSC");
+ }
+ } catch (Exception ex) {
+ LOGGER.error (MessageEnum.APIH_DB_ACCESS_EXC_REASON, ex.getMessage (), "", "", MsoLogger.ErrorCode.DataError , "Problem in getting DB connection type", ex);
+ throw ex;
+ }
+ }
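The selector in the static block above is normally driven by the -Dmso.db JVM argument; for a local or unit-test run the same effect can be had by setting the property before this holder class is first loaded. Illustration only, not part of the patch:

// Illustration: select the MySQL Hibernate configuration programmatically for a local run.
public class DbSelectorExample {
    public static void main(String[] args) {
        System.setProperty("mso.db", "MYSQL"); // or "MARIADB"
        // ... first use of the session-factory holder class must happen after this point ...
    }
}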
+
+ public static SessionFactory getSessionFactory () {
+ Query query = session.createQuery ("from InfraActiveRequests where vnfName = :vnfName and action = :action and (requestStatus = 'PENDING' or requestStatus = 'IN_PROGRESS' or requestStatus = 'TIMEOUT') and requestType = :requestType ORDER BY startTime DESC");
+ query.setParameter ("vnfName", vnfName);
+ query.setParameter ("action", action);
+ query.setParameter (REQUEST_TYPE, requestType);
+ @SuppressWarnings("unchecked")
+ List <InfraActiveRequests> results = query.list ();
+ Query query = session.createQuery ("from InfraActiveRequests where vnfId = :vnfId and action = :action and (requestStatus = 'PENDING' or requestStatus = 'IN_PROGRESS' or requestStatus = 'TIMEOUT') and requestType = :requestType ORDER BY startTime DESC");
+ query.setParameter ("vnfId", vnfId);
+ query.setParameter ("action", action);
+ query.setParameter (REQUEST_TYPE, requestType);
+ @SuppressWarnings("unchecked")
+ List <InfraActiveRequests> results = query.list ();
+ LOGGER.error (MessageEnum.APIH_DB_ACCESS_EXC_REASON, "DB Connection not specified to the JVM,choose either:-Dmso.db=MARIADB, -Dmso.db=MYSQL or -Dmso.container=AJSC", "", "", MsoLogger.ErrorCode.DataError, "DB Connection not specified to the JVM,choose either:-Dmso.db=MARIADB, -Dmso.db=MYSQL or -Dmso.container=AJSC");
+ LOGGER.debug("Non Unique Result Exception - the Catalog Database does not match a unique row - data integrity error: serviceName='" + serviceName + "'");
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " non unique result for serviceName=" + serviceName, "", "", MsoLogger.ErrorCode.DataError, "Non unique result for serviceName=" + serviceName);
+ LOGGER.debug("Non Unique Result Exception - the Catalog Database does not match a unique row - data integrity error: serviceNameVersionId='" + serviceNameVersionId + "'");
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " non unique result for serviceNameVersionId=" + serviceNameVersionId, "", "", MsoLogger.ErrorCode.DataError, "Non unique result for serviceNameVersionId=" + serviceNameVersionId);
+ LOGGER.debug("Non Unique Result Exception - data integrity error: service_id='" + serviceId + "', serviceVersion='" + serviceVersion + "'");
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " non unique result for service_id=" + serviceId + " and serviceVersion=" + serviceVersion, "", "", MsoLogger.ErrorCode.DataError, "Non unique result for service_id=" + serviceId);
+ LOGGER.debug("Non Unique Result Exception - the Catalog Database does not match a unique row - data integrity error: vnf_id='" + vnfId + "', componentType='" + type + "'");
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " non unique result for vnf_id=" + vnfId + " and componentType=" + type, "", "", MsoLogger.ErrorCode.DataError, "Non unique result for vnf_id=" + vnfId);
+ result = null;
+ } catch (org.hibernate.HibernateException he) {
+ LOGGER.debug("Hibernate Exception - while searching for: vnf_id='" + vnfId + "', componentType='" + type + "'");
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " Hibernate exception searching for vnf_id=" + vnfId + " and componentType=" + type, "", "", MsoLogger.ErrorCode.DataError, "Hibernate exception searching for vnf_id=" + vnfId);
+ result = null;
+ } catch (Exception e) {
+ LOGGER.debug("Generic Exception - while searching for: vnf_id='" + vnfId + "', componentType='" + type + "'");
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " Generic exception searching for vnf_id=" + vnfId + " and componentType=" + type, "", "", MsoLogger.ErrorCode.DataError, "Generic exception searching for vnf_id=" + vnfId);
+ LOGGER.debug("Non Unique Result Exception - the Catalog Database does not match a unique row - data integrity error: vnfType='" + vnfType + "', serviceVersion='" + serviceVersion + "'");
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " non unique result for vnfType=" + vnfType + " and serviceVersion=" + serviceVersion, "", "", MsoLogger.ErrorCode.DataError, "Non unique result for vnfType=" + vnfType);
+ LOGGER.debug("Non Unique Result Exception - the Catalog Database does not match a unique row - data integrity error: type='" + modelName + "', asdc_service_model_version='" + model_version + "'");
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " non unique result for type=" + modelName + " and version=" + model_version, "", "", MsoLogger.ErrorCode.DataError, "Non unique result for type=" + modelName);
+ LOGGER.debug("Non Unique Result Exception - the Catalog Database does not match a unique row - data integrity error: type='" + type + "', asdc_service_model_version='" + version + "'");
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " non unique result for type=" + type + " and version=" + version, "", "", MsoLogger.ErrorCode.DataError, "Non unique result for type==" + type);
+ module = null;
+ } catch (org.hibernate.HibernateException he) {
+ LOGGER.debug("Hibernate Exception - while searching for: type='" + type + "', asdc_service_model_version='" + version + "'");
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " Hibernate exception searching for type=" + type + " and version=" + version, "", "", MsoLogger.ErrorCode.DataError, "Hibernate exception searching for type=" + type);
+ module = null;
+ } catch (Exception e) {
+ LOGGER.debug("Generic Exception - while searching for: type='" + type + "', asdc_service_model_version='" + version + "'");
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " Generic exception searching for type=" + type + " and version=" + version, "", "", MsoLogger.ErrorCode.DataError, "Generic exception searching for type=" + type);
+ if (resultList1.size() > 1 && (!resultList1. get (0).getOrchestrationUri().equals(resultList1.get (1).getOrchestrationUri ()))) {
+ LOGGER.recordMetricEvent (startTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully. Different ORCHESTRATION URIs found for same VERSION and ID. No result returned.", "CatalogDB", "getVnfComponentsRecipe", null);
+ return null;
+ }
+ return resultList1.get (0);
+ }
+
+
+ /**
+ * Return all VNF Resources in the Catalog DB
+ *
+ * @return A list of VnfResource objects
+ */
+ @SuppressWarnings("unchecked")
+ public List <VnfResource> getAllVnfResources () {
+
+ long startTime = System.currentTimeMillis ();
+ LOGGER.debug ("Catalog database - get all VNF resources");
+ LOGGER.debug ("No heatFiles found for vfModuleId=" + vfModuleId);
+ LOGGER.recordMetricEvent (startTime, MsoLogger.StatusCode.COMPLETE, MsoLogger.ResponseCode.Suc, "Successfully. No heatfiles found for vfModule", "CatalogDB", "getHeatFilesForVfModule", null);
+ return null;
+ }
+ //Now the fun part - we have a list of the heat files we need to get - could clean this up with a join
+ //TODO - convert this all with one join - brute force for now due to time
+ heatFiles = new HashMap<String, HeatFiles>();
+ for (VfModuleToHeatFiles vmthf : mapList) {
+ int heatFilesId = vmthf.getHeatFilesId();
+ hql = "FROM HeatFiles where id = :id_value";
+ query = getSession().createQuery(hql);
+ query.setParameter("id_value", heatFilesId);
+ List<HeatFiles> fileList = query.list();
+ if (fileList.isEmpty()) {
+ // Should this throw an exception??
+ LOGGER.debug("Unable to find a HEAT_FILES entry at " + heatFilesId);
+ String errorString = "_ERROR|" + heatFilesId;
+ // The receiving code needs to know to throw an exception for this - or ignore it.
+ heatFiles.put(errorString, null);
+ } else {
+ // Should only ever have 1 result - add it to our Map
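The comments above establish a sentinel convention: a missing HEAT_FILES row is reported to the caller as a map key of the form "_ERROR|<id>" with a null value. A hedged sketch (the helper class and exception choice are illustrative, not from the source) of how a caller could honor that convention:

import java.util.Map;

// Hedged sketch: fail fast when the heat-files lookup reported a missing HEAT_FILES entry.
final class HeatFilesResultCheck {
    private HeatFilesResultCheck() {
    }

    static void failOnMissingEntries(Map<String, HeatFiles> heatFiles) {
        for (Map.Entry<String, HeatFiles> entry : heatFiles.entrySet()) {
            if (entry.getKey().startsWith("_ERROR|")) {
                String missingId = entry.getKey().substring("_ERROR|".length());
                // Callers could also log and skip, as the original comment suggests.
                throw new IllegalStateException("Missing HEAT_FILES entry for id " + missingId);
            }
        }
    }
}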
+ LOGGER.debug("Non Unique Result Exception - the Catalog Database does not match a unique row - data integrity error: envName='" + name + "', version='" + version + "' and asdcResourceName=" + asdcResourceName);
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " non unique result for envName=" + name + " and version=" + version + " and asdcResourceName=" + asdcResourceName, "", "", MsoLogger.ErrorCode.DataError, "non unique result for envName=" + name);
+ env = null;
+ } catch (org.hibernate.HibernateException he) {
+ LOGGER.debug("Hibernate Exception - while searching for: envName='" + name + "', asdc_service_model_version='" + version + "' and asdcResourceName=" + asdcResourceName);
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " Hibernate exception searching for envName=" + name + " and version=" + version + " and asdcResourceName=" + asdcResourceName, "", "", MsoLogger.ErrorCode.DataError, "Hibernate exception searching for envName=" + name);
+ env = null;
+ } catch (Exception e) {
+ LOGGER.debug("Generic Exception - while searching for: envName='" + name + "', asdc_service_model_version='" + version + "' and asdcResourceName=" + asdcResourceName);
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " Generic exception searching for envName=" + name + " and version=" + version + " and asdcResourceName=" + asdcResourceName, "", "", MsoLogger.ErrorCode.DataError, "Generic exception searching for envName=" + name);
+ LOGGER.debug ("Catalog database - save Service with ServiceName/Version/serviceUUID(SERVICE_NAME_VERSION_ID)" + service.getServiceName()+"/"+service.getServiceVersion()+"/"+service.getServiceNameVersionId());
+ try {
+ Service serviceDB = this.getServiceByUUID(service.getServiceNameVersionId());
+ public HeatFiles getHeatFiles(int vnfResourceId,String fileName,String asdcResourceName, String version) {
+ long startTime = System.currentTimeMillis ();
+ LOGGER.debug ("Catalog database - getHeatFiles with name " + fileName
+ + " and vnfResourceID "
+ + vnfResourceId
+// + " and ASDC resource name "
+ + asdcResourceName
+ + " and version "
+ + version);
+
+ String hql = "FROM HeatFiles WHERE fileName = :fileName AND vnfResourceId = :vnfResourceId AND asdcResourceName = :asdcResourceName AND version = :version";
+ LOGGER.debug("Non Unique Result Exception - the Catalog Database does not match a unique row - data integrity error: fileName='" + fileName + "', vnfResourceId='" + vnfResourceId + "' and asdcResourceName=" + asdcResourceName + " and version=" + version);
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " non unique result for fileName=" + fileName + " and vnfResourceId=" + vnfResourceId + " and asdcResourceName=" + asdcResourceName + " and version=" + version, "", "", MsoLogger.ErrorCode.DataError, "Non unique result for fileName=" + fileName);
+ heatFilesResult = null;
+ } catch (org.hibernate.HibernateException he) {
+ LOGGER.debug("Hibernate Exception - while searching for: fileName='" + fileName + "', vnfResourceId='" + vnfResourceId + "' and asdcResourceName=" + asdcResourceName + " and version=" + version);
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " Hibernate exception searching for fileName=" + fileName + " and vnfResourceId=" + vnfResourceId + " and asdcResourceName=" + asdcResourceName + " and version=" + version, "", "", MsoLogger.ErrorCode.DataError, "Hibernate exception searching for fileName=" + fileName);
+ heatFilesResult = null;
+ } catch (Exception e) {
+ LOGGER.debug("Generic Exception - while searching for: fileName='" + fileName + "', vnfResourceId='" + vnfResourceId + "' and asdcResourceName=" + asdcResourceName + " and version=" + version);
+ LOGGER.error(MessageEnum.GENERAL_EXCEPTION, " Generic exception searching for fileName=" + fileName + " and vnfResourceId=" + vnfResourceId + " and asdcResourceName=" + asdcResourceName + " and version=" + version, "", "", MsoLogger.ErrorCode.DataError, "Generic exception searching for fileName=" + fileName);
+ // org.openecomp.mso.db.catalog.beans.VfModuleToHeatFiles$$EnhancerByMockitoWithCGLIB$$309d9392 cannot be cast to org.openecomp.mso.db.catalog.beans.HeatTemplate
+ System.out.println("InputStream is NULL for:"+filename);
+ }
+ try {
+ return new DistributionClientDownloadResultImpl(DistributionActionResultEnum.SUCCESS, DistributionActionResultEnum.SUCCESS.name(),arg0.getArtifactName(),IOUtils.toByteArray(inputStream));
+ } catch (IOException e) {
+
+ e.printStackTrace();
+ }
+ return null;
+ }
+
+ @Override
+ public IConfiguration getConfiguration() {
+ return null;
+ }
+
+ @Override
+ public IDistributionClientResult init(IConfiguration arg0, INotificationCallback arg1) {
+ return new DistributionClientResultImpl(DistributionActionResultEnum.SUCCESS,DistributionActionResultEnum.SUCCESS.name());
+ }
+
+ @Override
+ public IDistributionClientResult sendDeploymentStatus(IDistributionStatusMessage arg0) {
+ this.distributionMessageReceived.add(arg0);
+ return new DistributionClientResultImpl(DistributionActionResultEnum.SUCCESS,DistributionActionResultEnum.SUCCESS.name());
+ }
+
+ @Override
+ public IDistributionClientResult sendDeploymentStatus(IDistributionStatusMessage arg0, String arg1) {
+ this.distributionMessageReceived.add(arg0);
+ return new DistributionClientResultImpl(DistributionActionResultEnum.SUCCESS,DistributionActionResultEnum.SUCCESS.name());
+ }
+
+ @Override
+ public IDistributionClientResult sendDownloadStatus(IDistributionStatusMessage arg0) {
+ this.distributionMessageReceived.add(arg0);
+ return new DistributionClientResultImpl(DistributionActionResultEnum.SUCCESS,DistributionActionResultEnum.SUCCESS.name());
+ }
+
+ @Override
+ public IDistributionClientResult sendDownloadStatus(IDistributionStatusMessage arg0, String arg1) {
+ this.distributionMessageReceived.add(arg0);
+ return new DistributionClientResultImpl(DistributionActionResultEnum.SUCCESS,DistributionActionResultEnum.SUCCESS.name());
+ }
+
+ @Override
+ public IDistributionClientResult start() {
+ return new DistributionClientResultImpl(DistributionActionResultEnum.SUCCESS,DistributionActionResultEnum.SUCCESS.name());
+ }
+
+ @Override
+ public IDistributionClientResult stop() {
+ return new DistributionClientResultImpl(DistributionActionResultEnum.SUCCESS,DistributionActionResultEnum.SUCCESS.name());
+
+ }
+
+ @Override
+ public IDistributionClientResult updateConfiguration(IConfiguration arg0) {
+ return new DistributionClientResultImpl(DistributionActionResultEnum.SUCCESS,DistributionActionResultEnum.SUCCESS.name());
+]]></con:request><con:originalUri>http://localhost/ecomp/mso/v1/services</con:originalUri><con:assertion type="Valid HTTP Status Codes" name="Valid HTTP Status Codes" id="19038271-9229-4ae5-a743-24d0b2790cae"><con:configuration><codes>202
+]]></con:request><con:originalUri>http://ihoap24.cif.att.com/ecomp/mso/infra/v3/volume-request</con:originalUri><con:assertion type="Valid HTTP Status Codes" id="4b4088c5-0c35-4234-a473-82ee5a7c47a3" name="Valid HTTP Status Codes"><con:configuration><codes>202
+</soapenv:Envelope>]]></con:request><con:assertion type="Valid HTTP Status Codes" name="Valid HTTP Status Codes" id="ae765673-245e-42b3-85a1-d61a902855f9"><con:configuration><codes>500
+</soapenv:Envelope>]]></con:request><con:assertion type="Valid HTTP Status Codes" name="Valid HTTP Status Codes" id="87ce9425-4cca-404d-bb04-626920fc11d4"><con:configuration><codes>500
+</con:targetPath><con:upgraded>true</con:upgraded></con:transfers><con:transfers setNullOnMissingSource="true" transferTextContent="true" failOnError="true" ignoreEmpty="false" transferToAll="false" entitize="false" transferChildNodes="false"><con:name>ToGet</con:name><con:sourceType>Response</con:sourceType><con:sourceStep>POST VNF infra request - NO BPEL</con:sourceStep><con:sourcePath>declare namespace vnf='http://ecomp.att.com/mso/infra/vnf-request/v1';
+//vnf:vnf-request[1]/vnf:request-info[1]/vnf:request-id[1]</con:sourcePath><con:targetType>request-id</con:targetType><con:targetStep>Get Infra request</con:targetStep><con:targetPath/><con:upgraded>true</con:upgraded></con:transfers><con:transfers setNullOnMissingSource="true" transferTextContent="true" failOnError="true" ignoreEmpty="false" transferToAll="false" entitize="false" transferChildNodes="false"><con:name>ToSoapGet</con:name><con:sourceType>Response</con:sourceType><con:sourceStep>POST VNF infra request - NO BPEL</con:sourceStep><con:sourcePath>declare namespace vnf='http://ecomp.att.com/mso/infra/vnf-request/v1';
+//vnf:vnf-request[1]/vnf:request-info[1]/vnf:request-id[1]</con:sourcePath><con:targetType>Request</con:targetType><con:targetStep>get infra soap</con:targetStep><con:targetPath>declare namespace soapenv='http://schemas.xmlsoap.org/soap/envelope/';
+(1,'*',NULL,'CREATE','1','Recipe Match All for VF Modules if no custom flow exists','/mso/async/services/createCinderVolumeV1','VOLUME_GROUP',NULL,180,NULL),
+(2,'*',NULL,'DELETE','1','Recipe Match All for VF Modules if no custom flow exists','/mso/async/services/deleteCinderVolumeV1','VOLUME_GROUP',NULL,180,NULL),
+(3,'*',NULL,'UPDATE','1','Recipe Match All for VF Modules if no custom flow exists','/mso/async/services/updateCinderVolumeV1','VOLUME_GROUP',NULL,180,NULL),
+(4,NULL,'*','CREATE_VF_MODULE_VOL','1','Recipe Match All for VF Modules if no custom flow exists','/mso/async/services/CreateVfModuleVolume','VOLUME_GROUP',NULL,180,NULL),
+(5,NULL,'*','DELETE_VF_MODULE_VOL','1','Recipe Match All for VF Modules if no custom flow exists','/mso/async/services/DeleteVfModuleVolume','VOLUME_GROUP',NULL,180,NULL),
+(6,NULL,'*','UPDATE_VF_MODULE_VOL','1','Recipe Match All for VF Modules if no custom flow exists','/mso/async/services/UpdateVfModuleVolume','VOLUME_GROUP',NULL,180,NULL);
+/*!40000 ALTER TABLE `VNF_COMPONENTS_RECIPE` ENABLE KEYS */;
+UNLOCK TABLES;
+
+INSERT INTO service (id, SERVICE_NAME, VERSION_STR, DESCRIPTION) VALUES ('4', 'VID_DEFAULT', '1.0', 'Default service for VID to use for infra APIH orchestration');
+INSERT INTO service_recipe (SERVICE_ID, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('4', 'createInstance', '1', 'VID_DEFAULT recipe to create service-instance if no custom BPMN flow is found', '/mso/async/services/CreateServiceInstanceInfra', '180');
+INSERT INTO service_recipe (SERVICE_ID, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('4', 'deleteInstance', '1', 'VID_DEFAULT recipe to delete service-instance if no custom BPMN flow is found', '/mso/async/services/DeleteServiceInstanceInfra', '180');
+INSERT INTO vnf_recipe (VNF_TYPE, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('VID_DEFAULT', 'createInstance', '1', 'VID_DEFAULT recipe to create VNF if no custom BPMN flow is found', '/mso/async/services/CreateVnfInfra', '180');
+INSERT INTO vnf_recipe (VNF_TYPE, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('VID_DEFAULT', 'deleteInstance', '1', 'VID_DEFAULT recipe to delete VNF if no custom BPMN flow is found', '/mso/async/services/DeleteVnfInfra', '180');
+INSERT INTO vnf_components_recipe (VNF_TYPE, VNF_COMPONENT_TYPE, ACTION, VERSION, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT, VF_MODULE_ID) VALUES (NULL, 'volumeGroup', 'createInstance', '1', 'VID_DEFAULT recipe to create volume-group if no custom BPMN flow is found', '/mso/async/service/CreateVfModuleVolumeInfraV1', '180', 'VID_DEFAULT');
+INSERT INTO vnf_components_recipe (VNF_TYPE, VNF_COMPONENT_TYPE, ACTION, VERSION, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT, VF_MODULE_ID) VALUES (NULL, 'volumeGroup', 'deleteInstance', '1', 'VID_DEFAULT recipe to delete volume-group if no custom BPMN flow is found', '/mso/async/service/DeleteVfModuleVolumeInfraV1', '180', 'VID_DEFAULT');
+INSERT INTO vnf_components_recipe (VNF_TYPE, VNF_COMPONENT_TYPE, ACTION, VERSION, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT, VF_MODULE_ID) VALUES (NULL, 'volumeGroup', 'updateInstance', '1', 'VID_DEFAULT recipe to update volume-group if no custom BPMN flow is found', '/mso/async/service/UpdateVfModuleVolumeInfraV1', '180', 'VID_DEFAULT');
+INSERT INTO vnf_components_recipe (VNF_COMPONENT_TYPE, ACTION, VERSION, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT, VF_MODULE_ID) VALUES ('vfModule', 'createInstance', '1', 'VID_DEFAULT recipe to create vf-module if no custom BPMN flow is found', '/mso/async/services/CreateVfModuleInfra', '180', 'VID_DEFAULT');
+INSERT INTO vnf_components_recipe (VNF_COMPONENT_TYPE, ACTION, VERSION, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT, VF_MODULE_ID) VALUES ('vfModule', 'deleteInstance', '1', 'VID_DEFAULT recipe to delete vf-module if no custom BPMN flow is found', '/mso/async/services/DeleteVfModuleInfra', '180', 'VID_DEFAULT');
+INSERT INTO vnf_components_recipe (VNF_COMPONENT_TYPE, ACTION, VERSION, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT, VF_MODULE_ID) VALUES ('vfModule', 'updateInstance', '1', 'VID_DEFAULT recipe to update vf-module if no custom BPMN flow is found', '/mso/async/services/UpdateVfModuleInfra', '180', 'VID_DEFAULT');
+INSERT INTO network_recipe (NETWORK_TYPE, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('VID_DEFAULT', 'createInstance', '1.0', 'VID_DEFAULT recipe to create network if no custom BPMN flow is found', '/mso/async/services/CreateNetworkInstanceInfra', '180');
+INSERT INTO network_recipe (NETWORK_TYPE, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('VID_DEFAULT', 'updateInstance', '1.0', 'VID_DEFAULT recipe to update network if no custom BPMN flow is found', '/mso/async/services/UpdateNetworkInstanceInfra', '180');
+INSERT INTO network_recipe (NETWORK_TYPE, ACTION, VERSION_STR, DESCRIPTION, ORCHESTRATION_URI, RECIPE_TIMEOUT) VALUES ('VID_DEFAULT', 'deleteInstance', '1.0', 'VID_DEFAULT recipe to delete network if no custom BPMN flow is found', '/mso/async/services/DeleteNetworkInstanceInfra', '180');
+/*!40000 ALTER TABLE `heat_environment` ENABLE KEYS */;
+UNLOCK TABLES;
+
+LOCK TABLES `heat_template` WRITE;
+/*!40000 ALTER TABLE `heat_template` DISABLE KEYS */;
+INSERT INTO `heat_template` VALUES (6,'base_vlb.yaml','1.0','DNSResource','base_vlb.yaml','heat_template_version: 2013-05-23\n\ndescription: Heat template to deploy vLoadBalancer/vDNS demo app for OpenECOMP\n\nparameters:\n vlb_image_name:\n type: string\n label: Image name or ID\n description: Image to be used for compute instance\n vlb_flavor_name:\n type: string\n label: Flavor\n description: Type of instance (flavor) to be used\n public_net_id:\n type: string\n label: Public network name or ID\n description: Public network that enables remote connection to VNF\n vlb_private_net_id:\n type: string\n label: vLoadBalancer private network name or ID\n description: Private network that connects vLoadBalancer with vDNSs\n ecomp_private_net_id:\n type: string\n label: ECOMP management network name or ID\n description: Private network that connects ECOMP component and the VNF\n vlb_private_net_cidr:\n type: string\n label: vLoadBalancer private network CIDR\n description: The CIDR of the vLoadBalancer private network\n ecomp_private_net_cidr:\n type: string\n label: ECOMP private network CIDR\n description: The CIDR of the protected private network\n vlb_private_ip_0:\n type: string\n label: vLoadBalancer private IP address towards the private network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with the vDNSs\n vlb_private_ip_1:\n type: string\n label: vLoadBalancer private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with ECOMP components\n vdns_private_ip_0:\n type: string\n label: vDNS private IP address towards the private network\n description: Private IP address that is assigned to the vDNS to communicate with the vLoadBalancer\n vdns_private_ip_1:\n type: string\n label: vDNS private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vDNS to communicate with ECOMP components\n vlb_name_0:\n type: string\n label: vLoadBalancer name\n description: Name of the vLoadBalancer\n vdns_name_0:\n type: string\n label: vDNS name\n description: Name of the vDNS\n vnf_id:\n type: string\n label: VNF ID\n description: The VNF ID is provided by ECOMP\n vf_module_id:\n type: string\n label: vFirewall module ID\n description: The vLoadBalancer Module ID is provided by ECOMP\n webserver_ip:\n type: string\n label: Webserver IP address\n description: IP address of the webserver that hosts the source code and binaries\n dcae_collector_ip:\n type: string\n label: DCAE collector IP address\n description: IP address of the DCAE collector\n key_name:\n type: string\n label: Key pair name\n description: Public/Private key pair name\n pub_key:\n type: string\n label: Public key\n description: Public key to be installed on the compute instance\n\nresources:\n my_keypair:\n type: OS::Nova::KeyPair\n properties:\n name: { get_param: key_name }\n public_key: { get_param: pub_key }\n save_private_key: false\n\n vlb_private_network:\n type: OS::Neutron::Net\n properties:\n name: { get_param: vlb_private_net_id }\n\n vlb_private_subnet:\n type: OS::Neutron::Subnet\n properties:\n name: { get_param: vlb_private_net_id }\n network_id: { get_resource: vlb_private_network }\n cidr: { get_param: vlb_private_net_cidr }\n\n vlb_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vlb_image_name }\n flavor: { get_param: vlb_flavor_name }\n name: { get_param: vlb_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - 
network: { get_param: public_net_id }\n - port: { get_resource: vlb_private_0_port }\n - port: { get_resource: vlb_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __dcae_collector_ip__: { get_param: dcae_collector_ip }\n __local_private_ipaddr__: { get_param: vlb_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n DCAE_COLLECTOR_IP=__dcae_collector_ip__\n LOCAL_PRIVATE_IPADDR=__local_private_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_lb_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vlb.sh\n chmod +x v_lb_init.sh\n chmod +x vlb.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $DCAE_COLLECTOR_IP > config/dcae_collector_ip.txt\n echo $LOCAL_PRIVATE_IPADDR > config/local_private_ipaddr.txt\n echo "no" > config/install.txt\n LOCAL_PUBLIC_IPADDR=$(ifconfig eth0 | grep "inet addr" | tr -s \' \' | cut -d\' \' -f3 | cut -d\':\' -f2)\n echo $LOCAL_PUBLIC_IPADDR > config/local_public_ipaddr.txt\n mv vlb.sh /etc/init.d\n update-rc.d vlb.sh defaults\n ./v_lb_init.sh\n\n vlb_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: vlb_private_network }\n fixed_ips: [{"subnet": { get_resource: vlb_private_subnet }, "ip_address": { get_param: vlb_private_ip_0 }}]\n\n vlb_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{"subnet": { get_param: ecomp_private_net_id }, "ip_address": { get_param: vlb_private_ip_1 }}]\n\n vdns_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vlb_image_name }\n flavor: { get_param: vlb_flavor_name }\n name: { get_param: vdns_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vdns_private_0_port }\n - port: { get_resource: vdns_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __lb_oam_int__ : { get_param: vlb_private_ip_1 }\n __lb_private_ipaddr__: { get_param: vlb_private_ip_0 }\n __local_private_ipaddr__: { get_param: vdns_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n LB_OAM_INT=__lb_oam_int__\n LB_PRIVATE_IPADDR=__lb_private_ipaddr__\n LOCAL_PRIVATE_IPADDR=__local_private_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_dns_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vdns.sh\n chmod +x v_dns_init.sh\n chmod +x vdns.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $LB_OAM_INT > config/lb_oam_int.txt\n echo $LB_PRIVATE_IPADDR > config/lb_private_ipaddr.txt\n echo $LOCAL_PRIVATE_IPADDR > config/local_private_ipaddr.txt\n echo "no" > config/install.txt\n mv vdns.sh /etc/init.d\n update-rc.d vdns.sh defaults\n ./v_dns_init.sh\n\n vdns_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: vlb_private_network }\n fixed_ips: [{"subnet": { get_resource: vlb_private_subnet }, "ip_address": { get_param: vdns_private_ip_0 }}]\n\n vdns_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{"subnet": { get_param: ecomp_private_net_id }, "ip_address": { get_param: vdns_private_ip_1 }}]\n',300,'Artifact-UUID1','Base VLB Heat','label','2016-11-14 
13:04:07',NULL);
+INSERT INTO `heat_template` VALUES (7,'dnsscaling.yaml','1.0','DNSResource','dnsscaling.yaml','heat_template_version: 2013-05-23\n\ndescription: Heat template to deploy a vDNS for OpenECOMP (scaling-up scenario)\n\nparameters:\n vlb_image_name:\n type: string\n label: Image name or ID\n description: Image to be used for compute instance\n vlb_flavor_name:\n type: string\n label: Flavor\n description: Type of instance (flavor) to be used\n public_net_id:\n type: string\n label: Public network name or ID\n description: Public network that enables remote connection to VNF\n vlb_private_net_id:\n type: string\n label: vLoadBalancer private network name or ID\n description: Private network that connects vLoadBalancer with vDNSs\n ecomp_private_net_id:\n type: string\n label: ECOMP management network name or ID\n description: Private network that connects ECOMP component and the VNF\n vlb_private_ip_0:\n type: string\n label: vLoadBalancer private IP address towards the private network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with the vDNSs\n vlb_private_ip_1:\n type: string\n label: vLoadBalancer private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with ECOMP components\n vdns_private_ip_0:\n type: string\n label: vDNS private IP address towards the private network\n description: Private IP address that is assigned to the vDNS to communicate with the vLoadBalancer\n vdns_private_ip_1:\n type: string\n label: vDNS private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vDNS to communicate with ECOMP components\n vdns_name_0:\n type: string\n label: vDNS name\n description: Name of the vDNS\n vnf_id:\n type: string\n label: VNF ID\n description: The VNF ID is provided by ECOMP\n vf_module_id:\n type: string\n label: vFirewall module ID\n description: The vLoadBalancer Module ID is provided by ECOMP\n webserver_ip:\n type: string\n label: Webserver IP address\n description: IP address of the webserver that hosts the source code and binaries\n dcae_collector_ip:\n type: string\n label: DCAE collector IP address\n description: IP address of the DCAE collector\n key_name:\n type: string\n label: Key pair name\n description: Public/Private key pair name\n pub_key:\n type: string\n label: Public key\n description: Public key to be installed on the compute instance\n\nresources:\n vdns_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vlb_image_name }\n flavor: { get_param: vlb_flavor_name }\n name: { get_param: vdns_name_0 }\n key_name: { get_param: key_name }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vdns_private_0_port }\n - port: { get_resource: vdns_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __lb_oam_int__ : { get_param: vlb_private_ip_1 }\n __lb_private_ipaddr__: { get_param: vlb_private_ip_0 }\n __local_private_ipaddr__: { get_param: vdns_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n LB_OAM_INT=__lb_oam_int__\n LB_PRIVATE_IPADDR=__lb_private_ipaddr__\n LOCAL_PRIVATE_IPADDR=__local_private_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_dns_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vdns.sh\n chmod +x v_dns_init.sh\n chmod 
+x vdns.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $LB_OAM_INT > config/lb_oam_int.txt\n echo $LB_PRIVATE_IPADDR > config/lb_private_ipaddr.txt\n echo $LOCAL_PRIVATE_IPADDR > config/local_private_ipaddr.txt\n echo "no" > config/install.txt\n mv vdns.sh /etc/init.d\n update-rc.d vdns.sh defaults\n ./v_dns_init.sh\n\n vdns_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: vlb_private_net_id }\n fixed_ips: [{"subnet": { get_param: vlb_private_net_id }, "ip_address": { get_param: vdns_private_ip_0 }}]\n\n vdns_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{"subnet": { get_param: ecomp_private_net_id }, "ip_address": { get_param: vdns_private_ip_1 }}]\n',300,'Artifact-UUID2','DNS Scaling Heat','label','2016-11-14 13:04:07',NULL);
+/*!40000 ALTER TABLE `heat_template` ENABLE KEYS */;
+UNLOCK TABLES;
+
+LOCK TABLES `heat_template_params` WRITE;
+/*!40000 ALTER TABLE `heat_template_params` DISABLE KEYS */;
+INSERT INTO `heat_template_params` VALUES (110,6,'vlb_flavor_name','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (111,6,'vlb_private_ip_1','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (112,6,'dcae_collector_ip','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (113,6,'vlb_private_net_cidr','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (114,6,'ecomp_private_net_id','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (115,6,'vnf_id','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (116,6,'key_name','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (117,6,'pub_key','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (118,6,'vlb_private_net_id','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (119,6,'webserver_ip','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (120,6,'vdns_private_ip_1','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (121,6,'public_net_id','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (122,6,'vlb_private_ip_0','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (123,6,'vlb_name_0','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (124,6,'vdns_private_ip_0','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (125,6,'vdsn_name_0','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (126,6,'ecomp_private_net_cidr','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (127,6,'vf_module_id','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (128,6,'vlb_image_name','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (129,7,'vnf_id','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (130,7,'vf_module_id','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (131,7,'vlb_flavor_name','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (132,7,'vlb_image_name','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (133,7,'vdns_private_ip_1','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (134,7,'dcae_collector_ip','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (135,7,'webserver_ip','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (136,7,'vlb_private_net_id','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (137,7,'vdns_private_ip_0','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (138,7,'vdsn_name_0','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (139,7,'vlb_private_ip_0','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (140,7,'pub_key','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (141,7,'public_net_id','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (142,7,'key_name','\1','string',NULL);
+INSERT INTO `heat_template_params` VALUES (143,7,'ecomp_private_net_id','\1','string',NULL);
+/*!40000 ALTER TABLE `heat_template_params` ENABLE KEYS */;
+UNLOCK TABLES;
+
+LOCK TABLES `service` WRITE;
+/*!40000 ALTER TABLE `service` DISABLE KEYS */;
+INSERT INTO `service` VALUES (10,'dns-service','1.0','dns service for unit test','1e34774e-715e-4fd6-bd09-7b654622f35i',NULL,NULL,'2016-11-14 13:04:07','585822c8-4027-4f84-ba50-e9248606f111');
+/*!40000 ALTER TABLE `service` ENABLE KEYS */;
+UNLOCK TABLES;
+
+LOCK TABLES `vf_module` WRITE;
+/*!40000 ALTER TABLE `vf_module` DISABLE KEYS */;
+INSERT INTO `vf_module` VALUES (7,'dns-service/DNSResource-1::VF_RI1_DNS::module-1','1.0','VF_RI1_DNS::module-1','1.0','1e34774e-715e-4fd5-bd08-7b654622f33e.VF_RI1_DNS::module-1::module-1.group',NULL,6,1,'2016-11-14 13:04:07',NULL,NULL,6,3,'585822c7-4027-4f84-ba50-e9248606f132');
+INSERT INTO `vf_module` VALUES (8,'dns-service/DNSResource-1::VF_RI1_DNS::module-2','1.0','VF_RI1_DNS::module-2','1.0','1e34774e-715e-4fd5-bd08-7b654622f33e.VF_RI1_DNS::module-2::module-1.group',NULL,7,0,'2016-11-14 13:04:07',NULL,NULL,6,4,'585822c7-4027-4f84-ba50-e9248606f133');
+/*!40000 ALTER TABLE `vf_module` ENABLE KEYS */;
+UNLOCK TABLES;
+
+LOCK TABLES `vnf_resource` WRITE;
+/*!40000 ALTER TABLE `vnf_resource` DISABLE KEYS */;
+INSERT INTO `vnf_resource` VALUES (6,'dns-service/DNSResource-1','1.0','HEAT','dns service for unit test',NULL,NULL,'2016-11-14 13:04:07','585822c7-4027-4f84-ba50-e9248606f131',NULL,NULL,'585822c7-4027-4f84-ba50-e9248606f112','1.0','DNSResource-1','DNSResource','585822c8-4027-4f84-ba50-e9248606f111');
+/*!40000 ALTER TABLE `vnf_resource` ENABLE KEYS */;
+INSERT INTO `act_id_user` VALUES ('admin',1,'admin','user','camundaadmin@openecomp.org','{SHA}Y7MVubSDgzJeaulJRLN2dFyNCyc=',NULL);\r
+\r
+INSERT INTO `act_id_group` VALUES ('camunda-admin',1,'camunda BPM Administrators','SYSTEM');\r
+\r
+INSERT INTO `act_id_membership` VALUES ('admin','camunda-admin');\r
+\r
+INSERT INTO `act_ru_authorization` VALUES ('4ca68335-b7c5-11e6-b411-0242ac110003',1,1,NULL,'admin',1,'admin',2147483647),('4ca91b46-b7c5-11e6-b411-0242ac110003',1,1,'camunda-admin',NULL,2,'camunda-admin',2),('4cab3e27-b7c5-11e6-b411-0242ac110003',1,1,'camunda-admin',NULL,0,'*',2147483647),('4cadd638-b7c5-11e6-b411-0242ac110003',1,1,'camunda-admin',NULL,1,'*',2147483647),('4caf0eb9-b7c5-11e6-b411-0242ac110003',1,1,'camunda-admin',NULL,2,'*',2147483647),('4caff91a-b7c5-11e6-b411-0242ac110003',1,1,'camunda-admin',NULL,3,'*',2147483647),('4cb10a8b-b7c5-11e6-b411-0242ac110003',1,1,'camunda-admin',NULL,4,'*',2147483647),('4cb2430c-b7c5-11e6-b411-0242ac110003',1,1,'camunda-admin',NULL,5,'*',2147483647),('4cb32d6d-b7c5-11e6-b411-0242ac110003',1,1,'camunda-admin',NULL,6,'*',2147483647),('4cb43ede-b7c5-11e6-b411-0242ac110003',1,1,'camunda-admin',NULL,7,'*',2147483647),('4cb5293f-b7c5-11e6-b411-0242ac110003',1,1,'camunda-admin',NULL,8,'*',2147483647),('4cb5ec90-b7c5-11e6-b411-0242ac110003',1,1,'camunda-admin',NULL,9,'*',2147483647);\r
+${type_declaration}</template><template autoinsert="true" context="classbody_context" deleted="false" description="Code in new class type bodies" enabled="true" id="org.eclipse.jdt.ui.text.codetemplates.classbody" name="classbody">\r
+</template><template autoinsert="true" context="interfacebody_context" deleted="false" description="Code in new interface type bodies" enabled="true" id="org.eclipse.jdt.ui.text.codetemplates.interfacebody" name="interfacebody">\r
+</template><template autoinsert="true" context="enumbody_context" deleted="false" description="Code in new enum type bodies" enabled="true" id="org.eclipse.jdt.ui.text.codetemplates.enumbody" name="enumbody">\r
+</template><template autoinsert="true" context="annotationbody_context" deleted="false" description="Code in new annotation type bodies" enabled="true" id="org.eclipse.jdt.ui.text.codetemplates.annotationbody" name="annotationbody">\r
+</template><template autoinsert="true" context="catchblock_context" deleted="false" description="Code in new catch blocks" enabled="true" id="org.eclipse.jdt.ui.text.codetemplates.catchblock" name="catchblock">// ${todo} Auto-generated catch block\r
+${exception_var}.printStackTrace();</template><template autoinsert="true" context="methodbody_context" deleted="false" description="Code in created method stubs" enabled="true" id="org.eclipse.jdt.ui.text.codetemplates.methodbody" name="methodbody">// ${todo} Auto-generated method stub\r
+${body_statement}</template><template autoinsert="true" context="constructorbody_context" deleted="false" description="Code in created constructor stubs" enabled="true" id="org.eclipse.jdt.ui.text.codetemplates.constructorbody" name="constructorbody">${body_statement}\r
+// ${todo} Auto-generated constructor stub</template><template autoinsert="true" context="getterbody_context" deleted="false" description="Code in created getters" enabled="true" id="org.eclipse.jdt.ui.text.codetemplates.getterbody" name="getterbody">return ${field};</template><template autoinsert="true" context="setterbody_context" deleted="false" description="Code in created setters" enabled="true" id="org.eclipse.jdt.ui.text.codetemplates.setterbody" name="setterbody">${field} = ${param};</template></templates>
+OpenECOMP MSO is delivered with **2 Docker containers**: one hosting the **database** (MariaDB) and one hosting the **JBoss** application server running all OpenECOMP MSO code.
+
+Both containers run on the same machine and can be started with **`docker-compose`**.
+
+# Compiling MSO
+
+MSO can be compiled with a simple `mvn clean install`. Integration tests are enabled with the `-P with-integration-tests` profile.
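+
+As a minimal illustration (assuming Maven and a suitable JDK are installed and the commands are run from the root of the MSO source tree):
+
+```
+# standard build
+mvn clean install
+
+# build including the integration tests
+mvn clean install -P with-integration-tests
+```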
+
+**to be edited for release**
+Docker containers are built with the following profile
+
+| Docker image | Version | Content |
+| --- | --- | --- |
+| ecomp/mso | 1.0.0 | Contains **JBoss** + **OpenJDK** + **MSO** components (BPMN engine and MSO API handlers and adapters) |
+| library/mariadb | 10.1.11 | **MariaDB** image from Docker.io; this image hosts the database and is preloaded with the MSO schema and configuration at startup |
+
+# Starting MSO
+
+There are several ways to start OpenECOMP MSO:
+
+### Through the sample Jenkins Job on the Deployment Jenkins host ***to be changed for release***
+
+A Jenkins instance is available at [Deployment-Jenkins](http://162.242.237.52:8080/login?from=%2F). It provides a **job that can run** the containers on a sample host. ***add job link for release***
+
+***for release all this should go in the job description***
+```
+The VM is running a Docker Engine (1.12) and has the Nexus repository certificate imported; Jenkins will use this host to deploy and run the containers.
+
+It will first **clone** the configuration repository to its local workspace, **transfer the needed configuration files**, mount a specific **docker volume** with these files and start up the containers (attached to this volume) so that they can run the embedded startup scripts and bring up OpenECOMP MSO and its database container.
+
+It is important to understand that the Docker containers use a configuration file (JSON) to provision the MSO basic configuration. In the above Jenkins Job, Jenkins pulls that JSON file from the MSO repository; any other means of providing that JSON file (for specific environments) would also work. ***add link on release*** more information about configuring MSO.
+Note: the above is just an automation of the container deployment on a sample host. It is perfectly possible to do exactly the same on any host (be it your laptop or any other host that can run Docker 1.12 and reach the Nexus repository, and optionally the MSO gitlab repo to download other configurations).
+```
+
+The database is preloaded with all the basic configuration; it is possible to change the configuration without deploying the containers again (or rebuilding them).
+
+***provide a link on release***
+To get a feel for how the deployment is actually performed, it is best to review the Docker Strategy of MSO and look at the actual Jenkins Job.
+
+### Through the use of `docker-compose` ***to be changed for release***
+
+`docker-compose` can be used to start MSO. The files can be downloaded from the Nexus repository <add-link-to-Docker-Compose>
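+
+As a minimal sketch (assuming the compose file and the required configuration volume are already in place on the Docker host):
+
+```
+# start the MariaDB and JBoss containers in the background
+docker-compose up -d
+
+# follow the startup logs
+docker-compose logs -f
+
+# stop and remove the containers
+docker-compose down
+```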
+
+***to be changed for release***
+
+This job ***add link*** has been created to run the compose file.
+
+***this info will go in the job description?***
+```
+The job uses the Sample Docker Host described above as a Jenkins Slave (so when the build is triggered on Jenkins, it runs locally on the remote Docker Host). It pulls the needed information from the MSO GitLab repository (deploy.sh and the `docker-compose` file).
+```
+
+### Heat template
+
+A Heat template that can be used on Rackspace to spin up the MSO Host VM and run `docker-compose` is currently being built by the Demo Team.
+
+# Accessing MSO
+
+MSO UIs are not really used for operating MSO, but they provide information on what is currently happening and give insight into the components.
+
+### MSO JBoss console
+
+JBoss WildFly provides administrative functions through the application [server console](https://docs.jboss.org/author/display/WFLY10/Admin+Guide#AdminGuide-Accessingthewebconsole).
+
+This console can be used to check the status of MSO. It provides details on the deployed artifacts and gives remote access to the main server log file.
+
+***to be changed for the release*** The UI can be accessed through http://containerHostName:9990/ or on the sample host below: http://104.130.172.123:52090 (the sample host has port 9990 remapped to 52090 on the Rackspace VM)
+
+The configuration preloads a default user (admin) with the standard OpenECOMP UI password ***provide link on rrelease***.
+
+The configuration of JBoss should not be touched, but the following two sections can be used for insight into MSO health:
+
+
+
+Deployments shows what is deployed and running on the application server; you should see the following once MSO is up and running (the actual names of the WAR files may differ, but their number and general format should be the same).
+
+***to be uploaded for release***
+
+Runtime can be used to have a look at the main server log files, the JVM status and parameters, environment settings, etc.
+See the logging section below for more details about the other log files (EELF framework).
+
+### MSO Camunda Cockpit console
+
+MSO orchestration processes can be monitored with the [Camunda Engine cockpit UI](https://camunda.org/features/cockpit/). It gives insight into the available processes, allows them to be triggered manually and provides monitoring of the currently running processes.
+
+**IMPORTANT NOTE**: since OpenECOMP MSO only uses the Camunda Community version, it is not possible to see the history of running processes, as this is an Enterprise-only feature.
+
+#### Accessing the Cockpit
+
+***to be changed for the release*** The cockpit is available at the following address: http://containerHostname:8080/cockpit or on the sample host below: http://104.130.172.123:52080/cockpit (the sample host has port 8080 remapped to 52080 on the Rackspace VM)
+
+When the container is started from a fresh instance, it will create a default admin user (admin) with the default OpenECOMP UI password ***provide link on release***
+
+The cockpit gives an overview of the available BPMN (orchestration) processes (with a visual representation); it is also possible to trigger them from the UI if you know the parameters that are needed.
+
+***screenshots to be uploaded for release***
+
+### MSO APIs
+
+Most of the MSO features within OpenECOMP are triggered by using **RESTful interfaces**. MSO supports both **HTTP** and **HTTPS**, but in this release it is configured with HTTP only, using Basic Authentication.
+
+The MSO APIs are configured to accept requests having a **basic auth header** set with a **username and password** that vary depending on which API is being triggered.
+
+All API endpoints are exposed on port **8080**; it is possible to reach all MSO subsystems directly with the proper query (see more information below on how to test MSO functions).
+
+##### Main API endpoints in the first open source release
+The typical way to trigger these endpoints is to use a RESTful client or an automation framework.
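+
+As an illustration only (`<user>`, `<password>`, `<endpoint>` and the request payload are placeholders that depend on the API being called):
+
+```
+curl -u <user>:<password> -X POST -H "Content-Type: application/xml" \
+     -d @request.xml http://containerHostname:8080/<endpoint>
+```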
+
+# Configuration of MSO
+
+It is important to understand that the Docker containers use a configuration file (JSON) to provision the MSO basic configuration. In the above Jenkins Job, Jenkins pulls that JSON file from the MSO repository; any other means of providing that JSON file (for specific environments) would also work.
+
+Once the deployment of the Docker images is done, you will need to configure your installation to be able to interact with all the components that MSO needs.
+
+Change the environment file located at **/shared/mso-docker.json**, then run the following command: `chef-solo -c /var/berks-cookbooks/chef-repo/solo.rb -o recipe[mso-config::apih],recipe[mso-config::bpmn],recipe[mso-config::jra]`
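+
+For example, the two steps could look like this (the editor is arbitrary; the paths and recipe list are the ones given above):
+
+```
+# 1. adjust the environment values
+vi /shared/mso-docker.json
+
+# 2. apply the configuration
+chef-solo -c /var/berks-cookbooks/chef-repo/solo.rb -o recipe[mso-config::apih],recipe[mso-config::bpmn],recipe[mso-config::jra]
+```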
+
+**Important note:** The host mso is mapped automatically to c1.vm1.mso.simpledemo.openecomp.org in /etc/hosts of the Docker image, so you can keep mso:8080 when you want to refer to the APIH, JRA or Camunda host.
+
+Here are the main parameters you could change:
+
+- mso_config_path: define the path where the configuration files for APIH, JRA and Camunda will be deployed. This parameter should not be changed.
+- In the section mso-bpmn-urn-config, adaptersDbEndpoint: This configuration must point to the APIH hostname. It should have this form: http://mso:8080/dbadapters/RequestsDbAdapter Do not change it if you are not sure.
+- In the section mso-bpmn-urn-config, aaiEndpoint: This parameter should point to the A&AI component. It should be something like: https://c1.vm1.aai.simpledemo.openecomp.org:8443
+- In the section mso-bpmn-urn-config, aaiAuth: This parameter is the encrypted value of login:password to access the A&AI server. The key used to encrypt is defined in the parameter msoKey.
+- In the section asdc-connection, asdcAddresss: Change the values with the value provided by the ASDC team. Possible value: https://c2.vm1.asdc.simpledemo.openecomp.org:8443 The password field may be changed as well.
+- In the section mso-sdnc-adapter-config, sdncurls: Change all the values with the value provided by the SDNC team. Possible value: https://c1.vm1.sdnc.simpledemo.openecomp.org:8443/... ? The sdncauth field may be changed as well.
+- In the section mso-appc-adapter-config, appc_url: Change the value with the value provided by the APPC team. Possible value: http://c1.vm1.appc.simpledemo.openecomp.org:8080 ? The appc_auth field may be changed as well.
+- In the section mso-po-adapter-config, identity_url: Change the values with the value provided by the PO team. Possible value: https://identity.api.rackspacecloud.com/v2.0 ?
+
+The credentials are defined in two places:
+
+- JBoss user credentials are defined in /opt/jboss/standalone/configuration/application-users.properties and are associated with the corresponding role in application-roles.properties (you should not change this file unless you add a new user).
+- In the environment. Replace the authorization key in the file /shared/mso-docker.json and run the command to apply the configuration as explained above.
+
+You can generate the hash for a JBoss user with the following command `echo -n 'LOGIN:ApplicationRealm:PASSWORD' | openssl dgst -md5` and replace the line corresponding to this user in /opt/jboss/standalone/configuration/application-users.properties.
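+
+For example, to compute the entry for the default InfraPortalClient user listed in the Logging section below (replace the password with your own):
+
+```
+echo -n 'InfraPortalClient:ApplicationRealm:password1$' | openssl dgst -md5
+```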
+
+You can replace the authentication value in the environment with the value returned by the following API: `GET on http://c1.vm1.mso.simpledemo.openecomp.org:8080/asdc/properties/encrypt/{value}/{cryptKey}`, where {value} is the string login:password and cryptKey (also defined in the environment) is the key used to encrypt the credentials.
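+
+For instance, using illustrative credentials `myLogin:myPassword` and the adaptersPoAuth cryptKey listed below, the call could look like:
+
+```
+curl "http://c1.vm1.mso.simpledemo.openecomp.org:8080/asdc/properties/encrypt/myLogin:myPassword/07a7159d3bf51a0e53be7a8f89699be7"
+```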
+
+Examples of credentials you could change:
+- BPELClient: if you change these credentials, you have to change them in the JBoss users AND in the environment file (and apply the new config), and be careful to set the same password in both. In the environment it is the parameter "adaptersPoAuth" under the section "mso-bpmn-urn-config". The cryptKey to use is 07a7159d3bf51a0e53be7a8f89699be7
+- BPMNClient: if you change these credentials, you have to change them in the JBoss users AND in the environment file (and apply the new config), and be careful to set the same password in both. In the environment it is the parameter "camundaAuth" under the sections "mso-api-handler-config" AND "mso-api-handler-infra-config". The cryptKey to use is aa3871669d893c7fb8abbcda31b88b4f
+
+# Logging
+
+### JBoss
+
+MSO log files are located in the [JBoss log](https://docs.jboss.org/author/display/WFLY8/Logging+Configuration) folder in the container.
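+
+For example, assuming the default WildFly standalone layout inside the container (the container name is a placeholder):
+
+```
+docker exec -it <jboss_container> tail -f /opt/jboss/standalone/log/server.log
+```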
+
+### EELF
+
+The EELF framework is used for **specific logs** (audit, metric and error logs). They track inter-component requests and responses and allow a complete flow to be followed through the MSO subsystems.
+
+EELF logs are located at the following location on the MSO JBoss container:
+
+- /var/log/ecomp/MSO (each module has its own folder)
+
+DEBUG mode is enabled per module and has to be re-enabled if the application restarts.
+
+It can be enabled with a GET on the following APIs (see the curl example after the list):
+- Camunda (no authentication): http://c1.vm1.mso.simpledemo.openecomp.org:8080/mso/logging/debug
+- APIH Infra (use any jboss user with role InfraPortal-Client for authentication): http://c1.vm1.mso.simpledemo.openecomp.org:8080/ecomp/mso/infra/logging/debug
+- ASDC (no authentication): http://c1.vm1.mso.simpledemo.openecomp.org:8080/asdc/logging/debug
+- DBAdapter (no authentication): http://c1.vm1.mso.simpledemo.openecomp.org:8080/dbadapters/logging/debug
+- Network adapter (no authentication): http://c1.vm1.mso.simpledemo.openecomp.org:8080/networks/rest/logging/debug
+- SDNC adapter (use any jboss user with role MSO-Client for authentication): http://c1.vm1.mso.simpledemo.openecomp.org:8080/adapters/rest/logging/debug
+- VNF adapter (no authentication): http://c1.vm1.mso.simpledemo.openecomp.org:8080/vnfs/rest/logging/debug
+- Tenant adapter (no authentication): http://c1.vm1.mso.simpledemo.openecomp.org:8080/tenants/rest/logging/debug
+- APPC adapter (no authentication): http://c1.vm1.mso.simpledemo.openecomp.org:8080/appc/rest/logging/debug
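+
+For example, using the default users listed below for the endpoints that require authentication:
+
+```
+# no authentication required
+curl http://c1.vm1.mso.simpledemo.openecomp.org:8080/mso/logging/debug
+
+# APIH Infra requires a user with the InfraPortal-Client role
+curl -u 'InfraPortalClient:password1$' http://c1.vm1.mso.simpledemo.openecomp.org:8080/ecomp/mso/infra/logging/debug
+```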
+
+Default JBoss users:
+- with role CSI-Client: CSIClient/password1$
+- with role InfraPortal-Client: InfraPortalClient/password1$
+- with role MSO-Client: MSOClient/password1$
+
+Note that these default users should be changed.
+
+# Testing MSO Functionalities
+
+For this first release of MSO, the queries to start the various VNFs should come through the API Handler first.
+
+To help with the testing, we are providing a sample [SoapUI](https://www.soapui.org/) project [file](add link when released) with the main queries that VID should send to MSO.
+
+### To simulate Loading of Artifacts & models (bypass ASDC)
+
+The MariaDB container can load special SQL scripts that simulate the loading of ASDC components (as if they were received through the ASDC client).
+
+Simply use the embedded load capability to run the 'preload SQL' script for vFirewall or vDNS (see the sketch below).
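+
+A minimal sketch of loading such a script into the MariaDB container (the container name, database credentials, catalog schema name and script file are all placeholders; use the values of your installation):
+
+```
+docker exec -i <mariadb_container> mysql -u<user> -p<password> <catalog_db> < preload.sql
+```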
+
+### Once the HEAT artifacts are loaded into MSO
+
+It is also possible to simulate queries to the PO (platform orchestrator) adapter of MSO (thus bypassing the BPMN flows and the API Handler) to verify the MSO interaction with Rackspace and the behavior of the adapter (so that it loads the Heat template, connects to Rackspace and instantiates the elements).
+
+Below is a query used from the Firefox RESTClient plugin to trigger the MSO adapter directly (replace values accordingly):
+
+```
+ POST http://<containername>:8080/vnfs/rest/v1/vnfs/5259ba4a-cf0d-4791-9c60-9117faa5cdea/vf-modules
+```
+ private static final String NOT_FOUND = "<!DOCTYPE html><html><head><meta charset=\"ISO-8859-1\"><title>Application Not Started</title></head><body>Application not started. Properties file missing or invalid or database Connection failed</body></html>";
+ private static final String NOT_HEALTHY = "<!DOCTYPE html><html><head><meta charset=\"ISO-8859-1\"><title>Application Not Started</title></head><body>Application not available or at least one of the sub-modules is not available.</body></html>";
+ public static final Response HEALTH_CHECK_RESPONSE = Response.status (HttpStatus.SC_OK)
+ .entity (CHECK_HTML)
+ .build ();
+ public static final Response HEALTH_CHECK_NOK_RESPONSE = Response.status (HttpStatus.SC_SERVICE_UNAVAILABLE)
+ .entity (NOT_HEALTHY)
+ .build ();
+ public static final Response NOT_STARTED_RESPONSE = Response.status (HttpStatus.SC_SERVICE_UNAVAILABLE)
+ .entity (NOT_FOUND)
+ .build ();
+
+ public enum NodeType {APIH, RA};
+
+ public boolean catalogDBCheck (MsoLogger subMsoLogger, long startTime) {
+ try (CatalogDatabase catalogDB = new CatalogDatabase ()) {
+ // the catalog query performed against the database was elided from this excerpt
+ } catch (Exception e) {
+ // record the failure only when the local health check throws
+ subMsoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.ServiceNotAvailable, "Exception during local healthcheck");
+ return false;
+ }
+ return true;
+ }
+
+ public boolean siteStatusCheck (MsoLogger subMsoLogger, long startTime) {
+ // Check the Site Status value in DB first, if set to false, return NOK
+ String site = getProperty("site-name");
+
+ MsoStatusUtil statusUtil = new MsoStatusUtil ();
+ if (!statusUtil.getSiteStatus (site)) {
+ subMsoLogger.debug("This site is currently disabled for maintenance.");
+ return false;
+ }
+ return true;
+ }
+
+ public boolean configFileCheck (MsoLogger subMsoLogger, long startTime, String propertiesFile) {
+ subMsoLogger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.ServiceNotAvailable, "Configuration file can not be loaded");
+ if (null == port || null == ip || ip.isEmpty() || port.isEmpty()) {
+ msoLogger.error (MessageEnum.GENERAL_EXCEPTION_ARG, "Not able to get the IP or the Port value. IP:" + ip + "; Port:" + port, "", "HealthCheck", MsoLogger.ErrorCode.DataError, "Not able to get the IP or the Port value. IP:" + ip + "; Port:" + port);
+ msoLogger.error (MessageEnum.GENERAL_EXCEPTION_ARG, "Not able to get apih-healthcheck-urn parameter", "", "HealthCheck", MsoLogger.ErrorCode.DataError, "Not able to get apih-healthcheck-urn parameter");
+ msoLogger.error (MessageEnum.GENERAL_EXCEPTION_ARG, "Not able to get jra-healthcheck-urn parameter", "", "HealthCheck", MsoLogger.ErrorCode.DataError, "Not able to get jra-healthcheck-urn parameter");
+ msoLogger.error (MessageEnum.GENERAL_EXCEPTION_ARG, "Not able to find the topology file", "", "HealthCheck", MsoLogger.ErrorCode.PermissionError, "Not able to find the topology file");
+ msoLogger.error (MessageEnum.GENERAL_EXCEPTION_ARG, "Key parameters are missing from the topology file", "", "HealthCheck", MsoLogger.ErrorCode.DataError, "Key parameters are missing from the topology file");
+ return false;
+ }
+
+ // Verify health check on APIH servers
+ if (!this.verifyLocalHealth (apihLB, null, apihApi, null, requestId)) {
+ return false;
+ }
+
+ // Verify health check on Camunda servers
+ if (verifyBpmn) {
+ if (!this.verifyLocalHealth (bpmnLB, null, bpmnApi, null, requestId)) {
+ return false;
+ }
+ }
+
+ // Verify health check on RA servers
+ if (!verifyLocalHealth (jraLB, null, jraApi, null, requestId)) {
+ public Response setSiteStatus (@DefaultValue("true") @QueryParam("enable") Boolean enable,
+ @PathParam("siteName") String siteName) {
+ long startTime = System.currentTimeMillis();
+ // Set logger parameters
+ UUIDChecker.generateUUID (logger);
+ MsoLogger.setServiceName ("SetSiteStatus");
+
+
+ if (null == siteName) {
+ logger.recordAuditEvent (startTime, MsoLogger.StatusCode.ERROR, MsoLogger.ResponseCode.BadRequest, "Not able to find the site name attribute in the config file");
+ return Response.status (Response.Status.INTERNAL_SERVER_ERROR).entity ("Exception: not able to find the site name attribute in the config file").build ();