Wrap infra_workload API call to k8s plugin 38/102538/8
authorBin Yang <bin.yang@windriver.com>
Fri, 28 Feb 2020 04:38:02 +0000 (12:38 +0800)
committerBin Yang <bin.yang@windriver.com>
Fri, 28 Feb 2020 07:05:49 +0000 (15:05 +0800)
Create k8s profile during the infra_workload POST API call.

When the cloud region is a k8s cluster, infra_workload POST/GET/DELETE
requests are forwarded to the multicloud-k8s plugin; for POST, the rb
profile is created and its content uploaded first. The existing OpenStack
Heat handling moves to openstack_infra_workload_helper.py.

Change-Id: Ia68eb34f4c1baf01ad96aaaf6b4147a78374de60
Issue-ID: MULTICLOUD-1003
Signed-off-by: Bin Yang <bin.yang@windriver.com>
share/newton_base/util.py
share/starlingx_base/registration/registration.py
share/starlingx_base/resource/infra_workload.py
share/starlingx_base/resource/k8s_infra_workload_helper.py [new file with mode: 0644]
share/starlingx_base/resource/openstack_infra_workload_helper.py [new file with mode: 0644]

index aa03b65..b06529b 100644 (file)
@@ -177,3 +177,20 @@ class VimDriverUtils(object):
                 return result
             return profiled_func
         return wrapper
+
+
+    @staticmethod
+    def check_k8s_cluster(viminfo):
+        """
+        Check whether the cloud region is a k8s cluster (by WRCP software version)
+        """
+        software_version = viminfo.get("cloud_extra_info_json", {})\
+            .get("isystem", {}).get("software_version")
+
+        is_k8s_cluster = False
+        if software_version == "19.12":
+            is_k8s_cluster = True
+        elif software_version == "19.10":
+            is_k8s_cluster = True
+
+        return is_k8s_cluster
index 73c9be1..4d5e1f3 100644 (file)
@@ -473,31 +473,15 @@ class RegistryHelper(newton_registration.RegistryHelper):
         try:
             cloud_extra_info = viminfo.get("cloud_extra_info_json",{})
 
-            vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
-
-            # check system version of starlingx
-            system_info = cloud_extra_info.get("isystem", {})
-            systemversion = system_info.get("software_version", None)
-            if not systemversion:
-                self._logger.warn("query system version fails")
-                return
-
             # check if a k8s platform
-            is_k8s_cluster = False
-            # check WRCP versions:
-            if systemversion == "19.12":
-                is_k8s_cluster = True
-            elif systemversion == "19.10":
-                is_k8s_cluster = True
-
-            if not is_k8s_cluster:
-                self._logger.info("%s, %s is not a k8s platform, system version: %s"
-                    % (cloud_owner, cloud_region_id, systemversion))
+            if not VimDriverUtils.check_k8s_cluster(viminfo):
+                self._logger.info("%s, %s is not a k8s platform"
+                    % (cloud_owner, cloud_region_id))
                 return
 
             # check if user token provided to access k8s platform
-            k8s_apitoken = cloud_extra_info.get("k8s-apitoken", None)
-            k8s_apiserver = cloud_extra_info.get("k8s-apiserver", None)
+            k8s_apitoken = cloud_extra_info.get("k8s-apitoken")
+            k8s_apiserver = cloud_extra_info.get("k8s-apiserver")
             if not k8s_apitoken or not k8s_apiserver:
                 self._logger.warn("k8s-apitoken or k8s-apiserver is not provided,"\
                     "k8s connectivity must be provisioned in other ways")
index acce28a..24a66ee 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
+# Copyright (c) 2017-2020 Wind River Systems, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
 
 import os
 import json
-
 import logging
+
 from django.conf import settings
 from django.http import QueryDict
 from rest_framework import status
@@ -25,21 +25,14 @@ from common.msapi.helper import Helper as helper
 from common.msapi.helper import MultiCloudThreadHelper
 
 from newton_base.resource import infra_workload as newton_infra_workload
-from newton_base.resource import infra_workload_helper as infra_workload_helper
+from starlingx_base.resource import openstack_infra_workload_helper
+from starlingx_base.resource import k8s_infra_workload_helper
 
 from newton_base.util import VimDriverUtils
 
-import yaml
-NoDatesSafeLoader = yaml.SafeLoader
-NoDatesSafeLoader.yaml_implicit_resolvers = {
-    k: [r for r in v if r[0] != 'tag:yaml.org,2002:timestamp'] for
-        k, v in list(NoDatesSafeLoader.yaml_implicit_resolvers.items())
-}
-
 logger = logging.getLogger(__name__)
 
 
-
 # global var: Audition thread
 # the id is the workloadid, which implies post to workloadid1 followed by delete workloadid1
 # will replace the previous backlog item
@@ -54,9 +47,6 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
         self._logger.info("data: %s" % request.data)
         self._logger.debug("META: %s" % request.META)
 
-        # Get the specified tenant id
-        specified_project_idorname = request.META.get("Project", None)
-
         resp_template = {
             "template_type": "HEAT",
             "workload_id": workloadid,
@@ -65,8 +55,26 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
         }
         status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
 
+        # check if the target is a k8s cluster
+        viminfo = VimDriverUtils.get_vim_info(vimid)
+        if VimDriverUtils.check_k8s_cluster(viminfo):
+            try:
+                # wrap call to multicloud-k8s
+                return k8s_infra_workload_helper.InfraWorkloadHelper.workload_create(
+                    self, vimid, workloadid, request)
+            except Exception as e:
+                errmsg = str(e)
+                self._logger.error(errmsg)
+                resp_template["workload_status_reason"] = errmsg
+                return Response(data=resp_template,
+                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+        # otherwise, target OpenStack
+        # Get the specified tenant id
+        specified_project_idorname = request.META.get("Project", None)
+
         try:
-            worker_self = InfraWorkloadHelper(
+            worker_self = openstack_infra_workload_helper.InfraWorkloadHelper(
                 settings.MULTICLOUD_API_V1_PREFIX,
                 settings.AAI_BASE_URL
             )
@@ -154,9 +162,6 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
         self._logger.info("vimid, workload id: %s, %s" % (vimid, workloadid))
         self._logger.debug("META: %s" % request.META)
 
-        # Get the specified tenant id
-        specified_project_idorname = request.META.get("Project", None)
-
         resp_template = {
             "template_type": "HEAT",
             "workload_id": workloadid,
@@ -164,6 +169,25 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
             "workload_status_reason": "Exception occurs"
         }
         status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
+
+        # check if the target is a k8s cluster
+        viminfo = VimDriverUtils.get_vim_info(vimid)
+        if VimDriverUtils.check_k8s_cluster(viminfo):
+            try:
+                # wrap call to multicloud-k8s
+                return k8s_infra_workload_helper.InfraWorkloadHelper.workload_detail(
+                    self, vimid, workloadid, request)
+            except Exception as e:
+                errmsg = str(e)
+                self._logger.error(errmsg)
+                resp_template["workload_status_reason"] = errmsg
+                return Response(data=resp_template,
+                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+        # otherwise, target OpenStack
+        # Get the specified tenant id
+        specified_project_idorname = request.META.get("Project", None)
+
         try:
 
             if workloadid == "":
@@ -181,7 +205,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
                         status=status.HTTP_400_BAD_REQUEST
                     )
                 else:
-                    worker_self = InfraWorkloadHelper(
+                    worker_self = openstack_infra_workload_helper.InfraWorkloadHelper(
                         settings.MULTICLOUD_API_V1_PREFIX,
                         settings.AAI_BASE_URL
                     )
@@ -216,7 +240,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
                 backlog_item = gInfraWorkloadThread.get(workloadid)
                 if not backlog_item:
                     # backlog item not found, so check the stack status
-                    worker_self = InfraWorkloadHelper(
+                    worker_self = openstack_infra_workload_helper.InfraWorkloadHelper(
                         settings.MULTICLOUD_API_V1_PREFIX,
                         settings.AAI_BASE_URL
                     )
@@ -261,9 +285,6 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
         self._logger.info("vimid, workload id: %s, %s" % (vimid, workloadid))
         self._logger.debug("META: %s" % request.META)
 
-        # Get the specified tenant id
-        specified_project_idorname = request.META.get("Project", None)
-
         resp_template = {
             "template_type": "HEAT",
             "workload_id": workloadid,
@@ -271,8 +292,26 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
             "workload_status_reason": "Exception occurs"
         }
         status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
-        try:
 
+        # check if the target is a k8s cluster
+        viminfo = VimDriverUtils.get_vim_info(vimid)
+        if VimDriverUtils.check_k8s_cluster(viminfo):
+            try:
+                # wrap call to multicloud-k8s
+                return k8s_infra_workload_helper.InfraWorkloadHelper.workload_delete(
+                    self, vimid, workloadid, request)
+            except Exception as e:
+                errmsg = str(e)
+                self._logger.error(errmsg)
+                resp_template["workload_status_reason"] = errmsg
+                return Response(data=resp_template,
+                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+        # otherwise, target OpenStack
+        # Get the specified tenant id
+        specified_project_idorname = request.META.get("Project", None)
+
+        try:
             if workloadid == "":
                 resp_template["workload_status_reason"] =\
                     "workload id is not found in API url"
@@ -285,7 +324,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
             super(InfraWorkload, self).delete(request, vimid, workloadid)
 
             # backlog for a post to heatbridge delete
-            worker_self = InfraWorkloadHelper(
+            worker_self = openstack_infra_workload_helper.InfraWorkloadHelper(
                 settings.MULTICLOUD_API_V1_PREFIX,
                 settings.AAI_BASE_URL
             )
@@ -373,200 +412,3 @@ class APIv1InfraWorkload(InfraWorkload):
 
         vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
         return super(APIv1InfraWorkload, self).delete(request, vimid, workloadid)
-
-
-class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
-
-    def __init__(self, multicloud_prefix, aai_base_url):
-        super(InfraWorkloadHelper, self).__init__(multicloud_prefix, aai_base_url)
-        self._logger = logger
-
-    def param_update_user_directives(self, parameters, oof_directives):
-        for attr in oof_directives.get("attributes", []):
-            aname = attr.get("attribute_name", None)
-            avalue = attr.get("attribute_value", None)
-            if aname in parameters:
-                parameters[aname] = avalue
-            else:
-                self._logger.warn(
-                    "There is no parameter exist: %s" % aname)
-
-        return parameters
-
-    def param_update_sdnc_directives(self, parameters, sdnc_directives):
-        for attr in sdnc_directives.get("attributes", []):
-            aname = attr.get("attribute_name", None)
-            avalue = attr.get("attribute_value", None)
-            if aname in parameters:
-                parameters[aname] = avalue
-            else:
-                self._logger.warn(
-                    "There is no parameter exist: %s" % aname)
-
-        return parameters
-
-    def param_update_oof_directives(self, parameters, oof_directives):
-        for directive in oof_directives.get("directives", []):
-            if directive["type"] == "vnfc":
-                for directive2 in directive.get("directives", []):
-                    if directive2["type"] in ["flavor_directives",
-                                              "sriovNICNetwork_directives"]:
-                        for attr in directive2.get("attributes", []):
-                            flavor_label = attr.get("attribute_name", None)
-                            flavor_value = attr.get("attribute_value", None)
-                            if flavor_label in parameters:
-                                parameters[flavor_label] = flavor_value
-                            else:
-                                self._logger.warn(
-                                    "There is no parameter exist: %s" %
-                                    flavor_label)
-
-        return parameters
-
-    def openstack_template_update(self, template_data, vf_module_model_customization_id):
-        # try 1: check if artifact is available with vfmodule_uuid
-        # assumption: mount point: /opt/artifacts/<vfmodule_uuid>
-        try:
-            vfmodule_path_base = r"/opt/artifacts/%s" % vf_module_model_customization_id
-            self._logger.debug("vfmodule_path_base: %s" % vfmodule_path_base)
-            vfmodule_metadata_path = r"%s/vfmodule-meta.json" % vfmodule_path_base
-            service_metadata_path = r"%s/service-meta.json" % vfmodule_path_base
-            with open(vfmodule_metadata_path,
-                      'r') as vf:
-                vfmodule_metadata_str = vf.read()  # assume the metadata file size is small
-                vfmodule_metadata = json.loads(vfmodule_metadata_str)
-                vfmodule_metadata = [e for e in vfmodule_metadata
-                                     if e.get("vfModuleModelCustomizationUUID", None)
-                                     == vf_module_model_customization_id]
-                self._logger.debug("vfmodule_metadata: %s" % vfmodule_metadata)
-                if vfmodule_metadata and len(vfmodule_metadata) > 0:
-                    # load service-metadata
-                    with open(service_metadata_path,
-                              'r') as sf:
-                        service_metadata_str = sf.read()  # assume the metadata file size is small
-                        service_metadata = json.loads(service_metadata_str)
-                        self._logger.debug("service_metadata: %s" % service_metadata)
-                        if service_metadata and len(service_metadata) > 0:
-                            # get the artifacts uuid
-                            artifacts_uuids = vfmodule_metadata[0].get("artifacts", None)
-                            self._logger.debug("artifacts_uuids: %s" % artifacts_uuids)
-                            templatedata1 = template_data.copy()
-                            for a in service_metadata["artifacts"]:
-                                artifactUUID = a.get("artifactUUID", "")
-                                if artifactUUID not in artifacts_uuids:
-                                    continue
-                                artifact_type = a.get("artifactType", "")
-                                artifact_name = a.get("artifactName", "")
-                                artifact_path = r"%s/%s" % (vfmodule_path_base, artifact_name)
-                                self._logger.debug("artifact_path: %s" % artifact_path)
-
-                                # now check the type
-                                if artifact_type.lower() == "heat":
-                                    # heat template file
-                                    with open(artifact_path,
-                                              'r') as af:
-                                        # assume the template file size is small
-                                        templatedata1["template"] = \
-                                            yaml.load(af, Loader=NoDatesSafeLoader)
-                                    # pass
-
-                                elif artifact_type.lower() == "heat_env":
-                                    # heat env file
-                                    with open(artifact_path,
-                                              'r') as af:
-                                        # assume the env file size is small
-                                        templatedata1.update(yaml.load(
-                                            af, Loader=NoDatesSafeLoader))
-                                    # pass
-                                # pass
-                            return templatedata1
-                        else:
-                            pass
-                else:
-                    self._logger.info("artifacts not available for vfmodule %s" % vf_module_model_customization_id)
-                    pass
-        except Exception as e:
-            self._logger.error("template_update fails: %s" % str(e))
-
-        # try 2: reuse the input: template_data
-        return template_data
-
-    def workload_create(self, vimid, workload_data, project_idorname=None):
-        '''
-        Instantiate a stack over target cloud region (OpenStack instance)
-        The template for workload will be fetched from sdc client
-        :param vimid:
-        :param workload_data:
-        :param project_idorname: tenant id or name
-        :return: result code, status enum, status reason
-            result code: 0-ok, otherwise error
-            status enum: "CREATE_IN_PROGRESS", "CREATE_FAILED"
-            status reason: message to explain the status enum
-        '''
-
-        # step 2: normalize the input: xxx_directives
-        data = workload_data
-        vf_module_model_customization_id = data.get("vf-module-model-customization-id", None)
-        vf_module_id = data.get("vf-module-id", "")
-        user_directive = data.get("user_directives", {})
-        oof_directive = data.get("oof_directives", {})
-        sdnc_directive = data.get("sdnc_directives", {})
-        template_type = data.get("template_type", None)
-        template_data = data.get("template_data", {})
-        # resp_template = None
-        if not template_type or "heat" != template_type.lower():
-            return status.HTTP_400_BAD_REQUEST, "CREATE_FAILED", \
-                   "Bad parameters: template type %s is not heat" %\
-                   template_type or ""
-
-        # retrieve the template data
-        template_data = self.openstack_template_update(template_data, vf_module_model_customization_id)
-
-        # update the parameter in order of reverse precedence
-        parameters = template_data.get("parameters", {})
-        parameters = self.param_update_sdnc_directives(parameters, sdnc_directive)
-        parameters = self.param_update_oof_directives(parameters, oof_directive)
-        parameters = self.param_update_user_directives(parameters, user_directive)
-        template_data["parameters"] = parameters
-
-        # reset to make sure "files" are empty
-        template_data["files"] = {}
-
-        template_data["stack_name"] =\
-            template_data.get("stack_name", vf_module_id)
-
-        # authenticate
-        cloud_owner, regionid = extsys.decode_vim_id(vimid)
-        # should go via multicloud proxy so that
-        #  the selflink is updated by multicloud
-        retcode, v2_token_resp_json, os_status = \
-            helper.MultiCloudIdentityHelper(
-                settings.MULTICLOUD_API_V1_PREFIX,
-                cloud_owner, regionid, "/v2.0/tokens",
-                {"Project": project_idorname}
-            )
-        if retcode > 0 or not v2_token_resp_json:
-            errmsg = "authenticate fails:%s,%s, %s" %\
-                     (cloud_owner, regionid, v2_token_resp_json)
-            logger.error(errmsg)
-            return (
-                os_status, "CREATE_FAILED", errmsg
-            )
-
-        # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
-        service_type = "orchestration"
-        resource_uri = "/stacks"
-        self._logger.info("create stack resources, URI:%s" % resource_uri)
-        retcode, content, os_status = \
-            helper.MultiCloudServiceHelper(cloud_owner, regionid,
-                                           v2_token_resp_json,
-                                           service_type, resource_uri,
-                                           template_data, "POST")
-
-        if retcode == 0:
-            stack1 = content.get('stack', None)
-            # stackid = stack1["id"] if stack1 else ""
-            return 0, "CREATE_IN_PROGRESS", stack1
-        else:
-            self._logger.info("workload_create fails: %s" % content)
-            return os_status, "CREATE_FAILED", content
diff --git a/share/starlingx_base/resource/k8s_infra_workload_helper.py b/share/starlingx_base/resource/k8s_infra_workload_helper.py
new file mode 100644 (file)
index 0000000..eef6d3a
--- /dev/null
@@ -0,0 +1,218 @@
+# Copyright (c) 2017-2020 Wind River Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import logging
+import json
+import requests
+import tarfile
+from ruamel import yaml
+
+
+from rest_framework import status
+from rest_framework.response import Response
+
+from django.conf import settings
+from common.msapi import extsys
+from newton_base.util import VimDriverUtils
+
+logger = logging.getLogger(__name__)
+
+
+# wrap calls to multicloud-k8s infra_workload API
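+# Typical use (mirrors the callers in infra_workload.py; the view instance is
+# passed explicitly as the first argument of these staticmethods):
+#   InfraWorkloadHelper.workload_create(view, vimid, workloadid, request)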
+class InfraWorkloadHelper:
+
+    # def resettarfile(tarinfo):
+    #     tarinfo.uid = tarinfo.gid = 0
+    #     tarinfo.uname = tarinfo.gname = "root"
+    #     return tarinfo
+
+    @staticmethod
+    def workload_create(self, vimid, workloadid, request):
+        '''
+        Deploy workload to target k8s via multicloud-k8s
+        :param vimid:
+        :param workloadid:
+        :param request:
+        '''
+        # resp_template = {
+        #     "template_type": "HEAT",
+        #     "workload_id": workloadid,
+        #     "workload_status": "GET_FAILED",
+        #     "workload_status_reason": "Exception occurs"
+        # }
+        # status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
+
+        # viminfo = VimDriverUtils.get_vim_info(vimid)
+        workload_query = VimDriverUtils.get_query_part(request)
+        workload_data = request.data
+
+        # vf_module_model_customization_id = data.get("vf-module-model-customization-id", None)
+        # vf_module_id = data.get("vf-module-id", "")
+        user_directive = workload_data.get("user_directives", {})
+        # oof_directive = data.get("oof_directives", {})
+        # sdnc_directive = data.get("sdnc_directives", {})
+        # template_type = data.get("template_type")
+        # template_data = data.get("template_data", {})
+
+        # 1, create profile if not exists
+        # manifest.yaml content
+        manifest_yaml = {
+            "version": "v1",
+            "type": {
+                "values": "override_values.yaml"
+            }
+        }
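+        # yaml.dump of the dict above yields (illustrative) manifest.yaml:
+        #   version: v1
+        #   type:
+        #     values: override_values.yaml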
+
+        # override_values.yaml content
+        override_values_yaml = ""
+
+        # extract rb and profile info from user_directive
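+        # Illustrative user_directives attributes (names taken from the loop
+        # below; values here are placeholders):
+        #   {"attributes": [
+        #       {"attribute_name": "definition-name",    "attribute_value": "<rb name>"},
+        #       {"attribute_name": "definition-version", "attribute_value": "<rb version>"},
+        #       {"attribute_name": "profile-name",       "attribute_value": "<profile name>"},
+        #       {"attribute_name": "override_values",    "attribute_value": "<override_values.yaml content>"}]}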
+        rbname = "fakerbname"
+        rbversion = "1"
+        profilename = "p1"
+
+        for attr in user_directive.get("attributes", []):
+            aname = attr.get("attribute_name", None)
+            avalue = attr.get("attribute_value", None)
+            if aname == "override_values":
+                # manifest_yaml = avalue["manifest_yaml"]
+                # #override_values_yaml = avalue["override_values_yaml"]
+                override_values_yaml = avalue
+            elif aname == "definition-name":
+                rbname = avalue
+            elif aname == "definition-version":
+                rbversion = avalue
+            elif aname == "profile-name":
+                profilename = avalue
+
+        # package them into tarball
+        basedir = "/tmp/%s_%s_%s/" % (rbname, rbversion, profilename)
+        manifest_yaml_filename = "manifest.yaml"
+        override_values_yaml_filename = "override_values.yaml"
+        profile_filename = "profile.tar.gz"
+        if not os.path.isdir(basedir):
+            os.mkdir(basedir)
+        logger.debug("k8s profile temp dir for %s,%s,%s is %s" % (rbname, rbversion, profilename, basedir))
+        with open(basedir+manifest_yaml_filename, "w") as f:
+            yaml.dump(manifest_yaml, f, Dumper=yaml.RoundTripDumper)
+        with open(basedir+override_values_yaml_filename, "w") as f:
+            #yaml.dump(override_values_yaml, f, Dumper=yaml.RoundTripDumper)
+            f.write(override_values_yaml)
+
+        tar = tarfile.open(basedir+profile_filename, "w:gz")
+        # tar.add(basedir+manifest_yaml_filename, arcname=manifest_yaml_filename,filter=resettarfile)
+        tar.add(basedir+manifest_yaml_filename, arcname=manifest_yaml_filename)
+        tar.add(basedir+override_values_yaml_filename, arcname=override_values_yaml_filename)
+        tar.close()
+
+        # create profile and upload content
+        create_rbprofile_json = {
+            "rb-name": rbname,
+            "rb-version": rbversion,
+            "profile-name": profilename,
+            "release-name": "r1",
+            "namespace": "testnamespace1",
+            "kubernetes-version": "1.16.2"
+        }
+
+        multicloudK8sUrl = "%s://%s:%s/api/multicloud-k8s/v1" % (
+            settings.MSB_SERVICE_PROTOCOL, settings.MSB_SERVICE_ADDR, settings.MSB_SERVICE_PORT)
+        profileUrl = multicloudK8sUrl+"/v1/rb/definition/%s/%s/profile" % (rbname, rbversion)
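+        # Two requests are issued against the k8s plugin rb profile API built above:
+        #   POST <profileUrl>                         -> create the profile metadata
+        #   POST <profileUrl>/<profile-name>/content  -> upload profile.tar.gz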
+
+        #data = open('create_rbprofile.json')
+        response = requests.post(profileUrl, data=json.dumps(create_rbprofile_json), verify=False)
+        logger.debug("create profile, returns: %s,%s" % (response.content, response.status_code))
+
+        profileContentUrl = profileUrl + "/%s/content" % (profilename)
+        #profileContent = open(basedir+profile_filename, 'rb').read()
+        with open(basedir+profile_filename, "rb") as profileContent:
+            response = requests.post(profileContentUrl, data=profileContent.read(), verify=False)
+            logger.debug("upload profile content, returns: %s,%s" % (response.content, response.status_code))
+
+        # 2.forward infra_workload API requests with queries
+        cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+        infraUrl = multicloudK8sUrl+"/%s/%s/infra_workload" % (cloud_owner, cloud_region_id)
+        if workload_query:
+            infraUrl += ("?%s" % workload_query)
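+        # Resulting URL (illustrative):
+        #   <multicloudK8sUrl>/<cloud_owner>/<cloud_region_id>/infra_workload?<workload_query>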
+
+        # should we forward headers ? TBD
+        resp = requests.post(infraUrl, data=workload_data, verify=False)
+        # resp_template["workload_status_reason"] = resp.content
+        # status_code = resp.status_code
+        return Response(data=resp.content, status=resp.status_code)
+
+
+    @staticmethod
+    def workload_delete(self, vimid, workloadid, request):
+        '''
+        remove workload
+        '''
+        # resp_template = {
+        #     "template_type": "HEAT",
+        #     "workload_id": workloadid,
+        #     "workload_status": "GET_FAILED",
+        #     "workload_status_reason": "Exception occurs"
+        # }
+        # status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
+
+        workload_query_str = VimDriverUtils.get_query_part(request)
+        workload_data = request.data
+
+        multicloudK8sUrl = "%s://%s:%s/api/multicloud-k8s/v1" % (
+            settings.MSB_SERVICE_PROTOCOL, settings.MSB_SERVICE_ADDR, settings.MSB_SERVICE_PORT)
+
+        # 1.forward infra_workload API requests with queries
+        cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+        infraUrl = multicloudK8sUrl+"/%s/%s/infra_workload" % (cloud_owner, cloud_region_id)
+        if workload_query_str:
+            infraUrl += ("?%s" % workload_query_str)
+
+        # should we forward headers ? TBD
+        resp = requests.delete(infraUrl, data=workload_data, verify=False)
+        # resp_template["workload_status_reason"] = resp.content
+        # status_code = resp.status_code
+        return Response(data=resp.content, status=resp.status_code)
+
+
+    @staticmethod
+    def workload_detail(self, vimid, workloadid, request):
+        '''
+        get workload status
+        '''
+        # resp_template = {
+        #     "template_type": "HEAT",
+        #     "workload_id": workloadid,
+        #     "workload_status": "GET_FAILED",
+        #     "workload_status_reason": "Exception occurs"
+        # }
+        # status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
+
+        workload_query_str = VimDriverUtils.get_query_part(request)
+        workload_data = request.data
+
+        multicloudK8sUrl = "%s://%s:%s/api/multicloud-k8s/v1" % (
+            settings.MSB_SERVICE_PROTOCOL, settings.MSB_SERVICE_ADDR, settings.MSB_SERVICE_PORT)
+
+        # 1.forward infra_workload API requests with queries
+        cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+        infraUrl = multicloudK8sUrl+"/%s/%s/infra_workload" % (cloud_owner, cloud_region_id)
+        if workload_query_str:
+            infraUrl += ("?%s" % workload_query_str)
+
+        # should we forward headers ? TBD
+        resp = requests.get(infraUrl, data=workload_data, verify=False)
+        # resp_template["workload_status_reason"] = resp.content
+        # status_code = resp.status_code
+        return Response(data=resp.content, status=resp.status_code)
diff --git a/share/starlingx_base/resource/openstack_infra_workload_helper.py b/share/starlingx_base/resource/openstack_infra_workload_helper.py
new file mode 100644 (file)
index 0000000..f12afd4
--- /dev/null
@@ -0,0 +1,233 @@
+# Copyright (c) 2017-2020 Wind River Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import json
+
+from rest_framework import status
+from django.conf import settings
+from common.msapi import extsys
+from common.msapi.helper import Helper as helper
+from newton_base.util import VimDriverUtils
+
+# from newton_base.registration import registration as newton_registration
+from newton_base.resource import infra_workload_helper as newton_infra_workload_helper
+
+import yaml
+NoDatesSafeLoader = yaml.SafeLoader
+NoDatesSafeLoader.yaml_implicit_resolvers = {
+    k: [r for r in v if r[0] != 'tag:yaml.org,2002:timestamp'] for
+        k, v in list(NoDatesSafeLoader.yaml_implicit_resolvers.items())
+}
+
+
+logger = logging.getLogger(__name__)
+
+
+# helper for the infra_workload API handler targeting OpenStack Heat
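+# Typical use (mirrors the callers in infra_workload.py):
+#   worker = InfraWorkloadHelper(settings.MULTICLOUD_API_V1_PREFIX, settings.AAI_BASE_URL)
+#   worker.workload_create(vimid, request.data, specified_project_idorname)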
+class InfraWorkloadHelper(newton_infra_workload_helper.InfraWorkloadHelper):
+
+    def __init__(self, multicloud_prefix, aai_base_url):
+        super(InfraWorkloadHelper, self).__init__(multicloud_prefix, aai_base_url)
+        self._logger = logger
+
+    def param_update_user_directives(self, parameters, oof_directives):
+        for attr in oof_directives.get("attributes", []):
+            aname = attr.get("attribute_name", None)
+            avalue = attr.get("attribute_value", None)
+            if aname in parameters:
+                parameters[aname] = avalue
+            else:
+                self._logger.warn(
+                    "Parameter does not exist: %s" % aname)
+
+        return parameters
+
+    def param_update_sdnc_directives(self, parameters, sdnc_directives):
+        for attr in sdnc_directives.get("attributes", []):
+            aname = attr.get("attribute_name", None)
+            avalue = attr.get("attribute_value", None)
+            if aname in parameters:
+                parameters[aname] = avalue
+            else:
+                self._logger.warn(
+                    "Parameter does not exist: %s" % aname)
+
+        return parameters
+
+    def param_update_oof_directives(self, parameters, oof_directives):
+        for directive in oof_directives.get("directives", []):
+            if directive["type"] == "vnfc":
+                for directive2 in directive.get("directives", []):
+                    if directive2["type"] in ["flavor_directives",
+                                              "sriovNICNetwork_directives"]:
+                        for attr in directive2.get("attributes", []):
+                            flavor_label = attr.get("attribute_name", None)
+                            flavor_value = attr.get("attribute_value", None)
+                            if flavor_label in parameters:
+                                parameters[flavor_label] = flavor_value
+                            else:
+                                self._logger.warn(
+                                    "Parameter does not exist: %s" %
+                                    flavor_label)
+
+        return parameters
+
+    def openstack_template_update(self, template_data, vf_module_model_customization_id):
+        # try 1: check if artifact is available with vfmodule_uuid
+        # assumption: mount point: /opt/artifacts/<vfmodule_uuid>
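+        # Illustrative layout under the assumed mount point:
+        #   /opt/artifacts/<vfmodule_uuid>/vfmodule-meta.json
+        #   /opt/artifacts/<vfmodule_uuid>/service-meta.json
+        #   /opt/artifacts/<vfmodule_uuid>/<heat and heat_env artifact files>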
+        try:
+            vfmodule_path_base = r"/opt/artifacts/%s" % vf_module_model_customization_id
+            self._logger.debug("vfmodule_path_base: %s" % vfmodule_path_base)
+            vfmodule_metadata_path = r"%s/vfmodule-meta.json" % vfmodule_path_base
+            service_metadata_path = r"%s/service-meta.json" % vfmodule_path_base
+            with open(vfmodule_metadata_path,
+                      'r') as vf:
+                vfmodule_metadata_str = vf.read()  # assume the metadata file size is small
+                vfmodule_metadata = json.loads(vfmodule_metadata_str)
+                vfmodule_metadata = [e for e in vfmodule_metadata
+                                     if e.get("vfModuleModelCustomizationUUID", None)
+                                     == vf_module_model_customization_id]
+                self._logger.debug("vfmodule_metadata: %s" % vfmodule_metadata)
+                if vfmodule_metadata and len(vfmodule_metadata) > 0:
+                    # load service-metadata
+                    with open(service_metadata_path,
+                              'r') as sf:
+                        service_metadata_str = sf.read()  # assume the metadata file size is small
+                        service_metadata = json.loads(service_metadata_str)
+                        self._logger.debug("service_metadata: %s" % service_metadata)
+                        if service_metadata and len(service_metadata) > 0:
+                            # get the artifacts uuid
+                            artifacts_uuids = vfmodule_metadata[0].get("artifacts", None)
+                            self._logger.debug("artifacts_uuids: %s" % artifacts_uuids)
+                            templatedata1 = template_data.copy()
+                            for a in service_metadata["artifacts"]:
+                                artifactUUID = a.get("artifactUUID", "")
+                                if artifactUUID not in artifacts_uuids:
+                                    continue
+                                artifact_type = a.get("artifactType", "")
+                                artifact_name = a.get("artifactName", "")
+                                artifact_path = r"%s/%s" % (vfmodule_path_base, artifact_name)
+                                self._logger.debug("artifact_path: %s" % artifact_path)
+
+                                # now check the type
+                                if artifact_type.lower() == "heat":
+                                    # heat template file
+                                    with open(artifact_path,
+                                              'r') as af:
+                                        # assume the template file size is small
+                                        templatedata1["template"] = \
+                                            yaml.load(af, Loader=NoDatesSafeLoader)
+                                    # pass
+
+                                elif artifact_type.lower() == "heat_env":
+                                    # heat env file
+                                    with open(artifact_path,
+                                              'r') as af:
+                                        # assume the env file size is small
+                                        templatedata1.update(yaml.load(
+                                            af, Loader=NoDatesSafeLoader))
+                                    # pass
+                                # pass
+                            return templatedata1
+                        else:
+                            pass
+                else:
+                    self._logger.info("artifacts not available for vfmodule %s" % vf_module_model_customization_id)
+                    pass
+        except Exception as e:
+            self._logger.error("template_update fails: %s" % str(e))
+
+        # try 2: reuse the input: template_data
+        return template_data
+
+    def workload_create(self, vimid, workload_data, project_idorname=None):
+        '''
+        Instantiate a stack over target cloud region (OpenStack instance)
+        The template for workload will be fetched from sdc client
+        :param vimid:
+        :param workload_data:
+        :param project_idorname: tenant id or name
+        :return: result code, status enum, status reason
+            result code: 0-ok, otherwise error
+            status enum: "CREATE_IN_PROGRESS", "CREATE_FAILED"
+            status reason: message to explain the status enum
+        '''
+
+        # step 2: normalize the input: xxx_directives
+        data = workload_data
+        vf_module_model_customization_id = data.get("vf-module-model-customization-id", None)
+        vf_module_id = data.get("vf-module-id", "")
+        user_directive = data.get("user_directives", {})
+        oof_directive = data.get("oof_directives", {})
+        sdnc_directive = data.get("sdnc_directives", {})
+        template_type = data.get("template_type", None)
+        template_data = data.get("template_data", {})
+        # resp_template = None
+        if not template_type or "heat" != template_type.lower():
+            return status.HTTP_400_BAD_REQUEST, "CREATE_FAILED", \
+                   "Bad parameters: template type %s is not heat" %\
+                   (template_type or "")
+
+        # retrieve the template data
+        template_data = self.openstack_template_update(template_data, vf_module_model_customization_id)
+
+        # update the parameter in order of reverse precedence
+        parameters = template_data.get("parameters", {})
+        parameters = self.param_update_sdnc_directives(parameters, sdnc_directive)
+        parameters = self.param_update_oof_directives(parameters, oof_directive)
+        parameters = self.param_update_user_directives(parameters, user_directive)
+        template_data["parameters"] = parameters
+
+        # reset to make sure "files" are empty
+        template_data["files"] = {}
+
+        template_data["stack_name"] =\
+            template_data.get("stack_name", vf_module_id)
+
+        # authenticate
+        cloud_owner, regionid = extsys.decode_vim_id(vimid)
+        # should go via multicloud proxy so that
+        #  the selflink is updated by multicloud
+        retcode, v2_token_resp_json, os_status = \
+            helper.MultiCloudIdentityHelper(
+                settings.MULTICLOUD_API_V1_PREFIX,
+                cloud_owner, regionid, "/v2.0/tokens",
+                {"Project": project_idorname}
+            )
+        if retcode > 0 or not v2_token_resp_json:
+            errmsg = "authenticate fails:%s,%s, %s" %\
+                     (cloud_owner, regionid, v2_token_resp_json)
+            logger.error(errmsg)
+            return (
+                os_status, "CREATE_FAILED", errmsg
+            )
+
+        # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+        service_type = "orchestration"
+        resource_uri = "/stacks"
+        self._logger.info("create stack resources, URI:%s" % resource_uri)
+        retcode, content, os_status = \
+            helper.MultiCloudServiceHelper(cloud_owner, regionid,
+                                           v2_token_resp_json,
+                                           service_type, resource_uri,
+                                           template_data, "POST")
+
+        if retcode == 0:
+            stack1 = content.get('stack', None)
+            # stackid = stack1["id"] if stack1 else ""
+            return 0, "CREATE_IN_PROGRESS", stack1
+        else:
+            self._logger.info("workload_create fails: %s" % content)
+            return os_status, "CREATE_FAILED", content