-# Copyright (c) 2017-2019 Wind River Systems, Inc.\r
-#\r
-# Licensed under the Apache License, Version 2.0 (the "License");\r
-# you may not use this file except in compliance with the License.\r
-# You may obtain a copy of the License at\r
-#\r
-# http://www.apache.org/licenses/LICENSE-2.0\r
-#\r
-# Unless required by applicable law or agreed to in writing, software\r
-# distributed under the License is distributed on an "AS IS" BASIS,\r
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
-# See the License for the specific language governing permissions and\r
-# limitations under the License.\r
-\r
-import logging\r
-import json\r
-import uuid\r
-import traceback\r
-\r
-from keystoneauth1.exceptions import HttpError\r
-from rest_framework import status\r
-from rest_framework.response import Response\r
-from rest_framework.views import APIView\r
-\r
-from common.exceptions import VimDriverNewtonException\r
-from common.msapi import extsys\r
-from common.msapi.helper import MultiCloudThreadHelper\r
-from common.msapi.helper import MultiCloudAAIHelper\r
-from common.utils import restcall\r
-from newton_base.util import VimDriverUtils\r
-from django.conf import settings\r
-\r
-logger = logging.getLogger(__name__)\r
-\r
-\r
-class Registry(APIView):\r
-\r
- def __init__(self):\r
- # logger.debug("Registry __init__: %s" % traceback.format_exc())\r
- if not hasattr(self, "_logger"):\r
- self._logger = logger\r
-\r
- if not hasattr(self, "register_thread"):\r
- # dedicate thread to offload vim registration process\r
- self.register_thread = MultiCloudThreadHelper("vimupdater")\r
-\r
- if not hasattr(self, "register_helper") or not self.register_helper:\r
- if not hasattr(self, "proxy_prefix"):\r
- self.proxy_prefix = "multicloud"\r
- if not hasattr(self, "AAI_BASE_URL"):\r
- self.AAI_BASE_URL = "127.0.0.1"\r
- self.register_helper = RegistryHelper(\r
- self.proxy_prefix or "multicloud",\r
- self.AAI_BASE_URL or "127.0.0.1")\r
-\r
- def post(self, request, vimid=""):\r
- self._logger.info("registration with vimid: %s" % vimid)\r
- self._logger.debug("with data: %s" % request.data)\r
-\r
- try:\r
- # Get the specified tenant id\r
- specified_project_idorname = request.META.get("Project", None)\r
-\r
- # compose the one time backlog item\r
- backlog_item = {\r
- "id": vimid,\r
- "worker": self.register_helper.registryV0,\r
- "payload": (vimid, specified_project_idorname),\r
- "repeat": 0,\r
- "status": (1,\r
- "The registration is on progress")\r
- }\r
- self.register_thread.add(backlog_item)\r
- if 0 == self.register_thread.state():\r
- self.register_thread.start()\r
-\r
- return Response(status=status.HTTP_202_ACCEPTED)\r
-\r
- except VimDriverNewtonException as e:\r
- return Response(data={'error': e.content}, status=e.status_code)\r
- except HttpError as e:\r
- self._logger.error("HttpError: status:%s, response:%s"\r
- % (e.http_status, e.response.json()))\r
- return Response(data=e.response.json(), status=e.http_status)\r
- except Exception as e:\r
- self._logger.error(traceback.format_exc())\r
- return Response(\r
- data={'error': str(e)},\r
- status=status.HTTP_500_INTERNAL_SERVER_ERROR)\r
-\r
- def get(self, request, vimid):\r
- try:\r
- backlog_item = self.register_thread.get(vimid)\r
- if backlog_item:\r
- return Response(\r
- data={'status': backlog_item.get(\r
- "status", "Status not available, vimid: %s" % vimid)},\r
- status=status.HTTP_200_OK)\r
- else:\r
- return Response(\r
- data={\r
- 'error': "Registration process for "\r
- "Cloud Region not found: %s"\r
- % vimid\r
- },\r
- status=status.HTTP_404_NOT_FOUND)\r
- except Exception as e:\r
- self._logger.error(traceback.format_exc())\r
- return Response(\r
- data={'error': str(e)},\r
- status=status.HTTP_500_INTERNAL_SERVER_ERROR)\r
-\r
- def delete(self, request, vimid=""):\r
- self._logger.debug("Registration--delete::data> %s" % request.data)\r
- self._logger.debug("Registration--delete::vimid > %s"% vimid)\r
- try:\r
-\r
- # compose the one time backlog item\r
- backlog_item = {\r
- "id": vimid,\r
- "worker": self.register_helper.unregistryV0,\r
- "payload": (vimid),\r
- "repeat": 0,\r
- "status": (1, "The de-registration is on process")\r
- }\r
- self.register_thread.add(backlog_item)\r
- if 0 == self.register_thread.state():\r
- self.register_thread.start()\r
-\r
- return Response(\r
- status=status.HTTP_204_NO_CONTENT\r
- )\r
- except VimDriverNewtonException as e:\r
- return Response(data={'error': e.content}, status=e.status_code)\r
- except HttpError as e:\r
- self._logger.error("HttpError: status:%s, response:%s"\r
- % (e.http_status, e.response.json()))\r
- return Response(data=e.response.json(), status=e.http_status)\r
- except Exception as e:\r
- self._logger.error(traceback.format_exc())\r
- return Response(data={'error': str(e)},\r
- status=status.HTTP_500_INTERNAL_SERVER_ERROR)\r
-\r
-\r
-\r
-class RegistryHelper(MultiCloudAAIHelper):\r
- '''\r
- Helper code to discover and register a cloud region's resource\r
- '''\r
-\r
- def __init__(self, multicloud_prefix, aai_base_url):\r
- # logger.debug("RegistryHelper __init__: %s" % traceback.format_exc())\r
- self.proxy_prefix = multicloud_prefix\r
- self.aai_base_url = aai_base_url\r
- self._logger = logger\r
- super(RegistryHelper, self).__init__(multicloud_prefix, aai_base_url)\r
-\r
- def registryV1(self, cloud_owner, cloud_region_id):\r
- # cloud_owner = payload.get("cloud-owner", None)\r
- # cloud_region_id = payload.get("cloud-region-id", None)\r
- vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)\r
- return self.registryV0(vimid)\r
-\r
- def registryV0(self, vimid, project_idorname=None):\r
- # populate proxy identity url\r
- self._update_proxy_identity_endpoint(vimid)\r
-\r
- # prepare request resource to vim instance\r
- # get token:\r
- viminfo = VimDriverUtils.get_vim_info(vimid)\r
- sess = None\r
- if not viminfo:\r
- return (\r
- 10,\r
- "Cloud Region not found in AAI: %s" % vimid\r
- )\r
- if project_idorname:\r
- try:\r
- # check if specified with tenant id\r
- sess = VimDriverUtils.get_session(\r
- viminfo, tenant_name=None,\r
- tenant_id=project_idorname\r
- )\r
- except Exception as e:\r
- pass\r
-\r
- if not sess:\r
- try:\r
- # check if specified with tenant name\r
- sess = VimDriverUtils.get_session(\r
- viminfo, tenant_name=project_idorname,\r
- tenant_id=None\r
- )\r
- except Exception as e:\r
- pass\r
-\r
- if not sess:\r
- # set the default tenant since there is no tenant info in the VIM yet\r
- sess = VimDriverUtils.get_session(\r
- viminfo, tenant_name=viminfo.get('tenant', None))\r
-\r
- # step 1. discover all projects and populate into AAI\r
- retcode, status = self._discover_tenants(vimid, sess, viminfo)\r
- # if 0 != retcode:\r
- # return (\r
- # retcode, status\r
- # )\r
-\r
- # discover all flavors\r
- retcode, status = self._discover_flavors(vimid, sess, viminfo)\r
- # if 0 != retcode:\r
- # return (\r
- # retcode, status\r
- # )\r
-\r
- # discover all images\r
- retcode, status = self._discover_images(vimid, sess, viminfo)\r
- # if 0 != retcode:\r
- # return (\r
- # retcode, status\r
- # )\r
-\r
- # discover all az\r
- retcode, status = self._discover_availability_zones(vimid, sess, viminfo)\r
- # if 0 != retcode:\r
- # return (\r
- # retcode, status\r
- # )\r
-\r
- # discover all vg\r
- #self._discover_volumegroups(vimid, sess, viminfo)\r
- # if 0 != retcode:\r
- # return (\r
- # retcode, status\r
- # )\r
-\r
- # discover all snapshots\r
- #self._discover_snapshots(vimid, sess, viminfo)\r
- # if 0 != retcode:\r
- # return retcode, status\r
-\r
- # discover all server groups\r
- #self.discover_servergroups(request, vimid, sess, viminfo)\r
- # if 0 != retcode:\r
- # return retcode, status\r
-\r
- # discover all pservers\r
- #self._discover_pservers(vimid, sess, viminfo)\r
- # if 0 != retcode:\r
- # return retcode, status\r
-\r
- return (\r
- 0,\r
- "Registration finished for Cloud Region: %s" % vimid\r
- )\r
-\r
- def unregistryV1(self, cloud_owner, cloud_region_id):\r
- # cloud_owner = payload.get("cloud-owner", None)\r
- # cloud_region_id = payload.get("cloud-region-id", None)\r
- vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)\r
- return self.unregistryV0(vimid)\r
-\r
- def unregistryV0(self, vimid):\r
- # prepare request resource to vim instance\r
- # get token:\r
- viminfo = VimDriverUtils.get_vim_info(vimid)\r
- if not viminfo:\r
- return (\r
- 10,\r
- "Cloud Region not found:" % vimid\r
- )\r
-\r
- cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)\r
-\r
- # get the resource first\r
- resource_url = ("/cloud-infrastructure/cloud-regions/"\r
- "cloud-region/%(cloud_owner)s/%(cloud_region_id)s?depth=all"\r
- % {\r
- "cloud_owner": cloud_owner,\r
- "cloud_region_id": cloud_region_id,\r
- })\r
-\r
- # get cloud-region\r
- retcode, content, status_code = \\r
- restcall.req_to_aai(resource_url, "GET")\r
-\r
- # add resource-version\r
- cloudregiondata = {}\r
- if retcode == 0 and content:\r
- cloudregiondata = json.JSONDecoder().decode(content)\r
- else:\r
- return (\r
- 10,\r
- "Cloud Region not found: %s, %s" % (cloud_owner, cloud_region_id)\r
- )\r
-\r
- # step 1. remove all tenants\r
- tenants = cloudregiondata.get("tenants", None)\r
- for tenant in tenants.get("tenant", []) if tenants else []:\r
- # common prefix\r
- aai_cloud_region = \\r
- "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \\r
- % (cloud_owner, cloud_region_id, tenant['tenant-id'])\r
-\r
- # remove all vservers\r
- try:\r
- # get list of vservers\r
- vservers = tenant.get('vservers', {}).get('vserver', [])\r
- for vserver in vservers:\r
- try:\r
- # iterate vport, except will be raised if no l-interface exist\r
- for vport in vserver['l-interfaces']['l-interface']:\r
- # delete vport\r
- vport_delete_url =\\r
- aai_cloud_region + \\r
- "/vservers/vserver/%s/l-interfaces/l-interface/%s?resource-version=%s" \\r
- % (vserver['vserver-id'], vport['interface-name'],\r
- vport['resource-version'])\r
- restcall.req_to_aai(vport_delete_url, "DELETE")\r
- except Exception as e:\r
- pass\r
-\r
- try:\r
- # delete vserver\r
- vserver_delete_url =\\r
- aai_cloud_region +\\r
- "/vservers/vserver/%s?resource-version=%s" \\r
- % (vserver['vserver-id'],\r
- vserver['resource-version'])\r
- restcall.req_to_aai(vserver_delete_url, "DELETE")\r
- except Exception as e:\r
- continue\r
-\r
- except Exception:\r
- self._logger.error(traceback.format_exc())\r
- pass\r
-\r
- resource_url = ("/cloud-infrastructure/cloud-regions/"\r
- "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"\r
- "%(resource_type)ss/%(resource_type)s/%(resoure_id)s/"\r
- "?resource-version=%(resource-version)s"\r
- % {\r
- "cloud_owner": cloud_owner,\r
- "cloud_region_id": cloud_region_id,\r
- "resource_type": "tenant",\r
- "resoure_id": tenant["tenant-id"],\r
- "resource-version": tenant["resource-version"]\r
- })\r
- # remove tenant\r
- retcode, content, status_code = \\r
- restcall.req_to_aai(resource_url, "DELETE")\r
-\r
- # remove all flavors\r
- flavors = cloudregiondata.get("flavors", None)\r
- for flavor in flavors.get("flavor", []) if flavors else []:\r
- # iterate hpa-capabilities\r
- hpa_capabilities = flavor.get("hpa-capabilities", None)\r
- for hpa_capability in hpa_capabilities.get("hpa-capability", [])\\r
- if hpa_capabilities else []:\r
- resource_url = ("/cloud-infrastructure/cloud-regions/"\r
- "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"\r
- "%(resource_type)ss/%(resource_type)s/%(resoure_id)s/"\r
- "hpa-capabilities/hpa-capability/%(hpa-capability-id)s/"\r
- "?resource-version=%(resource-version)s"\r
- % {\r
- "cloud_owner": cloud_owner,\r
- "cloud_region_id": cloud_region_id,\r
- "resource_type": "flavor",\r
- "resoure_id": flavor["flavor-id"],\r
- "hpa-capability-id": hpa_capability["hpa-capability-id"],\r
- "resource-version": hpa_capability["resource-version"]\r
- })\r
- # remove hpa-capability\r
- retcode, content, status_code = \\r
- restcall.req_to_aai(resource_url, "DELETE")\r
-\r
- # remove flavor\r
- resource_url = ("/cloud-infrastructure/cloud-regions/"\r
- "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"\r
- "%(resource_type)ss/%(resource_type)s/%(resoure_id)s/"\r
- "?resource-version=%(resource-version)s"\r
- % {\r
- "cloud_owner": cloud_owner,\r
- "cloud_region_id": cloud_region_id,\r
- "resource_type": "flavor",\r
- "resoure_id": flavor["flavor-id"],\r
- "resource-version": flavor["resource-version"]\r
- })\r
-\r
- retcode, content, status_code = \\r
- restcall.req_to_aai(resource_url, "DELETE")\r
-\r
- # remove all images\r
- images = cloudregiondata.get("images", None)\r
- for image in images.get("image", []) if images else []:\r
- resource_url = ("/cloud-infrastructure/cloud-regions/"\r
- "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"\r
- "%(resource_type)ss/%(resource_type)s/%(resoure_id)s/"\r
- "?resource-version=%(resource-version)s"\r
- % {\r
- "cloud_owner": cloud_owner,\r
- "cloud_region_id": cloud_region_id,\r
- "resource_type": "image",\r
- "resoure_id": image["image-id"],\r
- "resource-version": image["resource-version"]\r
- })\r
- # remove image\r
- retcode, content, status_code = \\r
- restcall.req_to_aai(resource_url, "DELETE")\r
-\r
- # remove all az\r
-\r
- # remove all vg\r
-\r
- # remove all snapshots\r
- snapshots = cloudregiondata.get("snapshots", None)\r
- for snapshot in snapshots.get("snapshot", []) if snapshots else []:\r
- resource_url = ("/cloud-infrastructure/cloud-regions/"\r
- "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"\r
- "%(resource_type)ss/%(resource_type)s/%(resoure_id)s/"\r
- "?resource-version=%(resource-version)s"\r
- % {\r
- "cloud_owner": cloud_owner,\r
- "cloud_region_id": cloud_region_id,\r
- "resource_type": "snapshot",\r
- "resoure_id": snapshot["snapshot-id"],\r
- "resource-version": snapshot["resource-version"]\r
- })\r
- # remove snapshot\r
- retcode, content, status_code = \\r
- restcall.req_to_aai(resource_url, "DELETE")\r
-\r
- # remove all server groups\r
-\r
- # remove all pservers\r
-\r
- # remove cloud region itself\r
- resource_url = ("/cloud-infrastructure/cloud-regions/"\r
- "cloud-region/%(cloud_owner)s/%(cloud_region_id)s"\r
- "?resource-version=%(resource-version)s"\r
- % {\r
- "cloud_owner": cloud_owner,\r
- "cloud_region_id": cloud_region_id,\r
- "resource-version": cloudregiondata["resource-version"]\r
- })\r
- # remove cloud region\r
- retcode, content, status_code = \\r
- restcall.req_to_aai(resource_url, "DELETE")\r
-\r
- return retcode, content\r
-\r
- def _discover_tenants(self, vimid="", session=None, viminfo=None):\r
- try:\r
- # iterate all projects and populate them into AAI\r
- cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)\r
- for tenant in self._get_list_resources(\r
- "projects", "identity", session, viminfo, vimid,\r
- "projects"):\r
- tenant_info = {\r
- 'tenant-id': tenant['id'],\r
- 'tenant-name': tenant['name'],\r
- }\r
- self._update_resoure(\r
- cloud_owner, cloud_region_id, tenant['id'],\r
- tenant_info, "tenant")\r
- return 0, "succeed"\r
- except VimDriverNewtonException as e:\r
- self._logger.error(\r
- "VimDriverNewtonException: status:%s, response:%s"\r
- % (e.http_status, e.content))\r
- return (\r
- e.http_status, e.content\r
- )\r
- except HttpError as e:\r
- if e.http_status == status.HTTP_403_FORBIDDEN:\r
- ### get the tenant information from the token response\r
- try:\r
- ### get tenant info from the session\r
- tmp_auth_state = VimDriverUtils.get_auth_state(session)\r
- tmp_auth_info = json.loads(tmp_auth_state)\r
- tmp_auth_data = tmp_auth_info['body']\r
- tenant = tmp_auth_data['token']['project']\r
- tenant_info = {\r
- 'tenant-id': tenant['id'],\r
- 'tenant-name': tenant['name'],\r
- }\r
-\r
- self._update_resoure(\r
- cloud_owner, cloud_region_id, tenant['id'],\r
- tenant_info, "tenant")\r
-\r
- return 0, "succeed"\r
-\r
- except Exception as ex:\r
- self._logger.error(traceback.format_exc())\r
- return (\r
- 11,\r
- str(ex)\r
- )\r
- else:\r
- self._logger.error(\r
- "HttpError: status:%s, response:%s"\r
- % (e.http_status, e.response.json()))\r
- return (\r
- e.http_status, e.response.json()\r
- )\r
- except Exception as e:\r
- self._logger.error(traceback.format_exc())\r
- return (\r
- 11,\r
- str(e)\r
- )\r
-\r
- def _discover_flavors(self, vimid="", session=None, viminfo=None):\r
- try:\r
- cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)\r
- for flavor in self._get_list_resources(\r
- "/flavors/detail", "compute", session, viminfo, vimid,\r
- "flavors"):\r
- flavor_info = {\r
- 'flavor-id': flavor['id'],\r
- 'flavor-name': flavor['name'],\r
- 'flavor-vcpus': flavor['vcpus'],\r
- 'flavor-ram': flavor['ram'],\r
- 'flavor-disk': flavor['disk'],\r
- 'flavor-ephemeral': flavor['OS-FLV-EXT-DATA:ephemeral'],\r
- 'flavor-swap': flavor['swap'],\r
- 'flavor-is-public': flavor['os-flavor-access:is_public'],\r
- 'flavor-disabled': flavor['OS-FLV-DISABLED:disabled'],\r
- }\r
-\r
- if flavor.get('links') and len(flavor['links']) > 0:\r
- flavor_info['flavor-selflink'] =\\r
- flavor['links'][0]['href'] or 'http://0.0.0.0'\r
- else:\r
- flavor_info['flavor-selflink'] = 'http://0.0.0.0'\r
-\r
- # add hpa capabilities\r
- if (flavor['name'].find('onap.') == 0):\r
- req_resouce = "/flavors/%s/os-extra_specs" % flavor['id']\r
- extraResp = self._get_list_resources(\r
- req_resouce, "compute", session,\r
- viminfo, vimid, "extra_specs")\r
-\r
- hpa_capabilities =\\r
- self._get_hpa_capabilities(flavor, extraResp, viminfo)\r
- flavor_info['hpa-capabilities'] = \\r
- {'hpa-capability': hpa_capabilities}\r
-\r
- retcode, content = self._update_resoure(\r
- cloud_owner, cloud_region_id, flavor['id'],\r
- flavor_info, "flavor")\r
-\r
- return (0, "succeed")\r
- except VimDriverNewtonException as e:\r
- self._logger.error(\r
- "VimDriverNewtonException: status:%s, response:%s" %\r
- (e.http_status, e.content))\r
- return (\r
- e.http_status, e.content\r
- )\r
- except HttpError as e:\r
- self._logger.error("HttpError: status:%s, response:%s" %\r
- (e.http_status, e.response.json()))\r
- return (\r
- e.http_status, e.response.json()\r
- )\r
- except Exception as e:\r
- self._logger.error(traceback.format_exc())\r
- return (\r
- 11, str(e)\r
- )\r
-\r
- def _get_hpa_capabilities(self, flavor, extra_specs, viminfo):\r
- hpa_caps = []\r
-\r
- # Basic capabilties\r
- caps_dict = self._get_hpa_basic_capabilities(flavor)\r
- if len(caps_dict) > 0:\r
- self._logger.debug("basic_capabilities_info: %s" % caps_dict)\r
- hpa_caps.append(caps_dict)\r
-\r
- # cpupining capabilities\r
- caps_dict = self._get_cpupining_capabilities(extra_specs)\r
- if len(caps_dict) > 0:\r
- self._logger.debug("cpupining_capabilities_info: %s" % caps_dict)\r
- hpa_caps.append(caps_dict)\r
-\r
- # cputopology capabilities\r
- caps_dict = self._get_cputopology_capabilities(extra_specs)\r
- if len(caps_dict) > 0:\r
- self._logger.debug("cputopology_capabilities_info: %s" % caps_dict)\r
- hpa_caps.append(caps_dict)\r
-\r
- # hugepages capabilities\r
- caps_dict = self._get_hugepages_capabilities(extra_specs)\r
- if len(caps_dict) > 0:\r
- self._logger.debug("hugepages_capabilities_info: %s" % caps_dict)\r
- hpa_caps.append(caps_dict)\r
-\r
- # numa capabilities\r
- caps_dict = self._get_numa_capabilities(extra_specs)\r
- if len(caps_dict) > 0:\r
- self._logger.debug("numa_capabilities_info: %s" % caps_dict)\r
- hpa_caps.append(caps_dict)\r
-\r
- # storage capabilities\r
- caps_dict = self._get_storage_capabilities(flavor)\r
- if len(caps_dict) > 0:\r
- self._logger.debug("storage_capabilities_info: %s" % caps_dict)\r
- hpa_caps.append(caps_dict)\r
-\r
- # CPU instruction set extension capabilities\r
- caps_dict = self._get_instruction_set_capabilities(extra_specs)\r
- if len(caps_dict) > 0:\r
- self._logger.debug("instruction_set_capabilities_info: %s" % caps_dict)\r
- hpa_caps.append(caps_dict)\r
-\r
- # PCI passthrough capabilities\r
- caps_dict = self._get_pci_passthrough_capabilities(extra_specs)\r
- if len(caps_dict) > 0:\r
- self._logger.debug("pci_passthrough_capabilities_info: %s" % caps_dict)\r
- hpa_caps.append(caps_dict)\r
-\r
- # SRIOV-NIC capabilities\r
- caps_dict = self._get_sriov_nic_capabilities(extra_specs)\r
- if len(caps_dict) > 0:\r
- self._logger.debug("sriov_nic_capabilities_info: %s" % caps_dict)\r
- hpa_caps.append(caps_dict)\r
-\r
- # ovsdpdk capabilities\r
- caps_dict = self._get_ovsdpdk_capabilities(extra_specs, viminfo)\r
- if len(caps_dict) > 0:\r
- self._logger.debug("ovsdpdk_capabilities_info: %s" % caps_dict)\r
- hpa_caps.append(caps_dict)\r
-\r
- logger.debug("hpa_caps:%s" % hpa_caps)\r
- return hpa_caps\r
-\r
- def _get_hpa_basic_capabilities(self, flavor):\r
- basic_capability = {}\r
- feature_uuid = uuid.uuid4()\r
-\r
- try:\r
- basic_capability['hpa-capability-id'] = str(feature_uuid)\r
- basic_capability['hpa-feature'] = 'basicCapabilities'\r
- basic_capability['architecture'] = 'generic'\r
- basic_capability['hpa-version'] = 'v1'\r
-\r
- basic_capability['hpa-feature-attributes'] = []\r
- basic_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'numVirtualCpu',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(flavor['vcpus'])\r
- })\r
- basic_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key':'virtualMemSize',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(flavor['ram'],"MB")\r
- })\r
- except Exception as e:\r
- self._logger.error(traceback.format_exc())\r
- return (\r
- 11, str(e)\r
- )\r
-\r
- return basic_capability\r
-\r
- def _get_cpupining_capabilities(self, extra_specs):\r
- cpupining_capability = {}\r
- feature_uuid = uuid.uuid4()\r
-\r
- try:\r
- if 'hw:cpu_policy' in extra_specs\\r
- or 'hw:cpu_thread_policy' in extra_specs:\r
- cpupining_capability['hpa-capability-id'] = str(feature_uuid)\r
- cpupining_capability['hpa-feature'] = 'cpuPinning'\r
- cpupining_capability['architecture'] = 'generic'\r
- cpupining_capability['hpa-version'] = 'v1'\r
-\r
- cpupining_capability['hpa-feature-attributes'] = []\r
- if 'hw:cpu_thread_policy' in extra_specs:\r
- cpupining_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'logicalCpuThreadPinningPolicy',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(\r
- extra_specs['hw:cpu_thread_policy'])\r
- })\r
- if 'hw:cpu_policy' in extra_specs:\r
- cpupining_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key':'logicalCpuPinningPolicy',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(\r
- extra_specs['hw:cpu_policy'])\r
- })\r
- except Exception:\r
- self._logger.error(traceback.format_exc())\r
-\r
- return cpupining_capability\r
-\r
- def _get_cputopology_capabilities(self, extra_specs):\r
- cputopology_capability = {}\r
- feature_uuid = uuid.uuid4()\r
-\r
- try:\r
- if 'hw:cpu_sockets' in extra_specs\\r
- or 'hw:cpu_cores' in extra_specs\\r
- or 'hw:cpu_threads' in extra_specs:\r
- cputopology_capability['hpa-capability-id'] = str(feature_uuid)\r
- cputopology_capability['hpa-feature'] = 'cpuTopology'\r
- cputopology_capability['architecture'] = 'generic'\r
- cputopology_capability['hpa-version'] = 'v1'\r
-\r
- cputopology_capability['hpa-feature-attributes'] = []\r
- if 'hw:cpu_sockets' in extra_specs:\r
- cputopology_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'numCpuSockets',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(extra_specs['hw:cpu_sockets'])\r
- })\r
- if 'hw:cpu_cores' in extra_specs:\r
- cputopology_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'numCpuCores',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(extra_specs['hw:cpu_cores'])\r
- })\r
- if 'hw:cpu_threads' in extra_specs:\r
- cputopology_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'numCpuThreads',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(extra_specs['hw:cpu_threads'])\r
- })\r
- except Exception:\r
- self._logger.error(traceback.format_exc())\r
-\r
- return cputopology_capability\r
-\r
- def _get_hugepages_capabilities(self, extra_specs):\r
- hugepages_capability = {}\r
- feature_uuid = uuid.uuid4()\r
-\r
- try:\r
- if 'hw:mem_page_size' in extra_specs:\r
- hugepages_capability['hpa-capability-id'] = str(feature_uuid)\r
- hugepages_capability['hpa-feature'] = 'hugePages'\r
- hugepages_capability['architecture'] = 'generic'\r
- hugepages_capability['hpa-version'] = 'v1'\r
-\r
- hugepages_capability['hpa-feature-attributes'] = []\r
- if extra_specs['hw:mem_page_size'] == 'large':\r
- hugepages_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'memoryPageSize',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(2,"MB")\r
- })\r
- elif extra_specs['hw:mem_page_size'] == 'small':\r
- hugepages_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'memoryPageSize',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(4,"KB")\r
- })\r
- elif extra_specs['hw:mem_page_size'] == 'any':\r
- self._logger.info("Currently HPA feature memoryPageSize did not support 'any' page!!")\r
- else :\r
- hugepages_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'memoryPageSize',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(extra_specs['hw:mem_page_size'],"KB")\r
- })\r
- except Exception:\r
- self._logger.error(traceback.format_exc())\r
-\r
- return hugepages_capability\r
-\r
- def _get_numa_capabilities(self, extra_specs):\r
- numa_capability = {}\r
- feature_uuid = uuid.uuid4()\r
-\r
- try:\r
- if 'hw:numa_nodes' in extra_specs:\r
- numa_capability['hpa-capability-id'] = str(feature_uuid)\r
- numa_capability['hpa-feature'] = 'numa'\r
- numa_capability['architecture'] = 'generic'\r
- numa_capability['hpa-version'] = 'v1'\r
-\r
- numa_capability['hpa-feature-attributes'] = []\r
- numa_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'numaNodes',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(extra_specs['hw:numa_nodes'] or 0)\r
- })\r
-\r
- for num in range(0, int(extra_specs['hw:numa_nodes'])):\r
- numa_cpu_node = "hw:numa_cpus.%s" % num\r
- numa_mem_node = "hw:numa_mem.%s" % num\r
- numacpu_key = "numaCpu-%s" % num\r
- numamem_key = "numaMem-%s" % num\r
-\r
- if numa_cpu_node in extra_specs and numa_mem_node in extra_specs:\r
- numa_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': numacpu_key,\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(extra_specs[numa_cpu_node])\r
- })\r
- numa_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': numamem_key,\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(extra_specs[numa_mem_node],"MB")\r
- })\r
- except Exception:\r
- self._logger.error(traceback.format_exc())\r
-\r
- return numa_capability\r
-\r
- def _get_storage_capabilities(self, flavor):\r
- storage_capability = {}\r
- feature_uuid = uuid.uuid4()\r
-\r
- try:\r
- storage_capability['hpa-capability-id'] = str(feature_uuid)\r
- storage_capability['hpa-feature'] = 'localStorage'\r
- storage_capability['architecture'] = 'generic'\r
- storage_capability['hpa-version'] = 'v1'\r
-\r
- storage_capability['hpa-feature-attributes'] = []\r
- storage_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'diskSize',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(\r
- flavor['disk'] or 0, "GB")\r
- })\r
- storage_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'swapMemSize',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(\r
- flavor['swap'] or 0, "MB")\r
- })\r
- storage_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'ephemeralDiskSize',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(\r
- flavor['OS-FLV-EXT-DATA:ephemeral'] or 0, "GB")\r
- })\r
- except Exception:\r
- self._logger.error(traceback.format_exc())\r
-\r
- return storage_capability\r
-\r
- def _get_instruction_set_capabilities(self, extra_specs):\r
- instruction_capability = {}\r
- feature_uuid = uuid.uuid4()\r
- try:\r
- if 'hw:capabilities:cpu_info:features' in extra_specs:\r
- instruction_capability['hpa-capability-id'] = str(feature_uuid)\r
- instruction_capability['hpa-feature'] = 'instructionSetExtensions'\r
- instruction_capability['architecture'] = 'Intel64'\r
- instruction_capability['hpa-version'] = 'v1'\r
-\r
- instruction_capability['hpa-feature-attributes'] = []\r
- instruction_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'instructionSetExtensions',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(\r
- extra_specs['hw:capabilities:cpu_info:features'])\r
- })\r
- except Exception:\r
- self._logger.error(traceback.format_exc())\r
-\r
- return instruction_capability\r
-\r
- def _get_pci_passthrough_capabilities(self, extra_specs):\r
- pci_passthrough_capability = {}\r
- feature_uuid = uuid.uuid4()\r
-\r
- try:\r
-\r
- if 'pci_passthrough:alias' in extra_specs:\r
- value1 = extra_specs['pci_passthrough:alias'].split(':')\r
- value2 = value1[0].split('-')\r
-\r
- pci_passthrough_capability['hpa-capability-id'] = str(feature_uuid)\r
- pci_passthrough_capability['hpa-feature'] = 'pciePassthrough'\r
- pci_passthrough_capability['architecture'] = str(value2[2])\r
- pci_passthrough_capability['hpa-version'] = 'v1'\r
-\r
-\r
- pci_passthrough_capability['hpa-feature-attributes'] = []\r
- pci_passthrough_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'pciCount',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(value1[1])\r
- })\r
- pci_passthrough_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'pciVendorId',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(value2[3])\r
- })\r
- pci_passthrough_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'pciDeviceId',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(value2[4])\r
- })\r
- except Exception:\r
- self._logger.error(traceback.format_exc())\r
-\r
- return pci_passthrough_capability\r
-\r
- def _get_sriov_nic_capabilities(self, extra_specs):\r
- sriov_capability = {}\r
- feature_uuid = uuid.uuid4()\r
-\r
- try:\r
- if 'aggregate_instance_extra_specs:sriov_nic' in extra_specs:\r
- value1 = extra_specs['aggregate_instance_extra_specs:sriov_nic'].split(':')\r
- value2 = value1[0].split('-', 5)\r
-\r
- sriov_capability['hpa-capability-id'] = str(feature_uuid)\r
- sriov_capability['hpa-feature'] = 'sriovNICNetwork'\r
- sriov_capability['architecture'] = str(value2[2])\r
- sriov_capability['hpa-version'] = 'v1'\r
-\r
- sriov_capability['hpa-feature-attributes'] = []\r
- sriov_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'pciCount',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(value1[1])})\r
- sriov_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'pciVendorId',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(value2[3])})\r
- sriov_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'pciDeviceId',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(value2[4])})\r
- sriov_capability['hpa-feature-attributes'].append(\r
- {'hpa-attribute-key': 'physicalNetwork',\r
- 'hpa-attribute-value':\r
- '{{\"value\":\"{0}\"}}'.format(value2[5])})\r
- except Exception:\r
- self._logger.error(traceback.format_exc())\r
-\r
- return sriov_capability\r
-\r
- def _get_ovsdpdk_capabilities(self, extra_specs, viminfo):\r
- ovsdpdk_capability = {}\r
- feature_uuid = uuid.uuid4()\r
-\r
- try:\r
- cloud_extra_info_str = viminfo.get('cloud_extra_info')\r
- if not isinstance(cloud_extra_info_str, dict):\r
- try:\r
- cloud_extra_info_str = json.loads(cloud_extra_info_str)\r
- except Exception as ex:\r
- logger.error("Can not convert cloud extra info %s %s" % (\r
- str(ex), cloud_extra_info_str))\r
- return {}\r
- if cloud_extra_info_str :\r
- cloud_dpdk_info = cloud_extra_info_str.get("ovsDpdk")\r
- if cloud_dpdk_info :\r
- ovsdpdk_capability['hpa-capability-id'] = str(feature_uuid)\r
- ovsdpdk_capability['hpa-feature'] = 'ovsDpdk'\r
- ovsdpdk_capability['architecture'] = 'Intel64'\r
- ovsdpdk_capability['hpa-version'] = 'v1'\r
-\r
- ovsdpdk_capability['hpa-feature-attributes'] = [\r
- {\r
- 'hpa-attribute-key': str(cloud_dpdk_info.get("libname")),\r
- 'hpa-attribute-value': '{{\"value\":\"{0}\"}}'.format(\r
- cloud_dpdk_info.get("libversion"))\r
- },]\r
- except Exception:\r
- self._logger.error(traceback.format_exc())\r
-\r
- return ovsdpdk_capability\r
-\r
- # def update_image_metadata(self, cloud_owner, cloud_region_id, image_id, metadatainfo):\r
- # '''\r
- # populate image meta data\r
- # :param cloud_owner:\r
- # :param cloud_region_id:\r
- # :param image_id:\r
- # :param metadatainfo:\r
- # metaname: string\r
- # metaval: string\r
- # :return:\r
- # '''\r
- #\r
- # if cloud_owner and cloud_region_id:\r
- # retcode, content, status_code = \\r
- # restcall.req_to_aai(\r
- # "/cloud-infrastructure/cloud-regions/cloud-region"\r
- # + "/%s/%s/images/image/%s/metadata/metadatum/%s"\r
- # % (cloud_owner, cloud_region_id, image_id, metadatainfo['metaname']),\r
- # "PUT", content=metadatainfo)\r
- #\r
- # self._logger.debug("update_image,vimid:%s_%s req_to_aai: %s/%s, return %s, %s, %s"\r
- # % (cloud_owner,cloud_region_id,image_id,metadatainfo['metaname'],\r
- # retcode, content, status_code))\r
- # return retcode\r
- # return 1\r
-\r
- def _discover_images(self, vimid="", session=None, viminfo=None):\r
- try:\r
- cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)\r
- for image in self._get_list_resources(\r
- "/v2/images", "image", session, viminfo, vimid,\r
- "images"):\r
- image_info = {\r
- 'image-id': image['id'],\r
- 'image-name': image['name'],\r
- 'image-selflink': image['self'],\r
-\r
- 'image-os-distro': image.get('os_distro') or 'Unknown',\r
- 'image-os-version': image.get('os_version') or 'Unknown',\r
- 'application': image.get('application'),\r
- 'application-vendor': image.get('application_vendor'),\r
- 'application-version': image.get('application_version'),\r
- 'image-architecture': image.get('architecture'),\r
- }\r
-\r
- ret = self._update_resoure(\r
- cloud_owner, cloud_region_id, image['id'], image_info,\r
- "image")\r
- if ret != 0:\r
- # failed to update image\r
- self._logger.debug(\r
- "failed to populate image info into AAI: %s,"\r
- " image id: %s, ret:%s"\r
- % (vimid, image_info['image-id'], ret))\r
- continue\r
-\r
- schema = image['schema']\r
- if schema:\r
- req_resource = schema\r
- service = {'service_type': "image",\r
- 'interface': 'public',\r
- 'region_name': viminfo['openstack_region_id']\r
- if viminfo.get('openstack_region_id')\r
- else viminfo['cloud_region_id']\r
- }\r
-\r
- self._logger.info("making request with URI:%s" %\r
- req_resource)\r
- resp = session.get(req_resource, endpoint_filter=service)\r
- self._logger.info("request returns with status %s" %\r
- resp.status_code)\r
- if resp.status_code == status.HTTP_200_OK:\r
- self._logger.debug("with content:%s" %\r
- resp.json())\r
- pass\r
- content = resp.json()\r
-\r
- # if resp.status_code == status.HTTP_200_OK:\r
- # parse the schema? TBD\r
- # self.update_image(cloud_owner, cloud_region_id, image_info)\r
- #metadata_info = {}\r
- return (0, "succeed")\r
- except VimDriverNewtonException as e:\r
- self._logger.error("VimDriverNewtonException:"\r
- " status:%s, response:%s" %\r
- (e.http_status, e.content))\r
- return (\r
- e.http_status, e.content\r
- )\r
- except HttpError as e:\r
- self._logger.error("HttpError: status:%s, response:%s" %\r
- (e.http_status, e.response.json()))\r
- return (\r
- e.http_status, e.response.json()\r
- )\r
- except Exception as e:\r
- self._logger.error(traceback.format_exc())\r
- return (\r
- 11, str(e)\r
- )\r
-\r
- def _discover_availability_zones(self, vimid="", session=None,\r
- viminfo=None):\r
- try:\r
- az_pserver_info = {}\r
- cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)\r
- for az in self._get_list_resources(\r
- "/os-availability-zone/detail", "compute", session,\r
- viminfo, vimid,\r
- "availabilityZoneInfo"):\r
- az_info = {\r
- 'availability-zone-name': az['zoneName'],\r
- 'operational-status': az['zoneState']['available']\r
- if az.get('zoneState') else '',\r
- 'hypervisor-type': '',\r
- }\r
- # filter out the default az: "internal" and "nova"\r
- azName = az.get('zoneName', None)\r
- # comment it for test the registration process only\r
- # if azName == 'nova':\r
- # continue\r
- if azName == 'internal':\r
- continue\r
-\r
- # get list of host names\r
- pservers_info = [k for (k, v) in list(az['hosts'].items())]\r
- # set the association between az and pservers\r
- az_pserver_info[azName] = pservers_info\r
-\r
- az_info['hypervisor-type'] = 'QEMU' # default for OpenStack\r
-\r
- ret, content = self._update_resoure(\r
- cloud_owner, cloud_region_id, az['zoneName'], az_info,\r
- "availability-zone")\r
- if ret != 0:\r
- # failed to update image\r
- self._logger.debug(\r
- "failed to populate az info into AAI: "\r
- "%s, az name: %s, ret:%s"\r
- % (vimid, az_info['availability-zone-name'], ret))\r
- # return (\r
- # ret,\r
- # "fail to popluate az info into AAI:%s" % content\r
- # )\r
- continue\r
-\r
- # populate pservers:\r
- for hostname in pservers_info:\r
- if hostname == "":\r
- continue\r
-\r
- pservername = vimid+"_"+hostname\r
- selflink = ""\r
- # if self.proxy_prefix[3:] == "/v1":\r
- # selflink = "%s/%s/%s/compute/os-hypervisors/detail?hypervisor_hostname_pattern=%s"%\\r
- # (self.proxy_prefix, cloud_owner, cloud_region_id , hostname)\r
- # else:\r
- # selflink = "%s/%s/compute/os-hypervisors/detail?hypervisor_hostname_pattern=%s" % \\r
- # (self.proxy_prefix, vimid, hostname)\r
-\r
- pinfo = {\r
- "hostname": pservername,\r
- "server-selflink": selflink,\r
- "pserver-id": hostname\r
- }\r
- self._update_pserver(cloud_owner, cloud_region_id, pinfo)\r
- self._update_pserver_relation_az(cloud_owner, cloud_region_id, pinfo, azName)\r
- self._update_pserver_relation_cloudregion(cloud_owner, cloud_region_id, pinfo)\r
-\r
- return (0, az_pserver_info)\r
- except VimDriverNewtonException as e:\r
- self._logger.error(\r
- "VimDriverNewtonException: status:%s,"\r
- " response:%s" % (e.http_status, e.content))\r
- return (\r
- e.http_status, e.content\r
- )\r
- except HttpError as e:\r
- self._logger.error(\r
- "HttpError: status:%s, response:%s" %\r
- (e.http_status, e.response.json()))\r
- return (\r
- e.http_status, e.response.json()\r
- )\r
- except Exception as e:\r
- self._logger.error(traceback.format_exc())\r
- return (\r
- 11, str(e)\r
- )\r
-\r
- # def _discover_volumegroups(self, vimid="", session=None, viminfo=None):\r
- # cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)\r
- # for cg in self._get_list_resources(\r
- # "/consistencygroups/detail", "volumev3", session,\r
- # viminfo, vimid,\r
- # "consistencygroups"):\r
- # vg_info = {\r
- # 'volume-group-id': cg['id'],\r
- # 'volume-group-name': cg['name'],\r
- # 'vnf-type': '',\r
- # }\r
- #\r
- # ret = self._update_resoure(\r
- # cloud_owner, cloud_region_id, cg['id'], vg_info,\r
- # "volume-group")\r
- # if ret != 0:\r
- # # failed to update image\r
- # self._logger.debug("failed to populate volumegroup info into AAI: %s, volume-group-id: %s, ret:%s"\r
- # % (vimid, vg_info['volume-group-id'], ret))\r
-\r
- def _discover_snapshots(self, vimid="", session=None, viminfo=None):\r
- try:\r
- cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)\r
- for ss in self._get_list_resources(\r
- "/snapshots/detail", "volumev3", session,\r
- viminfo, vimid,\r
- "snapshots"):\r
- snapshot_info = {\r
- 'snapshot-id': ss['id'],\r
- 'snapshot-name': ss['name'],\r
- }\r
- if ss.get('metadata'):\r
- snapshot_info['snapshot-architecture'] = ss['metadata'].get('architecture')\r
- snapshot_info['application'] = ss['metadata'].get('architecture')\r
- snapshot_info['snapshot-os-distro'] = ss['metadata'].get('os-distro')\r
- snapshot_info['snapshot-os-version'] = ss['metadata'].get('os-version')\r
- snapshot_info['application-vendor'] = ss['metadata'].get('vendor')\r
- snapshot_info['application-version'] = ss['metadata'].get('version')\r
- snapshot_info['snapshot-selflink'] = ss['metadata'].get('selflink')\r
- snapshot_info['prev-snapshot-id'] = ss['metadata'].get('prev-snapshot-id')\r
-\r
- ret, content = self._update_resoure(\r
- cloud_owner, cloud_region_id, ss['id'], snapshot_info,\r
- "snapshot")\r
- if ret != 0:\r
- # failed to update image\r
- self._logger.debug("failed to populate snapshot info into AAI: %s, snapshot-id: %s, ret:%s"\r
- % (vimid, snapshot_info['snapshot-id'], ret))\r
- return (\r
- ret,\r
- "fail to populate snapshot into AAI:%s" % content\r
- )\r
- return 0, "Succeed"\r
- except VimDriverNewtonException as e:\r
- self._logger.error("VimDriverNewtonException: status:%s, response:%s" % (e.http_status, e.content))\r
- return (\r
- e.http_status, e.content\r
- )\r
- except HttpError as e:\r
- self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))\r
- return (\r
- e.http_status, e.response.json()\r
- )\r
- except Exception as e:\r
- self._logger.error(traceback.format_exc())\r
- return (\r
- 11, str(e)\r
- )\r
-\r
- # def _discover_servergroups(self, vimid="", session=None, viminfo=None):\r
- # for sg in self._get_list_resources(\r
- # "/os-server-groups", "compute", session,\r
- # viminfo, vimid,\r
- # "security groups"):\r
-\r
- def _update_pserver_relation_az(self, cloud_owner, cloud_region_id, pserverinfo, azName):\r
- related_link = \\r
- "/aai/%s/cloud-infrastructure/cloud-regions/"\\r
- "cloud-region/%s/%s/"\\r
- "availability-zones/availability-zone/%s" % (\r
- settings.AAI_SCHEMA_VERSION, cloud_owner,\r
- cloud_region_id, azName)\r
-\r
- relationship_data = \\r
- {\r
- 'related-to': 'availability-zone',\r
- 'related-link': related_link,\r
- 'relationship-data': [\r
- {\r
- 'relationship-key': 'availability-zone.availability-zone-name',\r
- 'relationship-value': azName\r
- }\r
- ],\r
- "related-to-property": [\r
- {\r
- "property-key": "availability-zone.availability-zone-name"\r
- }\r
- ]\r
- }\r
-\r
- retcode, content, status_code = \\r
- restcall.req_to_aai("/cloud-infrastructure/pservers/pserver/%s"\r
- "/relationship-list/relationship"\r
- % (pserverinfo['hostname']), "PUT",\r
- content=relationship_data)\r
-\r
- self._logger.debug("update_pserver_az_relation,vimid:%s_%s, "\r
- "az:%s req_to_aai: %s, return %s, %s, %s"\r
- % (cloud_owner, cloud_region_id, azName,\r
- pserverinfo['hostname'], retcode, content,\r
- status_code))\r
- return (\r
- 0,\r
- "succeed"\r
- )\r
-\r
- def _update_pserver_relation_cloudregion(\r
- self,\r
- cloud_owner,\r
- cloud_region_id,\r
- pserverinfo\r
- ):\r
- related_link = \\r
- "/aai/%s/cloud-infrastructure/cloud-regions/"\\r
- "cloud-region/%s/%s" % (\r
- settings.AAI_SCHEMA_VERSION, cloud_owner,\r
- cloud_region_id)\r
-\r
- relationship_data = \\r
- {\r
- 'related-to': 'cloud-region',\r
- 'related-link': related_link,\r
- 'relationship-data': [\r
- {\r
- 'relationship-key': 'cloud-region.cloud-owner',\r
- 'relationship-value': cloud_owner\r
- },\r
- {\r
- 'relationship-key': 'cloud-region.cloud-region-id',\r
- 'relationship-value': cloud_region_id\r
- }\r
- ],\r
- "related-to-property": [\r
- {\r
- "property-key": "cloud-region.cloud-owner"\r
- },\r
- {\r
- "property-key": "cloud-region.cloud-region-id"\r
- }\r
- ]\r
- }\r
-\r
- retcode, content, status_code = \\r
- restcall.req_to_aai("/cloud-infrastructure/pservers/pserver"\r
- "/%s/relationship-list/relationship"\r
- % (pserverinfo['hostname']), "PUT",\r
- content=relationship_data)\r
-\r
- self._logger.debug("update_pserver_cloudregion_relation,vimid:%s_%s"\r
- " req_to_aai: %s, return %s, %s, %s"\r
- % (cloud_owner, cloud_region_id,\r
- pserverinfo['hostname'], retcode, content,\r
- status_code))\r
- return (\r
- 0,\r
- "succeed"\r
- )\r
-\r
- def _update_pserver(self, cloud_owner, cloud_region_id, pserverinfo):\r
- '''\r
- populate pserver into AAI\r
- :param cloud_owner:\r
- :param cloud_region_id:\r
- :param pserverinfo:\r
- hostname: string\r
- in-maint: boolean\r
-\r
- pserver-name2: string\r
- pserver-id: string\r
- ptnii-equip-name: string\r
- number-of-cpus: integer\r
- disk-in-gigabytes: integer\r
- ram-in-megabytes: integer\r
- equip-type: string\r
- equip-vendor: string\r
- equip-model: string\r
- fqdn: string\r
- pserver-selflink: string\r
- ipv4-oam-address: string\r
- serial-number: string\r
- ipaddress-v4-loopback-0: string\r
- ipaddress-v6-loopback-0: string\r
- ipaddress-v4-aim: string\r
- ipaddress-v6-aim: string\r
- ipaddress-v6-oam: string\r
- inv-status: string\r
- internet-topology: string\r
- purpose: string\r
- prov-status: string\r
- management-option: string\r
- host-profile: string\r
-\r
- :return:\r
- '''\r
-\r
- if cloud_owner and cloud_region_id:\r
- resource_url = "/cloud-infrastructure/pservers/pserver/%s" \\r
- % (pserverinfo['hostname'])\r
-\r
- # get cloud-region\r
- retcode, content, status_code = \\r
- restcall.req_to_aai(resource_url, "GET")\r
-\r
- # add resource-version to url\r
- if retcode == 0 and content:\r
- content = json.JSONDecoder().decode(content)\r
- #pserverinfo["resource-version"] = content["resource-version"]\r
- content.update(pserverinfo)\r
- pserverinfo = content\r
-\r
- retcode, content, status_code = \\r
- restcall.req_to_aai(resource_url, "PUT", content=pserverinfo)\r
-\r
- self._logger.debug(\r
- "update_snapshot,vimid:%s_%s req_to_aai: %s,"\r
- " return %s, %s, %s" % (\r
- cloud_owner, cloud_region_id,\r
- pserverinfo['hostname'],\r
- retcode, content, status_code))\r
-\r
- return retcode, content\r
- else:\r
- # unknown cloud owner,region_id\r
- return (\r
- 10,\r
- "Cloud Region not found: %s,%s"\r
- % (cloud_owner, cloud_region_id)\r
- )\r
-\r
- def _discover_pservers(self, vimid="", session=None, viminfo=None):\r
- try:\r
- cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)\r
- for hypervisor in self._get_list_resources(\r
- "/os-hypervisors/detail", "compute", session,\r
- viminfo, vimid,\r
- "hypervisors"):\r
- hypervisor_info = {\r
- 'hostname': hypervisor['hypervisor_hostname'],\r
- 'in-maint': hypervisor['state'],\r
-\r
- 'pserver-id': hypervisor.get('id'),\r
- 'ptnii-equip-name': hypervisor.get('id'),\r
- 'disk-in-gigabytes': hypervisor.get('local_gb'),\r
- 'ram-in-megabytes': hypervisor.get('memory_mb'),\r
- 'pserver-selflink': hypervisor.get('hypervisor_links'),\r
- 'ipv4-oam-address': hypervisor.get('host_ip'),\r
- }\r
-\r
- if hypervisor.get('cpu_info'):\r
- cpu_info = json.loads(hypervisor['cpu_info'])\r
- if cpu_info.get('topology'):\r
- cputopo = cpu_info.get('topology')\r
- n_cpus = cputopo['cores'] * cputopo['threads'] * cputopo['sockets']\r
- hypervisor_info['number-of-cpus'] = n_cpus\r
-\r
- ret, content = self._update_pserver(cloud_owner, cloud_region_id,\r
- hypervisor_info)\r
- if ret != 0:\r
- # failed to update image\r
- self._logger.debug(\r
- "failed to populate pserver info into AAI:"\r
- " %s, hostname: %s, ret:%s"\r
- % (vimid, hypervisor_info['hostname'], ret))\r
- return ret, "fail to update pserver to AAI:%s" % content\r
-\r
- return 0, "succeed"\r
- except VimDriverNewtonException as e:\r
- self._logger.error(\r
- "VimDriverNewtonException: status:%s, response:%s"\r
- % (e.http_status, e.content))\r
- return (\r
- e.http_status, e.content\r
- )\r
- except HttpError as e:\r
- self._logger.error(\r
- "HttpError: status:%s, response:%s"\r
- % (e.http_status, e.response.json()))\r
- return (\r
- e.http_status, e.response.json()\r
- )\r
- except Exception as e:\r
- self._logger.error(traceback.format_exc())\r
- return (\r
- 11, str(e)\r
- )\r
-\r
- def _update_proxy_identity_endpoint(self, vimid):\r
- '''\r
- update cloud_region's identity url\r
- :param cloud_owner:\r
- :param cloud_region_id:\r
- :param url:\r
- :return:\r
- '''\r
- try:\r
- cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)\r
- if cloud_owner and cloud_region_id:\r
- resource_url = \\r
- "/cloud-infrastructure/cloud-regions" \\r
- "/cloud-region/%s/%s" \\r
- % (cloud_owner, cloud_region_id)\r
-\r
- # get cloud-region\r
- retcode, content, status_code = \\r
- restcall.req_to_aai(resource_url, "GET")\r
-\r
- # add resource-version to url\r
- if retcode == 0 and content:\r
- viminfo = json.JSONDecoder().decode(content)\r
- viminfo['identity-url'] =\\r
- self.proxy_prefix + "/%s/identity/v2.0" % vimid \\r
- if self.proxy_prefix[-3:] == "/v0" \\r
- else self.proxy_prefix +\\r
- "/%s/%s/identity/v2.0"\\r
- % extsys.decode_vim_id(vimid)\r
-\r
- retcode, content, status_code = \\r
- restcall.req_to_aai(\r
- "/cloud-infrastructure/cloud-regions"\r
- "/cloud-region/%s/%s"\r
- % (cloud_owner, cloud_region_id), "PUT",\r
- content=viminfo)\r
-\r
- self._logger.debug(\r
- "update_proxy_identity_endpoint,vimid:"\r
- "%s req_to_aai: %s, return %s, %s, %s"\r
- % (vimid, viminfo['identity-url'],\r
- retcode, content, status_code))\r
- return 0, "succeed"\r
- else:\r
- self._logger.debug(\r
- "failure: update_proxy_identity_endpoint,vimid:"\r
- "%s req_to_aai: return %s, %s, %s"\r
- % (vimid, retcode, content, status_code))\r
- return retcode, content\r
- else:\r
- return (\r
- 10,\r
- "Cloud Region not found: %s" % vimid\r
- )\r
-\r
- except VimDriverNewtonException as e:\r
- self._logger.error(\r
- "VimDriverNewtonException: status:%s, response:%s"\r
- % (e.http_status, e.content))\r
- return (\r
- e.http_status, e.content\r
- )\r
- except HttpError as e:\r
- self._logger.error(\r
- "HttpError: status:%s, response:%s"\r
- % (e.http_status, e.response.json()))\r
- return (\r
- e.http_status, e.response.json()\r
- )\r
- except Exception as e:\r
- self._logger.error(traceback.format_exc())\r
- return (\r
- 11, str(e)\r
- )\r
-\r
+# Copyright (c) 2017-2019 Wind River Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import json
+import uuid
+import traceback
+
+from keystoneauth1.exceptions import HttpError
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.views import APIView
+
+from common.exceptions import VimDriverNewtonException
+from common.msapi import extsys
+from common.msapi.helper import MultiCloudThreadHelper
+from common.msapi.helper import MultiCloudAAIHelper
+from common.utils import restcall
+from newton_base.util import VimDriverUtils
+from django.conf import settings
+
+logger = logging.getLogger(__name__)
+
+
+class Registry(APIView):
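+    '''
+    API view to register/unregister a cloud region (VIM) with AAI;
+    the actual work is offloaded to a background worker thread.
+    '''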
+
+ def __init__(self):
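+        '''Set up logger, worker thread and AAI helper unless a subclass already provides them.'''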
+ # logger.debug("Registry __init__: %s" % traceback.format_exc())
+ if not hasattr(self, "_logger"):
+ self._logger = logger
+
+ if not hasattr(self, "register_thread"):
+            # dedicated thread to offload the VIM registration process
+ self.register_thread = MultiCloudThreadHelper("vimupdater")
+
+ if not hasattr(self, "register_helper") or not self.register_helper:
+ if not hasattr(self, "proxy_prefix"):
+ self.proxy_prefix = "multicloud"
+ if not hasattr(self, "AAI_BASE_URL"):
+ self.AAI_BASE_URL = "127.0.0.1"
+ self.register_helper = RegistryHelper(
+ self.proxy_prefix or "multicloud",
+ self.AAI_BASE_URL or "127.0.0.1")
+
+ def post(self, request, vimid=""):
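+        '''Queue a one-time registration job for vimid on the worker thread and return 202 immediately.'''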
+ self._logger.info("registration with vimid: %s" % vimid)
+ self._logger.debug("with data: %s" % request.data)
+
+ try:
+ # Get the specified tenant id
+ specified_project_idorname = request.META.get("Project", None)
+
+ # compose the one time backlog item
+ backlog_item = {
+ "id": vimid,
+ "worker": self.register_helper.registryV0,
+ "payload": (vimid, specified_project_idorname),
+ "repeat": 0,
+                "status": (1, "The registration is in progress")
+ }
+ self.register_thread.add(backlog_item)
+ if 0 == self.register_thread.state():
+ self.register_thread.start()
+
+ return Response(status=status.HTTP_202_ACCEPTED)
+
+ except VimDriverNewtonException as e:
+ return Response(data={'error': e.content}, status=e.status_code)
+ except HttpError as e:
+ self._logger.error("HttpError: status:%s, response:%s"
+ % (e.http_status, e.response.json()))
+ return Response(data=e.response.json(), status=e.http_status)
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return Response(
+ data={'error': str(e)},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+ def get(self, request, vimid):
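+        '''Return the status of the queued (de-)registration job for vimid, or 404 if no backlog item exists.'''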
+ try:
+ backlog_item = self.register_thread.get(vimid)
+ if backlog_item:
+ return Response(
+ data={'status': backlog_item.get(
+ "status", "Status not available, vimid: %s" % vimid)},
+ status=status.HTTP_200_OK)
+ else:
+ return Response(
+ data={
+ 'error': "Registration process for "
+ "Cloud Region not found: %s"
+ % vimid
+ },
+ status=status.HTTP_404_NOT_FOUND)
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return Response(
+ data={'error': str(e)},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+ def delete(self, request, vimid=""):
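+        '''Queue a one-time de-registration job for vimid on the worker thread and return 204 immediately.'''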
+ self._logger.debug("Registration--delete::data> %s" % request.data)
+        self._logger.debug("Registration--delete::vimid > %s" % vimid)
+ try:
+
+ # compose the one time backlog item
+ backlog_item = {
+ "id": vimid,
+ "worker": self.register_helper.unregistryV0,
+                "payload": (vimid,),  # one-element tuple: the trailing comma is required
+ "repeat": 0,
+ "status": (1, "The de-registration is in progress")
+ }
+ self.register_thread.add(backlog_item)
+ if 0 == self.register_thread.state():
+ self.register_thread.start()
+
+ return Response(
+ status=status.HTTP_204_NO_CONTENT
+ )
+ except VimDriverNewtonException as e:
+ return Response(data={'error': e.content}, status=e.status_code)
+ except HttpError as e:
+ self._logger.error("HttpError: status:%s, response:%s"
+ % (e.http_status, e.response.json()))
+ return Response(data=e.response.json(), status=e.http_status)
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return Response(data={'error': str(e)},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+
+
+class RegistryHelper(MultiCloudAAIHelper):
+ '''
+    Helper to discover a cloud region's resources and register them with AAI
+ '''
+
+ def __init__(self, multicloud_prefix, aai_base_url):
+ # logger.debug("RegistryHelper __init__: %s" % traceback.format_exc())
+ self.proxy_prefix = multicloud_prefix
+ self.aai_base_url = aai_base_url
+ self._logger = logger
+ super(RegistryHelper, self).__init__(multicloud_prefix, aai_base_url)
+
+ def registryV1(self, cloud_owner, cloud_region_id):
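+        '''V1 entry point: encode cloud_owner/cloud_region_id into a vimid and delegate to registryV0.'''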
+ # cloud_owner = payload.get("cloud-owner", None)
+ # cloud_region_id = payload.get("cloud-region-id", None)
+ vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
+ return self.registryV0(vimid)
+
+ def registryV0(self, vimid, project_idorname=None):
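+        '''Discover the VIM's tenants, flavors, images and availability zones and populate them into AAI.'''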
+ # populate proxy identity url
+ self._update_proxy_identity_endpoint(vimid)
+
+ # prepare request resource to vim instance
+ # get token:
+ viminfo = VimDriverUtils.get_vim_info(vimid)
+ sess = None
+ if not viminfo:
+ return (
+ 10,
+ "Cloud Region not found in AAI: %s" % vimid
+ )
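+        # Build an OpenStack session: try the specified project as a tenant id,
+        # then as a tenant name, and finally fall back to the default tenant from AAI.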
+ if project_idorname:
+ try:
+ # check if specified with tenant id
+ sess = VimDriverUtils.get_session(
+ viminfo, tenant_name=None,
+ tenant_id=project_idorname
+ )
+ except Exception as e:
+ pass
+
+ if not sess:
+ try:
+ # check if specified with tenant name
+ sess = VimDriverUtils.get_session(
+ viminfo, tenant_name=project_idorname,
+ tenant_id=None
+ )
+ except Exception as e:
+ pass
+
+ if not sess:
+ # set the default tenant since there is no tenant info in the VIM yet
+ sess = VimDriverUtils.get_session(
+ viminfo, tenant_name=viminfo.get('tenant', None))
+
+ # step 1. discover all projects and populate into AAI
+        retcode, status_msg = self._discover_tenants(vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return (
+ # retcode, status
+ # )
+
+ # discover all flavors
+        retcode, status_msg = self._discover_flavors(vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return (
+ # retcode, status
+ # )
+
+ # discover all images
+        retcode, status_msg = self._discover_images(vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return (
+ # retcode, status
+ # )
+
+ # discover all az
+        retcode, status_msg = self._discover_availability_zones(vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return (
+ # retcode, status
+ # )
+
+ # discover all vg
+ #self._discover_volumegroups(vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return (
+ # retcode, status
+ # )
+
+ # discover all snapshots
+ #self._discover_snapshots(vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return retcode, status
+
+ # discover all server groups
+ #self.discover_servergroups(request, vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return retcode, status
+
+ # discover all pservers
+ #self._discover_pservers(vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return retcode, status
+
+ return (
+ 0,
+ "Registration finished for Cloud Region: %s" % vimid
+ )
+
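+ # Illustrative usage sketch (kept as a comment; the AAI URL and vimid
+ # below are placeholder values, not taken from this module):
+ #
+ # helper = RegistryHelper("multicloud", "https://aai.example:8443/aai/v13")
+ # retcode, message = helper.registryV0("CloudOwner_RegionOne")
+ # if retcode != 0:
+ # logger.warning("registration failed: %s" % message)
+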
+ def unregistryV1(self, cloud_owner, cloud_region_id):
+ # cloud_owner = payload.get("cloud-owner", None)
+ # cloud_region_id = payload.get("cloud-region-id", None)
+ vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
+ return self.unregistryV0(vimid)
+
+ def unregistryV0(self, vimid):
+ # prepare request resource to vim instance
+ # get token:
+ viminfo = VimDriverUtils.get_vim_info(vimid)
+ if not viminfo:
+ return (
+ 10,
+ "Cloud Region not found:" % vimid
+ )
+
+ cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+
+ # get the resource first
+ resource_url = ("/cloud-infrastructure/cloud-regions/"
+ "cloud-region/%(cloud_owner)s/%(cloud_region_id)s?depth=all"
+ % {
+ "cloud_owner": cloud_owner,
+ "cloud_region_id": cloud_region_id,
+ })
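+ # for illustration (placeholder values): with cloud_owner "CloudOwner"
+ # and cloud_region_id "RegionOne" this composes
+ # /cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/RegionOne?depth=all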
+
+ # get cloud-region
+ retcode, content, status_code = \
+ restcall.req_to_aai(resource_url, "GET")
+
+ # parse the cloud-region document; it carries the resource-version values needed for the deletes below
+ cloudregiondata = {}
+ if retcode == 0 and content:
+ cloudregiondata = json.JSONDecoder().decode(content)
+ else:
+ return (
+ 10,
+ "Cloud Region not found: %s, %s" % (cloud_owner, cloud_region_id)
+ )
+
+ # step 1. remove all tenants
+ tenants = cloudregiondata.get("tenants", None)
+ for tenant in tenants.get("tenant", []) if tenants else []:
+ # common prefix
+ aai_cloud_region = \
+ "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
+ % (cloud_owner, cloud_region_id, tenant['tenant-id'])
+
+ # remove all vservers
+ try:
+ # get list of vservers
+ vservers = tenant.get('vservers', {}).get('vserver', [])
+ for vserver in vservers:
+ try:
+ # iterate vports; an exception is raised if no l-interface exists
+ for vport in vserver['l-interfaces']['l-interface']:
+ # delete vport
+ vport_delete_url =\
+ aai_cloud_region + \
+ "/vservers/vserver/%s/l-interfaces/l-interface/%s?resource-version=%s" \
+ % (vserver['vserver-id'], vport['interface-name'],
+ vport['resource-version'])
+ restcall.req_to_aai(vport_delete_url, "DELETE")
+ except Exception:
+ # no l-interfaces on this vserver; nothing to delete
+ pass
+
+ try:
+ # delete vserver
+ vserver_delete_url =\
+ aai_cloud_region +\
+ "/vservers/vserver/%s?resource-version=%s" \
+ % (vserver['vserver-id'],
+ vserver['resource-version'])
+ restcall.req_to_aai(vserver_delete_url, "DELETE")
+ except Exception:
+ # skip this vserver and continue with the rest
+ continue
+
+ except Exception:
+ self._logger.error(traceback.format_exc())
+
+ resource_url = ("/cloud-infrastructure/cloud-regions/"
+ "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"
+ "%(resource_type)ss/%(resource_type)s/%(resoure_id)s/"
+ "?resource-version=%(resource-version)s"
+ % {
+ "cloud_owner": cloud_owner,
+ "cloud_region_id": cloud_region_id,
+ "resource_type": "tenant",
+ "resoure_id": tenant["tenant-id"],
+ "resource-version": tenant["resource-version"]
+ })
+ # remove tenant
+ retcode, content, status_code = \
+ restcall.req_to_aai(resource_url, "DELETE")
+
+ # remove all flavors
+ flavors = cloudregiondata.get("flavors", None)
+ for flavor in flavors.get("flavor", []) if flavors else []:
+ # iterate hpa-capabilities
+ hpa_capabilities = flavor.get("hpa-capabilities", None)
+ for hpa_capability in hpa_capabilities.get("hpa-capability", [])\
+ if hpa_capabilities else []:
+ resource_url = ("/cloud-infrastructure/cloud-regions/"
+ "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"
+ "%(resource_type)ss/%(resource_type)s/%(resoure_id)s/"
+ "hpa-capabilities/hpa-capability/%(hpa-capability-id)s/"
+ "?resource-version=%(resource-version)s"
+ % {
+ "cloud_owner": cloud_owner,
+ "cloud_region_id": cloud_region_id,
+ "resource_type": "flavor",
+ "resoure_id": flavor["flavor-id"],
+ "hpa-capability-id": hpa_capability["hpa-capability-id"],
+ "resource-version": hpa_capability["resource-version"]
+ })
+ # remove hpa-capability
+ retcode, content, status_code = \
+ restcall.req_to_aai(resource_url, "DELETE")
+
+ # remove flavor
+ resource_url = ("/cloud-infrastructure/cloud-regions/"
+ "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"
+ "%(resource_type)ss/%(resource_type)s/%(resoure_id)s/"
+ "?resource-version=%(resource-version)s"
+ % {
+ "cloud_owner": cloud_owner,
+ "cloud_region_id": cloud_region_id,
+ "resource_type": "flavor",
+ "resoure_id": flavor["flavor-id"],
+ "resource-version": flavor["resource-version"]
+ })
+
+ retcode, content, status_code = \
+ restcall.req_to_aai(resource_url, "DELETE")
+
+ # remove all images
+ images = cloudregiondata.get("images", None)
+ for image in images.get("image", []) if images else []:
+ resource_url = ("/cloud-infrastructure/cloud-regions/"
+ "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"
+ "%(resource_type)ss/%(resource_type)s/%(resoure_id)s/"
+ "?resource-version=%(resource-version)s"
+ % {
+ "cloud_owner": cloud_owner,
+ "cloud_region_id": cloud_region_id,
+ "resource_type": "image",
+ "resoure_id": image["image-id"],
+ "resource-version": image["resource-version"]
+ })
+ # remove image
+ retcode, content, status_code = \
+ restcall.req_to_aai(resource_url, "DELETE")
+
+ # remove all availability zones
+
+ # remove all volume groups
+
+ # remove all snapshots
+ snapshots = cloudregiondata.get("snapshots", None)
+ for snapshot in snapshots.get("snapshot", []) if snapshots else []:
+ resource_url = ("/cloud-infrastructure/cloud-regions/"
+ "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"
+ "%(resource_type)ss/%(resource_type)s/%(resoure_id)s/"
+ "?resource-version=%(resource-version)s"
+ % {
+ "cloud_owner": cloud_owner,
+ "cloud_region_id": cloud_region_id,
+ "resource_type": "snapshot",
+ "resoure_id": snapshot["snapshot-id"],
+ "resource-version": snapshot["resource-version"]
+ })
+ # remove snapshot
+ retcode, content, status_code = \
+ restcall.req_to_aai(resource_url, "DELETE")
+
+ # remove all server groups
+
+ # remove all pservers
+
+ # remove cloud region itself
+ resource_url = ("/cloud-infrastructure/cloud-regions/"
+ "cloud-region/%(cloud_owner)s/%(cloud_region_id)s"
+ "?resource-version=%(resource-version)s"
+ % {
+ "cloud_owner": cloud_owner,
+ "cloud_region_id": cloud_region_id,
+ "resource-version": cloudregiondata["resource-version"]
+ })
+ # remove cloud region
+ retcode, content, status_code = \
+ restcall.req_to_aai(resource_url, "DELETE")
+
+ return retcode, content
+
+ def _discover_tenants(self, vimid="", session=None, viminfo=None):
+ try:
+ # iterate all projects and populate them into AAI
+ cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+ for tenant in self._get_list_resources(
+ "projects", "identity", session, viminfo, vimid,
+ "projects"):
+ tenant_info = {
+ 'tenant-id': tenant['id'],
+ 'tenant-name': tenant['name'],
+ }
+ self._update_resoure(
+ cloud_owner, cloud_region_id, tenant['id'],
+ tenant_info, "tenant")
+ return 0, "succeed"
+ except VimDriverNewtonException as e:
+ self._logger.error(
+ "VimDriverNewtonException: status:%s, response:%s"
+ % (e.http_status, e.content))
+ return (
+ e.http_status, e.content
+ )
+ except HttpError as e:
+ if e.http_status == status.HTTP_403_FORBIDDEN:
+ # get the tenant information from the token response
+ try:
+ # get tenant info from the session
+ tmp_auth_state = VimDriverUtils.get_auth_state(session)
+ tmp_auth_info = json.loads(tmp_auth_state)
+ tmp_auth_data = tmp_auth_info['body']
+ tenant = tmp_auth_data['token']['project']
+ tenant_info = {
+ 'tenant-id': tenant['id'],
+ 'tenant-name': tenant['name'],
+ }
+
+ self._update_resoure(
+ cloud_owner, cloud_region_id, tenant['id'],
+ tenant_info, "tenant")
+
+ return 0, "succeed"
+
+ except Exception as ex:
+ self._logger.error(traceback.format_exc())
+ return (
+ 11,
+ str(ex)
+ )
+ else:
+ self._logger.error(
+ "HttpError: status:%s, response:%s"
+ % (e.http_status, e.response.json()))
+ return (
+ e.http_status, e.response.json()
+ )
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return (
+ 11,
+ str(e)
+ )
+
+ def _discover_flavors(self, vimid="", session=None, viminfo=None):
+ try:
+ cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+ for flavor in self._get_list_resources(
+ "/flavors/detail", "compute", session, viminfo, vimid,
+ "flavors"):
+ flavor_info = {
+ 'flavor-id': flavor['id'],
+ 'flavor-name': flavor['name'],
+ 'flavor-vcpus': flavor['vcpus'],
+ 'flavor-ram': flavor['ram'],
+ 'flavor-disk': flavor['disk'],
+ 'flavor-ephemeral': flavor['OS-FLV-EXT-DATA:ephemeral'],
+ 'flavor-swap': flavor['swap'],
+ 'flavor-is-public': flavor['os-flavor-access:is_public'],
+ 'flavor-disabled': flavor['OS-FLV-DISABLED:disabled'],
+ }
+
+ if flavor.get('links') and len(flavor['links']) > 0:
+ flavor_info['flavor-selflink'] =\
+ flavor['links'][0]['href'] or 'http://0.0.0.0'
+ else:
+ flavor_info['flavor-selflink'] = 'http://0.0.0.0'
+
+ # flavors named with the "onap." prefix carry HPA capabilities
+ # derived from their extra specs
+ if flavor['name'].startswith('onap.'):
+ req_resouce = "/flavors/%s/os-extra_specs" % flavor['id']
+ extraResp = self._get_list_resources(
+ req_resouce, "compute", session,
+ viminfo, vimid, "extra_specs")
+
+ hpa_capabilities =\
+ self._get_hpa_capabilities(flavor, extraResp, viminfo)
+ flavor_info['hpa-capabilities'] = \
+ {'hpa-capability': hpa_capabilities}
+
+ retcode, content = self._update_resoure(
+ cloud_owner, cloud_region_id, flavor['id'],
+ flavor_info, "flavor")
+
+ return (0, "succeed")
+ except VimDriverNewtonException as e:
+ self._logger.error(
+ "VimDriverNewtonException: status:%s, response:%s" %
+ (e.http_status, e.content))
+ return (
+ e.http_status, e.content
+ )
+ except HttpError as e:
+ self._logger.error("HttpError: status:%s, response:%s" %
+ (e.http_status, e.response.json()))
+ return (
+ e.http_status, e.response.json()
+ )
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return (
+ 11, str(e)
+ )
+
+ def _get_hpa_capabilities(self, flavor, extra_specs, viminfo):
+ hpa_caps = []
+
+ # Basic capabilities
+ caps_dict = self._get_hpa_basic_capabilities(flavor)
+ if len(caps_dict) > 0:
+ self._logger.debug("basic_capabilities_info: %s" % caps_dict)
+ hpa_caps.append(caps_dict)
+
+ # cpu pinning capabilities
+ caps_dict = self._get_cpupining_capabilities(extra_specs)
+ if len(caps_dict) > 0:
+ self._logger.debug("cpupining_capabilities_info: %s" % caps_dict)
+ hpa_caps.append(caps_dict)
+
+ # cputopology capabilities
+ caps_dict = self._get_cputopology_capabilities(extra_specs)
+ if len(caps_dict) > 0:
+ self._logger.debug("cputopology_capabilities_info: %s" % caps_dict)
+ hpa_caps.append(caps_dict)
+
+ # hugepages capabilities
+ caps_dict = self._get_hugepages_capabilities(extra_specs)
+ if len(caps_dict) > 0:
+ self._logger.debug("hugepages_capabilities_info: %s" % caps_dict)
+ hpa_caps.append(caps_dict)
+
+ # numa capabilities
+ caps_dict = self._get_numa_capabilities(extra_specs)
+ if len(caps_dict) > 0:
+ self._logger.debug("numa_capabilities_info: %s" % caps_dict)
+ hpa_caps.append(caps_dict)
+
+ # storage capabilities
+ caps_dict = self._get_storage_capabilities(flavor)
+ if len(caps_dict) > 0:
+ self._logger.debug("storage_capabilities_info: %s" % caps_dict)
+ hpa_caps.append(caps_dict)
+
+ # CPU instruction set extension capabilities
+ caps_dict = self._get_instruction_set_capabilities(extra_specs)
+ if len(caps_dict) > 0:
+ self._logger.debug("instruction_set_capabilities_info: %s" % caps_dict)
+ hpa_caps.append(caps_dict)
+
+ # PCI passthrough capabilities
+ caps_dict = self._get_pci_passthrough_capabilities(extra_specs)
+ if len(caps_dict) > 0:
+ self._logger.debug("pci_passthrough_capabilities_info: %s" % caps_dict)
+ hpa_caps.append(caps_dict)
+
+ # SRIOV-NIC capabilities
+ caps_dict = self._get_sriov_nic_capabilities(extra_specs)
+ if len(caps_dict) > 0:
+ self._logger.debug("sriov_nic_capabilities_info: %s" % caps_dict)
+ hpa_caps.append(caps_dict)
+
+ # ovsdpdk capabilities
+ caps_dict = self._get_ovsdpdk_capabilities(extra_specs, viminfo)
+ if len(caps_dict) > 0:
+ self._logger.debug("ovsdpdk_capabilities_info: %s" % caps_dict)
+ hpa_caps.append(caps_dict)
+
+ logger.debug("hpa_caps:%s" % hpa_caps)
+ return hpa_caps
+
+ def _get_hpa_basic_capabilities(self, flavor):
+ basic_capability = {}
+ feature_uuid = uuid.uuid4()
+
+ try:
+ basic_capability['hpa-capability-id'] = str(feature_uuid)
+ basic_capability['hpa-feature'] = 'basicCapabilities'
+ basic_capability['architecture'] = 'generic'
+ basic_capability['hpa-version'] = 'v1'
+
+ basic_capability['hpa-feature-attributes'] = []
+ basic_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'numVirtualCpu',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(flavor['vcpus'])
+ })
+ basic_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key':'virtualMemSize',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(flavor['ram'],"MB")
+ })
+ except Exception:
+ # keep the return type consistent with the other capability
+ # getters: log and return whatever was collected so far
+ self._logger.error(traceback.format_exc())
+
+ return basic_capability
+
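+ # Illustrative result (values assumed): for a flavor with 4 vCPUs and
+ # 8192 MB of RAM the method returns roughly
+ #
+ # {
+ # 'hpa-capability-id': '<generated uuid>',
+ # 'hpa-feature': 'basicCapabilities',
+ # 'architecture': 'generic',
+ # 'hpa-version': 'v1',
+ # 'hpa-feature-attributes': [
+ # {'hpa-attribute-key': 'numVirtualCpu',
+ # 'hpa-attribute-value': '{"value":"4"}'},
+ # {'hpa-attribute-key': 'virtualMemSize',
+ # 'hpa-attribute-value': '{"value":"8192","unit":"MB"}'}
+ # ]
+ # }
+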
+ def _get_cpupining_capabilities(self, extra_specs):
+ cpupining_capability = {}
+ feature_uuid = uuid.uuid4()
+
+ try:
+ if 'hw:cpu_policy' in extra_specs\
+ or 'hw:cpu_thread_policy' in extra_specs:
+ cpupining_capability['hpa-capability-id'] = str(feature_uuid)
+ cpupining_capability['hpa-feature'] = 'cpuPinning'
+ cpupining_capability['architecture'] = 'generic'
+ cpupining_capability['hpa-version'] = 'v1'
+
+ cpupining_capability['hpa-feature-attributes'] = []
+ if 'hw:cpu_thread_policy' in extra_specs:
+ cpupining_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'logicalCpuThreadPinningPolicy',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(
+ extra_specs['hw:cpu_thread_policy'])
+ })
+ if 'hw:cpu_policy' in extra_specs:
+ cpupining_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key':'logicalCpuPinningPolicy',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(
+ extra_specs['hw:cpu_policy'])
+ })
+ except Exception:
+ self._logger.error(traceback.format_exc())
+
+ return cpupining_capability
+
+ def _get_cputopology_capabilities(self, extra_specs):
+ cputopology_capability = {}
+ feature_uuid = uuid.uuid4()
+
+ try:
+ if 'hw:cpu_sockets' in extra_specs\
+ or 'hw:cpu_cores' in extra_specs\
+ or 'hw:cpu_threads' in extra_specs:
+ cputopology_capability['hpa-capability-id'] = str(feature_uuid)
+ cputopology_capability['hpa-feature'] = 'cpuTopology'
+ cputopology_capability['architecture'] = 'generic'
+ cputopology_capability['hpa-version'] = 'v1'
+
+ cputopology_capability['hpa-feature-attributes'] = []
+ if 'hw:cpu_sockets' in extra_specs:
+ cputopology_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'numCpuSockets',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(extra_specs['hw:cpu_sockets'])
+ })
+ if 'hw:cpu_cores' in extra_specs:
+ cputopology_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'numCpuCores',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(extra_specs['hw:cpu_cores'])
+ })
+ if 'hw:cpu_threads' in extra_specs:
+ cputopology_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'numCpuThreads',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(extra_specs['hw:cpu_threads'])
+ })
+ except Exception:
+ self._logger.error(traceback.format_exc())
+
+ return cputopology_capability
+
+ def _get_hugepages_capabilities(self, extra_specs):
+ hugepages_capability = {}
+ feature_uuid = uuid.uuid4()
+
+ try:
+ if 'hw:mem_page_size' in extra_specs:
+ hugepages_capability['hpa-capability-id'] = str(feature_uuid)
+ hugepages_capability['hpa-feature'] = 'hugePages'
+ hugepages_capability['architecture'] = 'generic'
+ hugepages_capability['hpa-version'] = 'v1'
+
+ hugepages_capability['hpa-feature-attributes'] = []
+ if extra_specs['hw:mem_page_size'] == 'large':
+ hugepages_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'memoryPageSize',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(2,"MB")
+ })
+ elif extra_specs['hw:mem_page_size'] == 'small':
+ hugepages_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'memoryPageSize',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(4,"KB")
+ })
+ elif extra_specs['hw:mem_page_size'] == 'any':
+ self._logger.info(
+ "HPA feature memoryPageSize does not currently"
+ " support the 'any' page size")
+ else:
+ hugepages_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'memoryPageSize',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(extra_specs['hw:mem_page_size'],"KB")
+ })
+ except Exception:
+ self._logger.error(traceback.format_exc())
+
+ return hugepages_capability
+
+ def _get_numa_capabilities(self, extra_specs):
+ numa_capability = {}
+ feature_uuid = uuid.uuid4()
+
+ try:
+ if 'hw:numa_nodes' in extra_specs:
+ numa_capability['hpa-capability-id'] = str(feature_uuid)
+ numa_capability['hpa-feature'] = 'numa'
+ numa_capability['architecture'] = 'generic'
+ numa_capability['hpa-version'] = 'v1'
+
+ numa_capability['hpa-feature-attributes'] = []
+ numa_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'numaNodes',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(extra_specs['hw:numa_nodes'] or 0)
+ })
+
+ for num in range(0, int(extra_specs['hw:numa_nodes'])):
+ numa_cpu_node = "hw:numa_cpus.%s" % num
+ numa_mem_node = "hw:numa_mem.%s" % num
+ numacpu_key = "numaCpu-%s" % num
+ numamem_key = "numaMem-%s" % num
+
+ if numa_cpu_node in extra_specs and numa_mem_node in extra_specs:
+ numa_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': numacpu_key,
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(extra_specs[numa_cpu_node])
+ })
+ numa_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': numamem_key,
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(extra_specs[numa_mem_node],"MB")
+ })
+ except Exception:
+ self._logger.error(traceback.format_exc())
+
+ return numa_capability
+
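+ # Illustrative input (values assumed): extra specs such as
+ #
+ # {'hw:numa_nodes': '2',
+ # 'hw:numa_cpus.0': '0,1', 'hw:numa_mem.0': '2048',
+ # 'hw:numa_cpus.1': '2,3', 'hw:numa_mem.1': '2048'}
+ #
+ # yield a numa capability with a numaNodes attribute plus per-node
+ # numaCpu-N and numaMem-N attributes.
+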
+ def _get_storage_capabilities(self, flavor):
+ storage_capability = {}
+ feature_uuid = uuid.uuid4()
+
+ try:
+ storage_capability['hpa-capability-id'] = str(feature_uuid)
+ storage_capability['hpa-feature'] = 'localStorage'
+ storage_capability['architecture'] = 'generic'
+ storage_capability['hpa-version'] = 'v1'
+
+ storage_capability['hpa-feature-attributes'] = []
+ storage_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'diskSize',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(
+ flavor['disk'] or 0, "GB")
+ })
+ storage_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'swapMemSize',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(
+ flavor['swap'] or 0, "MB")
+ })
+ storage_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'ephemeralDiskSize',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(
+ flavor['OS-FLV-EXT-DATA:ephemeral'] or 0, "GB")
+ })
+ except Exception:
+ self._logger.error(traceback.format_exc())
+
+ return storage_capability
+
+ def _get_instruction_set_capabilities(self, extra_specs):
+ instruction_capability = {}
+ feature_uuid = uuid.uuid4()
+ try:
+ if 'hw:capabilities:cpu_info:features' in extra_specs:
+ instruction_capability['hpa-capability-id'] = str(feature_uuid)
+ instruction_capability['hpa-feature'] = 'instructionSetExtensions'
+ instruction_capability['architecture'] = 'Intel64'
+ instruction_capability['hpa-version'] = 'v1'
+
+ instruction_capability['hpa-feature-attributes'] = []
+ instruction_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'instructionSetExtensions',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(
+ extra_specs['hw:capabilities:cpu_info:features'])
+ })
+ except Exception:
+ self._logger.error(traceback.format_exc())
+
+ return instruction_capability
+
+ def _get_pci_passthrough_capabilities(self, extra_specs):
+ pci_passthrough_capability = {}
+ feature_uuid = uuid.uuid4()
+
+ try:
+ if 'pci_passthrough:alias' in extra_specs:
+ value1 = extra_specs['pci_passthrough:alias'].split(':')
+ value2 = value1[0].split('-')
+
+ pci_passthrough_capability['hpa-capability-id'] = str(feature_uuid)
+ pci_passthrough_capability['hpa-feature'] = 'pciePassthrough'
+ pci_passthrough_capability['architecture'] = str(value2[2])
+ pci_passthrough_capability['hpa-version'] = 'v1'
+
+ pci_passthrough_capability['hpa-feature-attributes'] = []
+ pci_passthrough_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'pciCount',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(value1[1])
+ })
+ pci_passthrough_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'pciVendorId',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(value2[3])
+ })
+ pci_passthrough_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'pciDeviceId',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(value2[4])
+ })
+ except Exception:
+ self._logger.error(traceback.format_exc())
+
+ return pci_passthrough_capability
+
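+ # Illustrative input (format inferred from the parsing above, values
+ # assumed): an alias such as "pci-passthrough-Intel64-8086-154c:2" is
+ # read as architecture "Intel64", pciVendorId "8086", pciDeviceId
+ # "154c" and pciCount "2".
+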
+ def _get_sriov_nic_capabilities(self, extra_specs):
+ sriov_capability = {}
+ feature_uuid = uuid.uuid4()
+
+ try:
+ if 'aggregate_instance_extra_specs:sriov_nic' in extra_specs:
+ value1 = extra_specs['aggregate_instance_extra_specs:sriov_nic'].split(':')
+ value2 = value1[0].split('-', 5)
+
+ sriov_capability['hpa-capability-id'] = str(feature_uuid)
+ sriov_capability['hpa-feature'] = 'sriovNICNetwork'
+ sriov_capability['architecture'] = str(value2[2])
+ sriov_capability['hpa-version'] = 'v1'
+
+ sriov_capability['hpa-feature-attributes'] = []
+ sriov_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'pciCount',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(value1[1])})
+ sriov_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'pciVendorId',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(value2[3])})
+ sriov_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'pciDeviceId',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(value2[4])})
+ sriov_capability['hpa-feature-attributes'].append(
+ {'hpa-attribute-key': 'physicalNetwork',
+ 'hpa-attribute-value':
+ '{{\"value\":\"{0}\"}}'.format(value2[5])})
+ except Exception:
+ self._logger.error(traceback.format_exc())
+
+ return sriov_capability
+
+ def _get_ovsdpdk_capabilities(self, extra_specs, viminfo):
+ ovsdpdk_capability = {}
+ feature_uuid = uuid.uuid4()
+
+ try:
+ cloud_extra_info_str = viminfo.get('cloud_extra_info')
+ if not isinstance(cloud_extra_info_str, dict):
+ try:
+ cloud_extra_info_str = json.loads(cloud_extra_info_str)
+ except Exception as ex:
+ logger.error("Can not convert cloud extra info %s %s" % (
+ str(ex), cloud_extra_info_str))
+ return {}
+ if cloud_extra_info_str :
+ cloud_dpdk_info = cloud_extra_info_str.get("ovsDpdk")
+ if cloud_dpdk_info :
+ ovsdpdk_capability['hpa-capability-id'] = str(feature_uuid)
+ ovsdpdk_capability['hpa-feature'] = 'ovsDpdk'
+ ovsdpdk_capability['architecture'] = 'Intel64'
+ ovsdpdk_capability['hpa-version'] = 'v1'
+
+ ovsdpdk_capability['hpa-feature-attributes'] = [
+ {
+ 'hpa-attribute-key': str(cloud_dpdk_info.get("libname")),
+ 'hpa-attribute-value': '{{\"value\":\"{0}\"}}'.format(
+ cloud_dpdk_info.get("libversion"))
+ },]
+ except Exception:
+ self._logger.error(traceback.format_exc())
+
+ return ovsdpdk_capability
+
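+ # Illustrative cloud_extra_info (values assumed): a cloud region whose
+ # extra info contains
+ #
+ # {"ovsDpdk": {"libname": "dataProcessingAccelerationLibrary",
+ # "libversion": "17.02"}}
+ #
+ # is reported with an ovsDpdk capability whose single attribute key is
+ # the library name and whose value wraps the library version.
+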
+ # def update_image_metadata(self, cloud_owner, cloud_region_id, image_id, metadatainfo):
+ # '''
+ # populate image meta data
+ # :param cloud_owner:
+ # :param cloud_region_id:
+ # :param image_id:
+ # :param metadatainfo:
+ # metaname: string
+ # metaval: string
+ # :return:
+ # '''
+ #
+ # if cloud_owner and cloud_region_id:
+ # retcode, content, status_code = \
+ # restcall.req_to_aai(
+ # "/cloud-infrastructure/cloud-regions/cloud-region"
+ # + "/%s/%s/images/image/%s/metadata/metadatum/%s"
+ # % (cloud_owner, cloud_region_id, image_id, metadatainfo['metaname']),
+ # "PUT", content=metadatainfo)
+ #
+ # self._logger.debug("update_image,vimid:%s_%s req_to_aai: %s/%s, return %s, %s, %s"
+ # % (cloud_owner,cloud_region_id,image_id,metadatainfo['metaname'],
+ # retcode, content, status_code))
+ # return retcode
+ # return 1
+
+ def _discover_images(self, vimid="", session=None, viminfo=None):
+ try:
+ cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+ for image in self._get_list_resources(
+ "/v2/images", "image", session, viminfo, vimid,
+ "images"):
+ image_info = {
+ 'image-id': image['id'],
+ 'image-name': image['name'],
+ 'image-selflink': image['self'],
+
+ 'image-os-distro': image.get('os_distro') or 'Unknown',
+ 'image-os-version': image.get('os_version') or 'Unknown',
+ 'application': image.get('application'),
+ 'application-vendor': image.get('application_vendor'),
+ 'application-version': image.get('application_version'),
+ 'image-architecture': image.get('architecture'),
+ }
+
+ ret, content = self._update_resoure(
+ cloud_owner, cloud_region_id, image['id'], image_info,
+ "image")
+ if ret != 0:
+ # failed to update image
+ self._logger.debug(
+ "failed to populate image info into AAI: %s,"
+ " image id: %s, ret:%s"
+ % (vimid, image_info['image-id'], ret))
+ continue
+
+ schema = image['schema']
+ if schema:
+ req_resource = schema
+ service = {'service_type': "image",
+ 'interface': 'public',
+ 'region_name': viminfo['openstack_region_id']
+ if viminfo.get('openstack_region_id')
+ else viminfo['cloud_region_id']
+ }
+
+ self._logger.info("making request with URI:%s" %
+ req_resource)
+ resp = session.get(req_resource, endpoint_filter=service)
+ self._logger.info("request returns with status %s" %
+ resp.status_code)
+ if resp.status_code == status.HTTP_200_OK:
+ self._logger.debug("with content:%s" %
+ resp.json())
+ # keep the schema document for the (currently disabled)
+ # parsing below
+ content = resp.json()
+
+ # if resp.status_code == status.HTTP_200_OK:
+ # parse the schema? TBD
+ # self.update_image(cloud_owner, cloud_region_id, image_info)
+ #metadata_info = {}
+ return (0, "succeed")
+ except VimDriverNewtonException as e:
+ self._logger.error("VimDriverNewtonException:"
+ " status:%s, response:%s" %
+ (e.http_status, e.content))
+ return (
+ e.http_status, e.content
+ )
+ except HttpError as e:
+ self._logger.error("HttpError: status:%s, response:%s" %
+ (e.http_status, e.response.json()))
+ return (
+ e.http_status, e.response.json()
+ )
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return (
+ 11, str(e)
+ )
+
+ def _discover_availability_zones(self, vimid="", session=None,
+ viminfo=None):
+ try:
+ az_pserver_info = {}
+ cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+ for az in self._get_list_resources(
+ "/os-availability-zone/detail", "compute", session,
+ viminfo, vimid,
+ "availabilityZoneInfo"):
+ az_info = {
+ 'availability-zone-name': az['zoneName'],
+ 'operational-status': az['zoneState']['available']
+ if az.get('zoneState') else '',
+ 'hypervisor-type': '',
+ }
+ # filter out the default availability zone "internal"; the "nova"
+ # filter below is left commented out so the default zone is still
+ # registered while exercising the registration process
+ azName = az.get('zoneName', None)
+ # if azName == 'nova':
+ # continue
+ if azName == 'internal':
+ continue
+
+ # get list of host names
+ pservers_info = list(az['hosts'].keys())
+ # set the association between az and pservers
+ az_pserver_info[azName] = pservers_info
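+ # e.g. (host names assumed for illustration):
+ # az_pserver_info == {"nova": ["compute-0", "compute-1"]}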
+
+ az_info['hypervisor-type'] = 'QEMU' # default for OpenStack
+
+ ret, content = self._update_resoure(
+ cloud_owner, cloud_region_id, az['zoneName'], az_info,
+ "availability-zone")
+ if ret != 0:
+ # failed to update the availability zone
+ self._logger.debug(
+ "failed to populate az info into AAI: "
+ "%s, az name: %s, ret:%s"
+ % (vimid, az_info['availability-zone-name'], ret))
+ # return (
+ # ret,
+ # "fail to popluate az info into AAI:%s" % content
+ # )
+ continue
+
+ # populate pservers:
+ for hostname in pservers_info:
+ if hostname == "":
+ continue
+
+ pservername = vimid+"_"+hostname
+ selflink = ""
+ # if self.proxy_prefix[3:] == "/v1":
+ # selflink = "%s/%s/%s/compute/os-hypervisors/detail?hypervisor_hostname_pattern=%s"%\
+ # (self.proxy_prefix, cloud_owner, cloud_region_id , hostname)
+ # else:
+ # selflink = "%s/%s/compute/os-hypervisors/detail?hypervisor_hostname_pattern=%s" % \
+ # (self.proxy_prefix, vimid, hostname)
+
+ pinfo = {
+ "hostname": pservername,
+ "server-selflink": selflink,
+ "pserver-id": hostname
+ }
+ self._update_pserver(cloud_owner, cloud_region_id, pinfo)
+ self._update_pserver_relation_az(cloud_owner, cloud_region_id, pinfo, azName)
+ self._update_pserver_relation_cloudregion(cloud_owner, cloud_region_id, pinfo)
+
+ return (0, az_pserver_info)
+ except VimDriverNewtonException as e:
+ self._logger.error(
+ "VimDriverNewtonException: status:%s,"
+ " response:%s" % (e.http_status, e.content))
+ return (
+ e.http_status, e.content
+ )
+ except HttpError as e:
+ self._logger.error(
+ "HttpError: status:%s, response:%s" %
+ (e.http_status, e.response.json()))
+ return (
+ e.http_status, e.response.json()
+ )
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return (
+ 11, str(e)
+ )
+
+ # def _discover_volumegroups(self, vimid="", session=None, viminfo=None):
+ # cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+ # for cg in self._get_list_resources(
+ # "/consistencygroups/detail", "volumev3", session,
+ # viminfo, vimid,
+ # "consistencygroups"):
+ # vg_info = {
+ # 'volume-group-id': cg['id'],
+ # 'volume-group-name': cg['name'],
+ # 'vnf-type': '',
+ # }
+ #
+ # ret = self._update_resoure(
+ # cloud_owner, cloud_region_id, cg['id'], vg_info,
+ # "volume-group")
+ # if ret != 0:
+ # # failed to update image
+ # self._logger.debug("failed to populate volumegroup info into AAI: %s, volume-group-id: %s, ret:%s"
+ # % (vimid, vg_info['volume-group-id'], ret))
+
+ def _discover_snapshots(self, vimid="", session=None, viminfo=None):
+ try:
+ cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+ for ss in self._get_list_resources(
+ "/snapshots/detail", "volumev3", session,
+ viminfo, vimid,
+ "snapshots"):
+ snapshot_info = {
+ 'snapshot-id': ss['id'],
+ 'snapshot-name': ss['name'],
+ }
+ if ss.get('metadata'):
+ snapshot_info['snapshot-architecture'] = ss['metadata'].get('architecture')
+ snapshot_info['application'] = ss['metadata'].get('application')
+ snapshot_info['snapshot-os-distro'] = ss['metadata'].get('os-distro')
+ snapshot_info['snapshot-os-version'] = ss['metadata'].get('os-version')
+ snapshot_info['application-vendor'] = ss['metadata'].get('vendor')
+ snapshot_info['application-version'] = ss['metadata'].get('version')
+ snapshot_info['snapshot-selflink'] = ss['metadata'].get('selflink')
+ snapshot_info['prev-snapshot-id'] = ss['metadata'].get('prev-snapshot-id')
+
+ ret, content = self._update_resoure(
+ cloud_owner, cloud_region_id, ss['id'], snapshot_info,
+ "snapshot")
+ if ret != 0:
+ # failed to update the snapshot
+ self._logger.debug("failed to populate snapshot info into AAI: %s, snapshot-id: %s, ret:%s"
+ % (vimid, snapshot_info['snapshot-id'], ret))
+ return (
+ ret,
+ "fail to populate snapshot into AAI:%s" % content
+ )
+ return 0, "Succeed"
+ except VimDriverNewtonException as e:
+ self._logger.error("VimDriverNewtonException: status:%s, response:%s" % (e.http_status, e.content))
+ return (
+ e.http_status, e.content
+ )
+ except HttpError as e:
+ self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
+ return (
+ e.http_status, e.response.json()
+ )
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return (
+ 11, str(e)
+ )
+
+ # def _discover_servergroups(self, vimid="", session=None, viminfo=None):
+ # for sg in self._get_list_resources(
+ # "/os-server-groups", "compute", session,
+ # viminfo, vimid,
+ # "security groups"):
+
+ def _update_pserver_relation_az(self, cloud_owner, cloud_region_id, pserverinfo, azName):
+ related_link = \
+ "/aai/%s/cloud-infrastructure/cloud-regions/"\
+ "cloud-region/%s/%s/"\
+ "availability-zones/availability-zone/%s" % (
+ settings.AAI_SCHEMA_VERSION, cloud_owner,
+ cloud_region_id, azName)
+
+ relationship_data = \
+ {
+ 'related-to': 'availability-zone',
+ 'related-link': related_link,
+ 'relationship-data': [
+ {
+ 'relationship-key': 'availability-zone.availability-zone-name',
+ 'relationship-value': azName
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "availability-zone.availability-zone-name"
+ }
+ ]
+ }
+
+ retcode, content, status_code = \
+ restcall.req_to_aai("/cloud-infrastructure/pservers/pserver/%s"
+ "/relationship-list/relationship"
+ % (pserverinfo['hostname']), "PUT",
+ content=relationship_data)
+
+ self._logger.debug("update_pserver_az_relation,vimid:%s_%s, "
+ "az:%s req_to_aai: %s, return %s, %s, %s"
+ % (cloud_owner, cloud_region_id, azName,
+ pserverinfo['hostname'], retcode, content,
+ status_code))
+ return (
+ 0,
+ "succeed"
+ )
+
+ def _update_pserver_relation_cloudregion(
+ self,
+ cloud_owner,
+ cloud_region_id,
+ pserverinfo
+ ):
+ related_link = \
+ "/aai/%s/cloud-infrastructure/cloud-regions/"\
+ "cloud-region/%s/%s" % (
+ settings.AAI_SCHEMA_VERSION, cloud_owner,
+ cloud_region_id)
+
+ relationship_data = \
+ {
+ 'related-to': 'cloud-region',
+ 'related-link': related_link,
+ 'relationship-data': [
+ {
+ 'relationship-key': 'cloud-region.cloud-owner',
+ 'relationship-value': cloud_owner
+ },
+ {
+ 'relationship-key': 'cloud-region.cloud-region-id',
+ 'relationship-value': cloud_region_id
+ }
+ ],
+ "related-to-property": [
+ {
+ "property-key": "cloud-region.cloud-owner"
+ },
+ {
+ "property-key": "cloud-region.cloud-region-id"
+ }
+ ]
+ }
+
+ retcode, content, status_code = \
+ restcall.req_to_aai("/cloud-infrastructure/pservers/pserver"
+ "/%s/relationship-list/relationship"
+ % (pserverinfo['hostname']), "PUT",
+ content=relationship_data)
+
+ self._logger.debug("update_pserver_cloudregion_relation,vimid:%s_%s"
+ " req_to_aai: %s, return %s, %s, %s"
+ % (cloud_owner, cloud_region_id,
+ pserverinfo['hostname'], retcode, content,
+ status_code))
+ return (
+ 0,
+ "succeed"
+ )
+
+ def _update_pserver(self, cloud_owner, cloud_region_id, pserverinfo):
+ '''
+ populate pserver into AAI
+ :param cloud_owner:
+ :param cloud_region_id:
+ :param pserverinfo:
+ hostname: string
+ in-maint: boolean
+
+ pserver-name2: string
+ pserver-id: string
+ ptnii-equip-name: string
+ number-of-cpus: integer
+ disk-in-gigabytes: integer
+ ram-in-megabytes: integer
+ equip-type: string
+ equip-vendor: string
+ equip-model: string
+ fqdn: string
+ pserver-selflink: string
+ ipv4-oam-address: string
+ serial-number: string
+ ipaddress-v4-loopback-0: string
+ ipaddress-v6-loopback-0: string
+ ipaddress-v4-aim: string
+ ipaddress-v6-aim: string
+ ipaddress-v6-oam: string
+ inv-status: string
+ internet-topology: string
+ purpose: string
+ prov-status: string
+ management-option: string
+ host-profile: string
+
+ :return: (retcode, content) from AAI, or (10, message) when the cloud owner/region is missing
+ '''
+
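+ # read-merge-write against AAI: GET the existing pserver (to pick up
+ # its resource-version), merge in the new attributes, then PUT the
+ # merged document back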
+ if cloud_owner and cloud_region_id:
+ resource_url = "/cloud-infrastructure/pservers/pserver/%s" \
+ % (pserverinfo['hostname'])
+
+ # get cloud-region
+ retcode, content, status_code = \
+ restcall.req_to_aai(resource_url, "GET")
+
+ # merge the existing pserver data (carries resource-version) into the update
+ if retcode == 0 and content:
+ content = json.JSONDecoder().decode(content)
+ #pserverinfo["resource-version"] = content["resource-version"]
+ content.update(pserverinfo)
+ pserverinfo = content
+
+ retcode, content, status_code = \
+ restcall.req_to_aai(resource_url, "PUT", content=pserverinfo)
+
+ self._logger.debug(
+ "update_snapshot,vimid:%s_%s req_to_aai: %s,"
+ " return %s, %s, %s" % (
+ cloud_owner, cloud_region_id,
+ pserverinfo['hostname'],
+ retcode, content, status_code))
+
+ return retcode, content
+ else:
+ # unknown cloud owner,region_id
+ return (
+ 10,
+ "Cloud Region not found: %s,%s"
+ % (cloud_owner, cloud_region_id)
+ )
+
+ def _discover_pservers(self, vimid="", session=None, viminfo=None):
+ try:
+ cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+ for hypervisor in self._get_list_resources(
+ "/os-hypervisors/detail", "compute", session,
+ viminfo, vimid,
+ "hypervisors"):
+ hypervisor_info = {
+ 'hostname': hypervisor['hypervisor_hostname'],
+ 'in-maint': hypervisor['state'],
+
+ 'pserver-id': hypervisor.get('id'),
+ 'ptnii-equip-name': hypervisor.get('id'),
+ 'disk-in-gigabytes': hypervisor.get('local_gb'),
+ 'ram-in-megabytes': hypervisor.get('memory_mb'),
+ 'pserver-selflink': hypervisor.get('hypervisor_links'),
+ 'ipv4-oam-address': hypervisor.get('host_ip'),
+ }
+
+ if hypervisor.get('cpu_info'):
+ cpu_info = json.loads(hypervisor['cpu_info'])
+ if cpu_info.get('topology'):
+ cputopo = cpu_info.get('topology')
+ n_cpus = cputopo['cores'] * cputopo['threads'] * cputopo['sockets']
+ hypervisor_info['number-of-cpus'] = n_cpus
+
+ ret, content = self._update_pserver(cloud_owner, cloud_region_id,
+ hypervisor_info)
+ if ret != 0:
+ # failed to update the pserver
+ self._logger.debug(
+ "failed to populate pserver info into AAI:"
+ " %s, hostname: %s, ret:%s"
+ % (vimid, hypervisor_info['hostname'], ret))
+ return ret, "fail to update pserver to AAI:%s" % content
+
+ return 0, "succeed"
+ except VimDriverNewtonException as e:
+ self._logger.error(
+ "VimDriverNewtonException: status:%s, response:%s"
+ % (e.http_status, e.content))
+ return (
+ e.http_status, e.content
+ )
+ except HttpError as e:
+ self._logger.error(
+ "HttpError: status:%s, response:%s"
+ % (e.http_status, e.response.json()))
+ return (
+ e.http_status, e.response.json()
+ )
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return (
+ 11, str(e)
+ )
+
+ def _update_proxy_identity_endpoint(self, vimid):
+ '''
+ update the cloud region's identity-url in AAI so that it points
+ to this multicloud proxy
+ :param vimid: encoded cloud owner and region id (see extsys.decode_vim_id)
+ :return: (retcode, content)
+ '''
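+ # for illustration (prefix values assumed): with a proxy_prefix ending
+ # in "/v0" the identity-url becomes
+ # <proxy_prefix>/<vimid>/identity/v2.0
+ # otherwise it becomes
+ # <proxy_prefix>/<cloud_owner>/<cloud_region_id>/identity/v2.0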
+ try:
+ cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+ if cloud_owner and cloud_region_id:
+ resource_url = \
+ "/cloud-infrastructure/cloud-regions" \
+ "/cloud-region/%s/%s" \
+ % (cloud_owner, cloud_region_id)
+
+ # get cloud-region
+ retcode, content, status_code = \
+ restcall.req_to_aai(resource_url, "GET")
+
+ # update identity-url on the fetched cloud-region (which carries its resource-version)
+ if retcode == 0 and content:
+ viminfo = json.JSONDecoder().decode(content)
+ viminfo['identity-url'] =\
+ self.proxy_prefix + "/%s/identity/v2.0" % vimid \
+ if self.proxy_prefix[-3:] == "/v0" \
+ else self.proxy_prefix +\
+ "/%s/%s/identity/v2.0"\
+ % extsys.decode_vim_id(vimid)
+
+ retcode, content, status_code = \
+ restcall.req_to_aai(
+ "/cloud-infrastructure/cloud-regions"
+ "/cloud-region/%s/%s"
+ % (cloud_owner, cloud_region_id), "PUT",
+ content=viminfo)
+
+ self._logger.debug(
+ "update_proxy_identity_endpoint,vimid:"
+ "%s req_to_aai: %s, return %s, %s, %s"
+ % (vimid, viminfo['identity-url'],
+ retcode, content, status_code))
+ return 0, "succeed"
+ else:
+ self._logger.debug(
+ "failure: update_proxy_identity_endpoint,vimid:"
+ "%s req_to_aai: return %s, %s, %s"
+ % (vimid, retcode, content, status_code))
+ return retcode, content
+ else:
+ return (
+ 10,
+ "Cloud Region not found: %s" % vimid
+ )
+
+ except VimDriverNewtonException as e:
+ self._logger.error(
+ "VimDriverNewtonException: status:%s, response:%s"
+ % (e.http_status, e.content))
+ return (
+ e.http_status, e.content
+ )
+ except HttpError as e:
+ self._logger.error(
+ "HttpError: status:%s, response:%s"
+ % (e.http_status, e.response.json()))
+ return (
+ e.http_status, e.response.json()
+ )
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return (
+ 11, str(e)
+ )
+