11 import mysql.connector
16 from novaclient import client as openstackclient
17 from kubernetes import client, config
18 from netaddr import IPAddress, IPNetwork
######################################################################
# Parts which must be updated / cross-checked during each deployment #
# are marked as CHANGEME                                             #
######################################################################
#############################################################################################
# Set network prefix of k8s host external address; it's used for pod public IP autodetection
# but can be overridden by the user in case of autodetection failure
external_net_addr = '10.12.0.0'
external_net_prefix_len = 16
#############################################################################################
# set the openstack cloud access credentials here
#############################################################################################
# set the gra_api flag
# Mustn't be set to True until Frankfurt DGs are updated for GRA-API infrastructure
###########################
# set Openstack credentials
# NOTE(review): the entries below are members of the OpenStack credential mapping
# ('--os-*' CLI option names to values); its opening assignment sits outside this view.
# Tenant/project values switch on 'oom_mode' (OOM- vs. HEAT-based lab).
'--os-auth-url': 'http://10.12.25.2:5000',
'--os-username': 'kxi',
'--os-user-domain-id': 'default',
'--os-project-domain-id': 'default',
'--os-tenant-id': '712b6016580e410b9abfec9ca34953ce' if oom_mode else '1e097c6713e74fd7ac8e4295e605ee1e',
'--os-region-name': 'RegionOne',
'--os-password': 'n3JhGMGuDzD8',  # CHANGEME: deployment-specific secret
'--os-project-domain-name': 'Integration-Release-Daily' if oom_mode else 'Integration-SB-07',
'--os-identity-api-version': '3'
############################################################################
# set oam and public network which must exist in openstack before deployment
common_preload_config = {
    'oam_onap_net': 'oam_network_exxC' if oom_mode else 'oam_onap_lAky',
    'oam_onap_subnet': 'oam_network_exxC' if oom_mode else 'oam_onap_lAky',
    'public_net': 'external',
    'public_net_id': '971040b2-7059-49dc-b220-4fab50cb2ad4'
#############################################################################
# Set name of Onap's k8s namespace and sdnc controller pod
onap_namespace = 'onap'
onap_environment = 'dev'
sdnc_controller_pod = '-'.join([onap_environment, 'sdnc-sdnc-0'])
# presumably marks substitution variables inside preload templates — confirm usage
template_variable_symbol = '${'
# name prefix shared by all vCPE VMs (used to filter nova listings)
cpe_vm_prefix = 'zdcpe'
#############################################################################################
# preloading network config
# value = [subnet_start_ip, subnet_gateway_ip]
preload_network_config = {
    'cpe_public': ['10.2.0.2', '10.2.0.1'],
    'cpe_signal': ['10.4.0.2', '10.4.0.1'],
    'brg_bng': ['10.3.0.2', '10.3.0.1'],
    'bng_mux': ['10.1.0.10', '10.1.0.1'],
    'mux_gw': ['10.5.0.10', '10.5.0.1']
# identifiers used when creating service instances via SO/AAI
dcae_ves_collector_name = 'dcae-bootstrap'
global_subscriber_id = 'SDN-ETHERNET-INTERNET'
project_name = 'Project-Demonstration'
owning_entity_id = '520cc603-a3c4-4ec2-9ef4-ca70facd79c0'
owning_entity_name = 'OE-Demonstration1'
def __init__(self, extra_host_names=None):
    """Assemble the full vCPE test configuration.

    Loads settings, resolves ONAP component IP addresses (via k8s and/or
    OpenStack) and builds the URLs, credentials and bookkeeping file
    paths used by the vCPE test scripts.

    :param extra_host_names: optional additional VM name keywords whose
        IP addresses should be resolved in addition to the default set
    """
    self.logger = logging.getLogger(__name__)
    self.logger.setLevel(logging.DEBUG)
    self.logger.info('Initializing configuration')
    # Read configuration from config file
    # OOM: this is the address that the brg and bng will nat for sdnc access - 10.0.0.x address of k8 host for sdnc-0 container
    self.sdnc_oam_ip = self.get_pod_node_oam_ip(self.sdnc_controller_pod)
    # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
    self.oom_so_sdnc_aai_ip = self.get_pod_node_public_ip(self.sdnc_controller_pod)
    # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
    self.oom_dcae_ves_collector = self.oom_so_sdnc_aai_ip
    # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
    self.mr_ip_addr = self.oom_so_sdnc_aai_ip
    self.mr_ip_port = '30227'
    # port selection differs per deployment mode: 30xxx values when
    # oom_mode is set, the services' native ports otherwise
    self.so_nbi_port = '30277' if self.oom_mode else '8080'
    self.sdnc_preloading_port = '30267' if self.oom_mode else '8282'
    self.aai_query_port = '30233' if self.oom_mode else '8443'
    self.sniro_port = '30288' if self.oom_mode else '8080'
    self.host_names = ['sdc', 'so', 'sdnc', 'robot', 'aai-inst1', self.dcae_ves_collector_name, 'mariadb-galera']
    # assumes extra_host_names is iterable here — TODO confirm the None guard
    self.host_names.extend(extra_host_names)
    self.hosts = self.get_vm_ip(self.host_names, self.external_net_addr, self.external_net_prefix_len)
    # this is the keyword used to name vgw stack, must not be used in other stacks
    self.vgw_name_keyword = 'base_vcpe_vgw'
    # this is the file that will keep the index of last assigned SO name
    self.vgw_vfmod_name_index_file= '__var/vgw_vfmod_name_index'
    self.svc_instance_uuid_file = '__var/svc_instance_uuid'
    self.preload_dict_file = '__var/preload_dict'
    self.vgmux_vnf_name_file = '__var/vgmux_vnf_name'
    self.product_family_id = 'f9457e8c-4afd-45da-9389-46acd9bf5116'
    self.custom_product_family_id = 'a9a77d5a-123e-4ca2-9eb9-0b015d2ee0fb'
    # name prefixes applied when generating instance names per object type
    self.instance_name_prefix = {
        'service': 'vcpe_svc',
        'network': 'vcpe_net',
        'vfmodule': 'vcpe_vfmodule'
    self.aai_userpass = 'AAI', 'AAI'
    ############################################################################################################
    # following key is overriding public key from vCPE heat templates, it's important to use correct one in here
    self.pub_key = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh'
    self.os_tenant_id = self.cloud['--os-tenant-id']
    self.os_region_name = self.cloud['--os-region-name']
    self.common_preload_config['pub_key'] = self.pub_key
    self.sniro_url = 'http://' + self.hosts['robot'] + ':' + self.sniro_port + '/__admin/mappings'
    self.sniro_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    self.homing_solution = 'sniro'  # value is either 'sniro' or 'oof'
    # self.homing_solution = 'oof'
    self.customer_location_used_by_oof = {
        "customerLatitude": "32.897480",
        "customerLongitude": "-97.040443",
        "customerName": "some_company"
    #############################################################################################
    # SDC back-end / front-end access settings
    self.sdc_be_port = '30204'
    self.sdc_be_request_userpass = 'vid', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
    self.sdc_be_request_headers = {'X-ECOMP-InstanceID': 'VID'}
    self.sdc_be_url_prefix = 'https://' + self.hosts['sdc'] + ':' + self.sdc_be_port
    self.sdc_service_list_url = self.sdc_be_url_prefix + '/sdc/v1/catalog/services'
    self.sdc_fe_port = '30207'
    self.sdc_fe_request_userpass = 'beep', 'boop'
    self.sdc_fe_request_headers = {'USER_ID': 'demo', 'Content-Type': 'application/json'}
    self.sdc_fe_url_prefix = 'https://' + self.hosts['sdc'] + ':' + self.sdc_fe_port
    self.sdc_get_category_list_url = self.sdc_fe_url_prefix + '/sdc1/feProxy/rest/v1/categories'
    self.sdc_create_allotted_resource_subcategory_url = self.sdc_fe_url_prefix + '/sdc1/feProxy/rest/v1/category/resources/resourceNewCategory.allotted%20resource/subCategory'
    #############################################################################################
    # SDNC access credentials, DB settings and preload URLs
    self.sdnc_userpass = 'admin', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
    self.sdnc_db_name = 'sdnctl'
    self.sdnc_db_user = 'sdnctl'
    self.sdnc_db_pass = 'gamma'
    self.sdnc_db_port = self.get_k8s_service_endpoint_info('mariadb-galera','port') if self.oom_mode else '3306'
    self.sdnc_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    self.sdnc_preload_network_url = 'https://' + self.hosts['sdnc'] + \
                                    ':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-network-topology-operation'
    self.sdnc_preload_network_gra_url = 'https://' + self.hosts['sdnc'] + \
                                    ':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-network-topology-operation'
    self.sdnc_preload_vnf_url = 'https://' + self.hosts['sdnc'] + \
                                ':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-vnf-topology-operation'
    self.sdnc_preload_gra_url = 'https://' + self.hosts['sdnc'] + \
                                ':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-vf-module-topology-operation'
    self.sdnc_ar_cleanup_url = 'https://' + self.hosts['sdnc'] + ':' + self.sdnc_preloading_port + \
                               '/restconf/config/GENERIC-RESOURCE-API:'
    #############################################################################################
    # MARIADB-GALERA settings
    self.mariadb_galera_endpoint_ip = self.get_k8s_service_endpoint_info('mariadb-galera','ip')
    self.mariadb_galera_endpoint_port = self.get_k8s_service_endpoint_info('mariadb-galera','port')
    #############################################################################################
    # SO urls, note: do NOT add a '/' at the end of the url
    self.so_req_api_url = {'v4': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances',
                           'v5': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances'}
    self.so_check_progress_api_url = 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/orchestrationRequests/v6'
    self.so_userpass = 'InfraPortalClient', 'password1$'
    self.so_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    self.so_db_name = 'catalogdb'
    self.so_db_user = 'root'
    self.so_db_pass = 'secretpassword'
    self.so_db_host = self.mariadb_galera_endpoint_ip if self.oom_mode else self.hosts['so']
    self.so_db_port = self.mariadb_galera_endpoint_port if self.oom_mode else '3306'
    # VPP REST endpoints; '{0}' is later filled with the target VM IP
    self.vpp_inf_url = 'http://{0}:8183/restconf/config/ietf-interfaces:interfaces'
    self.vpp_api_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    self.vpp_api_userpass = ('admin', 'admin')
    self.vpp_ves_url= 'http://{0}:8183/restconf/config/vesagent:vesagent'
    #############################################################################################
    # Policy framework access settings; '{0}' is filled with a cluster IP
    self.policy_userpass = ('healthcheck', 'zb!XztG34')
    self.policy_headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
    self.policy_api_url = 'https://{0}:6969/policy/api/v1/policytypes/onap.policies.controlloop.Operational/versions/1.0.0/policies'
    self.policy_pap_get_url = 'https://{0}:6969/policy/pap/v1/pdps'
    self.policy_pap_json = {'policies': [{'policy-id': 'operational.vcpe'}]}
    self.policy_pap_post_url = self.policy_pap_get_url + '/policies'
    self.policy_api_service_name = 'policy-api'
    self.policy_pap_service_name = 'policy-pap'
    #############################################################################################
    # AAI cloud-region query URL and request headers
    self.aai_region_query_url = 'https://' + self.oom_so_sdnc_aai_ip + ':' +\
                                self.aai_query_port +\
                                '/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/' +\
                                self.cloud['--os-region-name']
    self.aai_headers = {'Accept': 'application/json',
                        'Content-Type': 'application/json',
                        'X-FromAppId': 'postman', 'X-TransactionId': '9999'}
def _load_config(self, cfg_file='vcpeconfig.yaml'):
    """
    Reads vcpe config file and injects settings as object's attributes
    :param cfg_file: Configuration file path
    """
    with open(cfg_file, 'r') as cfg:
        cfg_yml = yaml.full_load(cfg)
    except Exception as e:
        self.logger.error('Error loading configuration: ' + str(e))
    # dump the parsed settings to ease troubleshooting
    self.logger.debug('\n' + yaml.dump(cfg_yml))
    # Use setattr to load config file keys as VcpeCommon class' object
    # Check config isn't empty
    if cfg_yml is not None:
        for cfg_key in cfg_yml:
            setattr(self, cfg_key, cfg_yml[cfg_key])
    except TypeError as e:
        self.logger.error('Unable to parse config file: ' + str(e))
def heatbridge(self, openstack_stack_name, svc_instance_uuid):
    """
    Add vserver information to AAI

    :param openstack_stack_name: name of the deployed HEAT stack
    :param svc_instance_uuid: AAI service instance uuid to associate
    """
    self.logger.info('Adding vServer information to AAI for {0}'.format(openstack_stack_name))
    if not self.oom_mode:
        # HEAT deployment: run the demo heatbridge script on the robot VM over ssh
        cmd = '/opt/demo.sh heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid)
        ret = commands.getstatusoutput("ssh -i onap_dev root@{0} '{1}'".format(self.hosts['robot'], cmd))
        self.logger.debug('%s', ret)
    # NOTE(review): presumably the OOM (else) branch printing manual
    # operator instructions — confirm placement against full source
    print('To add vGMUX vserver info to AAI, do the following:')
    print('- ssh to rancher')
    print('- cd /root/oom/kubernetes/robot')
    print('- ./demo-k8s.sh onap heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid))
def get_brg_mac_from_sdnc(self):
    """
    Check table DHCP_MAP in the SDNC DB. Find the newly instantiated BRG MAC address.
    Note that there might be multiple BRGs, the most recently instantiated BRG always has the largest IP address.
    """
    # NOTE(review): both assignments appear unconditional in this view;
    # presumably selected by an oom_mode branch — confirm
    db_host=self.mariadb_galera_endpoint_ip
    db_host=self.hosts['mariadb-galera']
    cnx = mysql.connector.connect(user=self.sdnc_db_user,
                                  password=self.sdnc_db_pass,
                                  database=self.sdnc_db_name,
                                  port=self.sdnc_db_port)
    cursor = cnx.cursor()
    query = "SELECT * from DHCP_MAP"
    cursor.execute(query)
    self.logger.debug('DHCP_MAP table in SDNC')
    # rows are (mac, ip) pairs; the last octet of the IP identifies the host
    for mac, ip in cursor:
        self.logger.debug(mac + ' - ' + ip)
        this_host = int(ip.split('.')[-1])
    except AssertionError:
        self.logger.error('Failed to obtain BRG MAC address from database')
def execute_cmds_mariadb(self, cmds):
    """Run *cmds* against the SDNC schema via the mariadb-galera k8s endpoint."""
    self.execute_cmds_db(cmds,
                         dbuser=self.sdnc_db_user,
                         dbpass=self.sdnc_db_pass,
                         dbname=self.sdnc_db_name,
                         host=self.mariadb_galera_endpoint_ip,
                         port=self.mariadb_galera_endpoint_port)
def execute_cmds_sdnc_db(self, cmds):
    """Run *cmds* against the SDNC database on the 'sdnc' host."""
    self.execute_cmds_db(cmds,
                         dbuser=self.sdnc_db_user,
                         dbpass=self.sdnc_db_pass,
                         dbname=self.sdnc_db_name,
                         host=self.hosts['sdnc'],
                         port=self.sdnc_db_port)
def execute_cmds_so_db(self, cmds):
    """Run *cmds* against the SO catalog database."""
    self.execute_cmds_db(cmds,
                         dbuser=self.so_db_user,
                         dbpass=self.so_db_pass,
                         dbname=self.so_db_name,
                         host=self.so_db_host,
                         port=self.so_db_port)
def execute_cmds_db(self, cmds, dbuser, dbpass, dbname, host, port):
    """Execute a list of SQL statements against a MySQL/MariaDB database.

    Opens a connection, runs every statement in *cmds*, commits once at
    the end, and always closes the cursor and connection — previously
    both leaked if a statement raised.

    :param cmds: iterable of SQL statement strings
    :param dbuser: database user name
    :param dbpass: database password
    :param dbname: schema name
    :param host: database host address
    :param port: database port
    """
    cnx = mysql.connector.connect(user=dbuser, password=dbpass, database=dbname, host=host, port=port)
    try:
        cursor = cnx.cursor()
        try:
            for cmd in cmds:
                self.logger.debug(cmd)
                cursor.execute(cmd)
                self.logger.debug('%s', cursor)
            # single commit so the statements take effect as one batch
            cnx.commit()
        finally:
            cursor.close()
    finally:
        cnx.close()
def find_file(self, file_name_keyword, file_ext, search_dir):
    """
    :param file_name_keyword: keyword used to look for the csar file, case insensitive matching, e.g, infra
    :param file_ext: e.g., csar, json
    :param search_dir path to search
    :return: path name of the file
    """
    file_name_keyword = file_name_keyword.lower()
    file_ext = file_ext.lower()
    # normalize the extension so both 'csar' and '.csar' are accepted
    if not file_ext.startswith('.'):
        file_ext = '.' + file_ext
    for file_name in os.listdir(search_dir):
        file_name_lower = file_name.lower()
        if file_name_keyword in file_name_lower and file_name_lower.endswith(file_ext):
            # presumably only logged when a match was already recorded — confirm guard
            self.logger.error('Multiple files found for *{0}*.{1} in '
                              'directory {2}'.format(file_name_keyword, file_ext, search_dir))
            filenamepath = os.path.abspath(os.path.join(search_dir, file_name))
    # presumably the no-match branch — confirm guard
    self.logger.error("Cannot find *{0}*{1} in directory {2}".format(file_name_keyword, file_ext, search_dir))
def network_name_to_subnet_name(network_name):
    """Derive a subnet name from a generated network name.

    Inserts the token 'subnet' just before the trailing timestamp field.
    :param network_name: example: vcpe_net_cpe_signal_201711281221
    :return: vcpe_net_cpe_signal_subnet_201711281221
    """
    parts = network_name.split('_')
    return '_'.join(parts[:-1] + ['subnet', parts[-1]])
def set_network_name(self, network_name):
    """Build the openstack CLI command that renames network 'ONAP-NW1' to *network_name*."""
    # flatten the credential mapping into CLI options
    param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
    openstackcmd = 'openstack ' + param
    cmd = ' '.join([openstackcmd, 'network set --name', network_name, 'ONAP-NW1'])
def set_subnet_name(self, network_name):
    """
    Set the subnet name based on the network name.
    Example: network_name = vcpe_net_cpe_signal_201711281221
    set subnet name to vcpe_net_cpe_signal_subnet_201711281221
    """
    param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
    openstackcmd = 'openstack ' + param
    # expected results: | subnets | subnet_id |
    subnet_info = os.popen(openstackcmd + ' network show ' + network_name + ' |grep subnets').read().split('|')
    if len(subnet_info) > 2 and subnet_info[1].strip() == 'subnets':
        subnet_id = subnet_info[2].strip()
        subnet_name = self.network_name_to_subnet_name(network_name)
        cmd = ' '.join([openstackcmd, 'subnet set --name', subnet_name, subnet_id])
        self.logger.info("Subnet name set to: " + subnet_name)
    # NOTE(review): presumably the failure (else) branch — confirm against full source
        self.logger.error("Can't get subnet info from network name: " + network_name)
def set_closed_loop_policy(self, policy_template_file):
    """Create the vCPE operational (closed-loop) policy from a JSON template
    and push it to Policy PAP, skipping creation when it already exists."""
    # Gather policy services cluster ips
    p_api_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_api_service_name)
    p_pap_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_pap_service_name)
    # Read policy json from file
    with open(policy_template_file) as f:
        policy_json = json.load(f)
        self.logger.error(policy_template_file + " doesn't seem to contain valid JSON data")
    # Check policy already applied
    policy_exists_req = requests.get(self.policy_pap_get_url.format(
        p_pap_cluster_ip), auth=self.policy_userpass,
        verify=False, headers=self.policy_headers)
    if policy_exists_req.status_code != 200:
        self.logger.error('Failure in checking CL policy existence. '
                          'Policy-pap responded with HTTP code {0}'.format(
                          policy_exists_req.status_code))
    policy_exists_json = policy_exists_req.json()
    except ValueError as e:
        self.logger.error('Policy-pap request failed: ' + e.message)
    # probe for an existing 'operational.vcpe' entry; AssertionError means it is present
    assert policy_exists_json['groups'][0]['pdpSubgroups'] \
        [1]['policies'][0]['name'] != 'operational.vcpe'
    except AssertionError:
        self.logger.info('vCPE closed loop policy already exists, not applying')
        pass  # policy doesn't exist
    # create the policy via policy-api
    policy_create_req = requests.post(self.policy_api_url.format(
        p_api_cluster_ip), auth=self.policy_userpass,
        json=policy_json, verify=False,
        headers=self.policy_headers)
    # Get the policy id from policy-api response
    if policy_create_req.status_code != 200:
        self.logger.error('Failed creating policy. Policy-api responded'
                          ' with HTTP code {0}'.format(policy_create_req.status_code))
    policy_version = json.loads(policy_create_req.text)['policy-version']
    except (KeyError, ValueError):
        self.logger.error('Policy API response not understood:')
        self.logger.debug('\n' + str(policy_create_req.text))
    # Inject the policy into Policy PAP
    self.policy_pap_json['policies'].append({'policy-version': policy_version})
    policy_insert_req = requests.post(self.policy_pap_post_url.format(
        p_pap_cluster_ip), auth=self.policy_userpass,
        json=self.policy_pap_json, verify=False,
        headers=self.policy_headers)
    if policy_insert_req.status_code != 200:
        self.logger.error('Policy PAP request failed with HTTP code'
                          '{0}'.format(policy_insert_req.status_code))
    self.logger.info('Successully pushed closed loop Policy')
def is_node_in_aai(self, node_type, node_uuid):
    """Return True when AAI's nodes-query finds a node of *node_type*
    ('service' or 'vnf') with the given *node_uuid*."""
    search_node_type = None
    if node_type == 'service':
        search_node_type = 'service-instance'
        key = 'service-instance-id'
    elif node_type == 'vnf':
        search_node_type = 'generic-vnf'
        # presumably key = 'vnf-id' is set here and the error below sits in
        # an else branch — confirm against full source
        logging.error('Invalid node_type: ' + node_type)
    url = 'https://{0}:{1}/aai/v11/search/nodes-query?search-node-type={2}&filter={3}:EQUALS:{4}'.format(
        self.hosts['aai-inst1'], self.aai_query_port, search_node_type, key, node_uuid)
    headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-FromAppID': 'vCPE-Robot', 'X-TransactionId': 'get_aai_subscr'}
    r = requests.get(url, headers=headers, auth=self.aai_userpass, verify=False)
    self.logger.debug('aai query: ' + url)
    self.logger.debug('aai response:\n' + json.dumps(response, indent=4, sort_keys=True))
    return 'result-data' in response
def extract_ip_from_str(net_addr, net_addr_len, sz):
    """
    :param net_addr: e.g. 10.5.12.0
    :param net_addr_len: e.g. 24
    :param sz: arbitrary text that is scanned for IPv4 addresses
    :return: the first IP address matching the network, e.g. 10.5.12.3
    """
    # NOTE: unicode() is a Python 2 builtin; this module predates Python 3
    network = ipaddress.ip_network(unicode('{0}/{1}'.format(net_addr, net_addr_len)), strict=False)
    ip_list = re.findall(r'[0-9]+(?:\.[0-9]+){3}', sz)
    # a candidate matches when its /len network equals the requested network
    this_net = ipaddress.ip_network(unicode('{0}/{1}'.format(ip, net_addr_len)), strict=False)
    if this_net == network:
def get_pod_node_oam_ip(self, pod):
    """
    :Assuming kubectl is available and configured by default config (~/.kube/config)
    :param pod: pod name substring, e.g. 'sdnc-sdnc-0'
    :return pod's cluster node oam ip (10.0.0.0/16)
    """
    config.load_kube_config()
    api = client.CoreV1Api()
    # quiet the kubernetes client's own debug logging
    kslogger = logging.getLogger('kubernetes')
    kslogger.setLevel(logging.INFO)
    res = api.list_pod_for_all_namespaces()
    if pod in i.metadata.name:
        self.logger.debug("found {0}\t{1}\t{2}".format(i.metadata.name, i.status.host_ip, i.spec.node_name))
        ret = i.status.host_ip
    # fallback: ask the operator when autodetection failed
    ret = raw_input("Enter " + self.sdnc_controller_pod + " pod cluster node OAM IP address(10.0.0.0/16): ")
def get_pod_node_public_ip(self, pod):
    """
    :Assuming kubectl is available and configured by default config (~/.kube/config)
    :param pod: pod name substring, e.g. 'sdnc-sdnc-0'
    :return pod's cluster node public ip (i.e. 10.12.0.0/16)
    """
    config.load_kube_config()
    api = client.CoreV1Api()
    # quiet the kubernetes client's own debug logging
    kslogger = logging.getLogger('kubernetes')
    kslogger.setLevel(logging.INFO)
    res = api.list_pod_for_all_namespaces()
    if pod in i.metadata.name:
        # resolve the node's public address through the OpenStack nova API
        ret = self.get_vm_public_ip_by_nova(i.spec.node_name)
        self.logger.debug("found node {0} public ip: {1}".format(i.spec.node_name, ret))
    # fallback: ask the operator when autodetection failed
    ret = raw_input("Enter " + self.sdnc_controller_pod + " pod cluster node public IP address(i.e. " + self.external_net_addr + "): ")
def get_vm_public_ip_by_nova(self, vm):
    """
    This method uses openstack nova api to retrieve vm public ip
    :param vm: vm name
    :return: the vm's IP inside the external network, if found
    """
    subnet = IPNetwork('{0}/{1}'.format(self.external_net_addr, self.external_net_prefix_len))
    nova = openstackclient.Client(2, self.cloud['--os-username'], self.cloud['--os-password'], self.cloud['--os-tenant-id'], self.cloud['--os-auth-url'])
    for i in nova.servers.list():
        for k, v in i.networks.items():
            # keep only addresses that fall inside the external subnet
            if IPAddress(ip) in subnet:
def get_vm_ip(self, keywords, net_addr=None, net_addr_len=None):
    """
    :param keywords: list of keywords to search for vm, e.g. ['bng', 'gmux', 'brg']
    :param net_addr: e.g. 10.12.5.0
    :param net_addr_len: e.g. 24
    :return: dictionary {keyword: ip}
    """
    # defaults fall back to the configured external network (guards elided in this view)
        net_addr = self.external_net_addr
        net_addr_len = self.external_net_prefix_len
    # build a 'nova list' command from the credentials (identity options excluded)
    param = ' '.join([k + ' ' + v for k, v in self.cloud.items() if 'identity' not in k])
    openstackcmd = 'nova ' + param + ' list'
    self.logger.debug(openstackcmd)
    results = os.popen(openstackcmd).read()
    all_vm_ip_dict = self.extract_vm_ip_as_dict(results, net_addr, net_addr_len)
    # drop stale timestamped duplicates, keep only the newest of each VM
    latest_vm_list = self.remove_old_vms(all_vm_ip_dict.keys(), self.cpe_vm_prefix)
    latest_vm_ip_dict = {vm: all_vm_ip_dict[vm] for vm in latest_vm_list}
    ip_dict = self.select_subset_vm_ip(latest_vm_ip_dict, keywords)
    ip_dict.update(self.get_oom_onap_vm_ip(keywords))
    if len(ip_dict) != len(keywords):
        self.logger.error('Cannot find all desired IP addresses for %s.', keywords)
        self.logger.error(json.dumps(ip_dict, indent=4, sort_keys=True))
        self.logger.error('Temporarily continue.. remember to check back vcpecommon.py line: 396')
def get_oom_onap_vm_ip(self, keywords):
    # every ONAP component keyword in host_names resolves to the same
    # k8s host external IP (loop/accumulator lines elided in this view)
    if vm in self.host_names:
        vm_ip[vm] = self.oom_so_sdnc_aai_ip
def get_k8s_service_cluster_ip(self, service):
    """
    Returns cluster IP for a given service
    :param service: name of the service
    :return: cluster IP
    """
    config.load_kube_config()
    api = client.CoreV1Api()
    # quiet the kubernetes client's own debug logging
    kslogger = logging.getLogger('kubernetes')
    kslogger.setLevel(logging.INFO)
    resp = api.read_namespaced_service(service, self.onap_namespace)
    except client.rest.ApiException as e:
        self.logger.error('Error while making k8s API request: ' + e.body)
    return resp.spec.cluster_ip
def get_k8s_service_endpoint_info(self, service, subset):
    """
    Returns endpoint data for a given service and subset. If there
    is more than one endpoint returns data for the first one from
    the list that API returned.
    :param service: name of the service
    :param subset: subset name, one of "ip","port"
    :return: endpoint IP address or port, depending on *subset*
    """
    config.load_kube_config()
    api = client.CoreV1Api()
    # quiet the kubernetes client's own debug logging
    kslogger = logging.getLogger('kubernetes')
    kslogger.setLevel(logging.INFO)
    resp = api.read_namespaced_endpoints(service, self.onap_namespace)
    except client.rest.ApiException as e:
        self.logger.error('Error while making k8s API request: ' + e.body)
    # presumably guarded by: if subset == "ip":  — guard elided in this view
    return resp.subsets[0].addresses[0].ip
    elif subset == "port":
        return resp.subsets[0].ports[0].port
        self.logger.error("Unsupported subset type")
def extract_vm_ip_as_dict(self, novalist_results, net_addr, net_addr_len):
    """Parse 'nova list' table output into a {vm_name: ip} mapping for the given network."""
    for line in novalist_results.split('\n'):
        # nova's table output uses '|'-separated columns
        fields = line.split('|')
        ip = self.extract_ip_from_str(net_addr, net_addr_len, ip_info)
        vm_ip_dict[vm_name] = ip
def remove_old_vms(self, vm_list, prefix):
    """
    For vms with format name_timestamp, only keep the one with the latest timestamp.
    Example:
    zdcpe1cpe01brgemu01_201805222148 (drop this)
    zdcpe1cpe01brgemu01_201805222229 (keep this)
    zdcpe1cpe01gw01_201805162201
    """
    same_type_vm_dict = {}
    fields = vm.split('_')
    # names shaped like <base>_<YYYYMMDDhhmm>: remember only the newest per base
    # (string comparison works because the timestamp is fixed-width digits)
    if vm.startswith(prefix) and len(fields) == 2 and len(fields[-1]) == len('201805222148') and fields[-1].isdigit():
        if vm > same_type_vm_dict.get(fields[0], '0'):
            same_type_vm_dict[fields[0]] = vm
        # non-timestamped names are kept as-is
        new_vm_list.append(vm)
    new_vm_list.extend(same_type_vm_dict.values())
def select_subset_vm_ip(self, all_vm_ip_dict, vm_name_keyword_list):
    """For each keyword, pick the IP of a VM whose name contains that keyword."""
    for keyword in vm_name_keyword_list:
        for vm, ip in all_vm_ip_dict.items():
            # presumably guarded by: if keyword in vm — guard elided in this view
            vm_ip_dict[keyword] = ip
def del_vgmux_ves_mode(self):
    """Delete the VES 'mode' configuration on the vGMUX via its REST API."""
    mode_url = '{0}/mode'.format(self.vpp_ves_url.format(self.hosts['mux']))
    resp = requests.delete(mode_url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
    self.logger.debug('%s', resp)
def del_vgmux_ves_collector(self):
    """Delete the VES collector configuration on the vGMUX via its REST API."""
    config_url = '{0}/config'.format(self.vpp_ves_url.format(self.hosts['mux']))
    resp = requests.delete(config_url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
    self.logger.debug('%s', resp)
def set_vgmux_ves_collector(self ):
    """Point the vGMUX VES agent at the DCAE VES collector."""
    url = self.vpp_ves_url.format(self.hosts['mux'])
    # VES agent config payload (enclosing assignment elided in this view);
    # the 30xxx port is used for OOM deployments, 8081 otherwise
    {'server-addr': self.hosts[self.dcae_ves_collector_name],
     'server-port': '30235' if self.oom_mode else '8081',
     'read-interval': '10',
    r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
    self.logger.debug('%s', r)
def set_vgmux_packet_loss_rate(self, lossrate, vg_vnf_instance_name):
    """Configure the vGMUX VES agent's demo packet-loss reporting.

    :param lossrate: base packet loss value to report
    :param vg_vnf_instance_name: value used as the VES 'source-name'
    """
    url = self.vpp_ves_url.format(self.hosts['mux'])
    # demo-mode payload (enclosing assignment elided in this view)
    {"working-mode": "demo",
     "base-packet-loss": str(lossrate),
     "source-name": vg_vnf_instance_name
    r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
    self.logger.debug('%s', r)
# return all the VxLAN interface names of BRG or vGMUX based on the IP address
def get_vxlan_interfaces(self, ip, print_info=False):
    url = self.vpp_inf_url.format(ip)
    self.logger.debug('url is this: %s', url)
    r = requests.get(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
    data = r.json()['interfaces']['interface']
    # presumably under `if print_info:` iterating `data` — confirm against full source
    if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel':
        print(json.dumps(inf, indent=4, sort_keys=True))
    # only interfaces of type v3po:vxlan-tunnel are returned
    return [inf['name'] for inf in data if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel']
# delete all VxLAN interfaces of each host
def delete_vxlan_interfaces(self, host_dic):
    for host, ip in host_dic.items():
        self.logger.info('{0}: Getting VxLAN interfaces'.format(host))
        inf_list = self.get_vxlan_interfaces(ip)
        # first remove the L2 cross-connect from each tunnel...
        self.logger.info("{0}: Deleting VxLAN crossconnect {1}".format(host, inf))
        url = self.vpp_inf_url.format(ip) + '/interface/' + inf + '/v3po:l2'
        requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
        # ...then delete the tunnel interfaces themselves
        self.logger.info("{0}: Deleting VxLAN interface {1}".format(host, inf))
        url = self.vpp_inf_url.format(ip) + '/interface/' + inf
        requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
        # verify nothing is left behind on this host
        if len(self.get_vxlan_interfaces(ip)) > 0:
            self.logger.error("Error deleting VxLAN from {0}, try to restart the VM, IP is {1}.".format(host, ip))
        self.logger.info("{0}: no VxLAN interface found, nothing to delete".format(host))
def save_object(obj, filepathname):
    """Serialize *obj* with pickle and write it to *filepathname*."""
    with open(filepathname, 'wb') as sink:
        sink.write(pickle.dumps(obj))
def load_object(filepathname):
    """Read *filepathname* and return the unpickled object stored there."""
    with open(filepathname, 'rb') as source:
        payload = source.read()
    return pickle.loads(payload)
def increase_ip_address_or_vni_in_template(vnf_template_file, vnf_parameter_name_list):
    """Bump the last dotted field (IP octet or VNI) of selected parameters
    inside a VNF-API preload template, rewriting the file in place.

    :param vnf_template_file: path to the JSON preload template
    :param vnf_parameter_name_list: names of the parameters to adjust
    """
    with open(vnf_template_file) as json_input:
        json_data = json.load(json_input)
    param_list = json_data['VNF-API:input']['VNF-API:vnf-topology-information']['VNF-API:vnf-parameters']
    for param in param_list:
        if param['vnf-parameter-name'] in vnf_parameter_name_list:
            ipaddr_or_vni = param['vnf-parameter-value'].split('.')
            number = int(ipaddr_or_vni[-1])
            # NOTE(review): the increment/wrap-around of 'number' is not
            # visible in this view — confirm against full source
            ipaddr_or_vni[-1] = str(number)
            param['vnf-parameter-value'] = '.'.join(ipaddr_or_vni)
    assert json_data is not None
    with open(vnf_template_file, 'w') as json_output:
        json.dump(json_data, json_output, indent=4, sort_keys=True)
def save_preload_data(self, preload_data):
    """Persist the preload data dictionary to its bookkeeping file."""
    destination = self.preload_dict_file
    self.save_object(preload_data, destination)
def load_preload_data(self):
    """Return the preload data previously stored by save_preload_data."""
    source_path = self.preload_dict_file
    return self.load_object(source_path)
def save_vgmux_vnf_name(self, vgmux_vnf_name):
    """Persist the vGMUX VNF name to its bookkeeping file."""
    destination = self.vgmux_vnf_name_file
    self.save_object(vgmux_vnf_name, destination)
def load_vgmux_vnf_name(self):
    """Return the vGMUX VNF name previously stored by save_vgmux_vnf_name."""
    source_path = self.vgmux_vnf_name_file
    return self.load_object(source_path)