11 import mysql.connector
16 from novaclient import client as openstackclient
17 from kubernetes import client, config
18 from netaddr import IPAddress, IPNetwork
22 def __init__(self, extra_host_names=None):
# Build the full vCPE test configuration: component host IPs, ports, URLs,
# credentials and state-file paths.
# NOTE(review): attributes read below (self.oom_mode, self.onap_environment,
# self.cloud, self.external_net_addr, self.dcae_ves_collector_name, ...) are not
# assigned in the visible lines — presumably injected from the YAML config via
# _load_config before use; confirm call order.
# :param extra_host_names: optional extra VM name keywords whose IPs must be resolved
23 self.logger = logging.getLogger(__name__)
24 self.logger.setLevel(logging.DEBUG)
25 self.logger.info('Initializing configuration')
27 # Read configuration from config file
30 self.sdnc_controller_pod = '-'.join([self.onap_environment, 'sdnc-sdnc-0'])
31 # OOM: this is the address that the brg and bng will nat for sdnc access - 10.0.0.x address of k8 host for sdnc-0 container
32 self.sdnc_oam_ip = self.get_pod_node_oam_ip(self.sdnc_controller_pod)
33 # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
34 self.oom_so_sdnc_aai_ip = self.get_pod_node_public_ip(self.sdnc_controller_pod)
35 # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
36 self.oom_dcae_ves_collector = self.oom_so_sdnc_aai_ip
37 # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
38 self.mr_ip_addr = self.oom_so_sdnc_aai_ip
39 self.mr_ip_port = '30227'
# Port selection: k8s NodePort numbers (30xxx) in OOM mode, in-VM default ports otherwise
40 self.so_nbi_port = '30277' if self.oom_mode else '8080'
41 self.sdnc_preloading_port = '30267' if self.oom_mode else '8282'
42 self.aai_query_port = '30233' if self.oom_mode else '8443'
43 self.sniro_port = '30288' if self.oom_mode else '8080'
# ONAP component VM name keywords whose IP addresses must be discovered
45 self.host_names = ['sdc', 'so', 'sdnc', 'robot', 'aai-inst1', self.dcae_ves_collector_name, 'mariadb-galera']
47 self.host_names.extend(extra_host_names)
# Map of keyword -> IP address for all required hosts
49 self.hosts = self.get_vm_ip(self.host_names, self.external_net_addr, self.external_net_prefix_len)
50 # this is the keyword used to name vgw stack, must not be used in other stacks
51 self.vgw_name_keyword = 'base_vcpe_vgw'
52 # this is the file that will keep the index of last assigned SO name
53 self.vgw_vfmod_name_index_file= '__var/vgw_vfmod_name_index'
# Pickle files under __var/ persist state between script invocations
54 self.svc_instance_uuid_file = '__var/svc_instance_uuid'
55 self.preload_dict_file = '__var/preload_dict'
56 self.vgmux_vnf_name_file = '__var/vgmux_vnf_name'
57 self.product_family_id = 'f9457e8c-4afd-45da-9389-46acd9bf5116'
58 self.custom_product_family_id = 'a9a77d5a-123e-4ca2-9eb9-0b015d2ee0fb'
59 self.instance_name_prefix = {
60 'service': 'vcpe_svc',
61 'network': 'vcpe_net',
63 'vfmodule': 'vcpe_vfmodule'
# (user, password) tuple for AAI basic auth
65 self.aai_userpass = 'AAI', 'AAI'
66 self.os_tenant_id = self.cloud['--os-tenant-id']
67 self.os_region_name = self.cloud['--os-region-name']
68 self.common_preload_config['pub_key'] = self.pub_key
# SNIRO homing emulator (wiremock) admin endpoint on the robot VM
69 self.sniro_url = 'http://' + self.hosts['robot'] + ':' + self.sniro_port + '/__admin/mappings'
70 self.sniro_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
71 self.homing_solution = 'sniro'  # value is either 'sniro' or 'oof'
72 # self.homing_solution = 'oof'
73 self.customer_location_used_by_oof = {
74 "customerLatitude": "32.897480",
75 "customerLongitude": "-97.040443",
76 "customerName": "some_company"
79 #############################################################################################
# SDC back-end settings
81 self.sdc_be_port = '30204'
82 self.sdc_be_request_userpass = 'vid', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
83 self.sdc_be_request_headers = {'X-ECOMP-InstanceID': 'VID'}
84 self.sdc_be_url_prefix = 'https://' + self.hosts['sdc'] + ':' + self.sdc_be_port
85 self.sdc_service_list_url = self.sdc_be_url_prefix + '/sdc/v1/catalog/services'
# SDC front-end settings
87 self.sdc_fe_port = '30207'
88 self.sdc_fe_request_userpass = 'beep', 'boop'
89 self.sdc_fe_request_headers = {'USER_ID': 'demo', 'Content-Type': 'application/json'}
90 self.sdc_fe_url_prefix = 'https://' + self.hosts['sdc'] + ':' + self.sdc_fe_port
91 self.sdc_get_category_list_url = self.sdc_fe_url_prefix + '/sdc1/feProxy/rest/v1/categories'
92 self.sdc_create_allotted_resource_subcategory_url = self.sdc_fe_url_prefix + '/sdc1/feProxy/rest/v1/category/resources/resourceNewCategory.allotted%20resource/subCategory'
94 #############################################################################################
# SDNC settings: REST credentials, DB credentials and preload endpoints
96 self.sdnc_userpass = 'admin', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
97 self.sdnc_db_name = 'sdnctl'
98 self.sdnc_db_user = 'sdnctl'
99 self.sdnc_db_pass = 'gamma'
100 self.sdnc_db_port = self.get_k8s_service_endpoint_info('mariadb-galera','port') if self.oom_mode else '3306'
101 self.sdnc_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
102 self.sdnc_preload_network_url = 'https://' + self.hosts['sdnc'] + \
103 ':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-network-topology-operation'
104 self.sdnc_preload_network_gra_url = 'https://' + self.hosts['sdnc'] + \
105 ':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-network-topology-operation'
106 self.sdnc_preload_vnf_url = 'https://' + self.hosts['sdnc'] + \
107 ':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-vnf-topology-operation'
108 self.sdnc_preload_gra_url = 'https://' + self.hosts['sdnc'] + \
109 ':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-vf-module-topology-operation'
110 self.sdnc_ar_cleanup_url = 'https://' + self.hosts['sdnc'] + ':' + self.sdnc_preloading_port + \
111 '/restconf/config/GENERIC-RESOURCE-API:'
113 #############################################################################################
114 # MARIADB-GALERA settings
115 self.mariadb_galera_endpoint_ip = self.get_k8s_service_endpoint_info('mariadb-galera','ip')
116 self.mariadb_galera_endpoint_port = self.get_k8s_service_endpoint_info('mariadb-galera','port')
118 #############################################################################################
119 # SO urls, note: do NOT add a '/' at the end of the url
120 self.so_req_api_url = {'v4': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances',
121 'v5': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances'}
122 self.so_check_progress_api_url = 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/orchestrationRequests/v6'
123 self.so_userpass = 'InfraPortalClient', 'password1$'
124 self.so_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
125 self.so_db_name = 'catalogdb'
126 self.so_db_user = 'root'
127 self.so_db_pass = 'secretpassword'
128 self.so_db_host = self.mariadb_galera_endpoint_ip if self.oom_mode else self.hosts['so']
129 self.so_db_port = self.mariadb_galera_endpoint_port if self.oom_mode else '3306'
# VPP (honeycomb) REST endpoints on BRG/vGMUX; '{0}' placeholder is the device IP
131 self.vpp_inf_url = 'http://{0}:8183/restconf/config/ietf-interfaces:interfaces'
132 self.vpp_api_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
133 self.vpp_api_userpass = ('admin', 'admin')
134 self.vpp_ves_url= 'http://{0}:8183/restconf/config/vesagent:vesagent'
136 #############################################################################################
# Policy framework (API + PAP) settings; '{0}' placeholder is the service cluster IP
138 self.policy_userpass = ('healthcheck', 'zb!XztG34')
139 self.policy_headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
140 self.policy_api_url = 'https://{0}:6969/policy/api/v1/policytypes/onap.policies.controlloop.Operational/versions/1.0.0/policies'
141 self.policy_pap_get_url = 'https://{0}:6969/policy/pap/v1/pdps'
142 self.policy_pap_json = {'policies': [{'policy-id': 'operational.vcpe'}]}
143 self.policy_pap_post_url = self.policy_pap_get_url + '/policies'
144 self.policy_api_service_name = 'policy-api'
145 self.policy_pap_service_name = 'policy-pap'
147 #############################################################################################
# AAI cloud-region query settings
149 self.aai_region_query_url = 'https://' + self.oom_so_sdnc_aai_ip + ':' +\
150 self.aai_query_port +\
151 '/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/' +\
152 self.cloud['--os-region-name']
153 self.aai_headers = {'Accept': 'application/json',
154 'Content-Type': 'application/json',
155 'X-FromAppId': 'postman', 'X-TransactionId': '9999'}
157 def _load_config(self, cfg_file='vcpeconfig.yaml'):
159 Reads vcpe config file and injects settings as object's attributes
160 :param cfg_file: Configuration file path
# Parse the YAML config; on read/parse failure only an error is logged
# (early-exit lines, if any, are not visible here)
164 with open(cfg_file, 'r') as cfg:
165 cfg_yml = yaml.full_load(cfg)
166 except Exception as e:
167 self.logger.error('Error loading configuration: ' + str(e))
# Dump the effective configuration for debugging
170 self.logger.debug('\n' + yaml.dump(cfg_yml))
172 # Use setattr to load config file keys as VcpeCommon class' object
175 # Check config isn't empty
176 if cfg_yml is not None:
# Each top-level YAML key becomes an attribute on this object
177 for cfg_key in cfg_yml:
178 setattr(self, cfg_key, cfg_yml[cfg_key])
179 except TypeError as e:
180 self.logger.error('Unable to parse config file: ' + str(e))
183 def heatbridge(self, openstack_stack_name, svc_instance_uuid):
185 Add vserver information to AAI
# Non-OOM: run robot's demo.sh heatbridge remotely over ssh.
# OOM: only print manual instructions for the operator (presumably in an
# else branch — the branch keyword is not visible here; confirm).
187 self.logger.info('Adding vServer information to AAI for {0}'.format(openstack_stack_name))
188 if not self.oom_mode:
189 cmd = '/opt/demo.sh heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid)
# 'commands' is the Python 2 stdlib module: returns (exit_status, output)
190 ret = commands.getstatusoutput("ssh -i onap_dev root@{0} '{1}'".format(self.hosts['robot'], cmd))
191 self.logger.debug('%s', ret)
193 print('To add vGMUX vserver info to AAI, do the following:')
194 print('- ssh to rancher')
196 print('- cd /root/oom/kubernetes/robot')
197 print('- ./demo-k8s.sh onap heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid))
199 def get_brg_mac_from_sdnc(self):
201 Check table DHCP_MAP in the SDNC DB. Find the newly instantiated BRG MAC address.
202 Note that there might be multiple BRGs, the most recently instantiated BRG always has the largest IP address.
# Choose DB host: galera service endpoint in OOM mode, dedicated VM otherwise
# (the surrounding if/else keywords are not visible here)
205 db_host=self.mariadb_galera_endpoint_ip
207 db_host=self.hosts['mariadb-galera']
209 cnx = mysql.connector.connect(user=self.sdnc_db_user,
210 password=self.sdnc_db_pass,
211 database=self.sdnc_db_name,
213 port=self.sdnc_db_port)
214 cursor = cnx.cursor()
215 query = "SELECT * from DHCP_MAP"
216 cursor.execute(query)
218 self.logger.debug('DHCP_MAP table in SDNC')
# Scan all (mac, ip) rows; the row whose IP has the largest last octet is
# taken as the most recently instantiated BRG (selection lines not visible here)
221 for mac, ip in cursor:
222 self.logger.debug(mac + ' - ' + ip)
223 this_host = int(ip.split('.')[-1])
232 except AssertionError:
233 self.logger.error('Failed to obtain BRG MAC address from database')
def execute_cmds_mariadb(self, cmds):
    """Run SQL commands against the MariaDB-Galera endpoint using SDNC credentials."""
    self.execute_cmds_db(cmds,
                         self.sdnc_db_user,
                         self.sdnc_db_pass,
                         self.sdnc_db_name,
                         self.mariadb_galera_endpoint_ip,
                         self.mariadb_galera_endpoint_port)
def execute_cmds_sdnc_db(self, cmds):
    """Run SQL commands against the SDNC database on the 'sdnc' host."""
    self.execute_cmds_db(cmds,
                         self.sdnc_db_user,
                         self.sdnc_db_pass,
                         self.sdnc_db_name,
                         self.hosts['sdnc'],
                         self.sdnc_db_port)
def execute_cmds_so_db(self, cmds):
    """Run SQL commands against the SO catalog database."""
    self.execute_cmds_db(cmds,
                         self.so_db_user,
                         self.so_db_pass,
                         self.so_db_name,
                         self.so_db_host,
                         self.so_db_port)
251 def execute_cmds_db(self, cmds, dbuser, dbpass, dbname, host, port):
# Generic helper: open a MySQL connection and execute each statement in cmds,
# logging each command and the cursor state (the loop header and the
# commit/close lines are not visible here)
252 cnx = mysql.connector.connect(user=dbuser, password=dbpass, database=dbname, host=host, port=port)
253 cursor = cnx.cursor()
255 self.logger.debug(cmd)
257 self.logger.debug('%s', cursor)
262 def find_file(self, file_name_keyword, file_ext, search_dir):
264 :param file_name_keyword: keyword used to look for the csar file, case insensitive matching, e.g, infra
265 :param file_ext: e.g., csar, json
266 :param search_dir path to search
267 :return: path name of the file
# Normalize to case-insensitive comparison and ensure extension has a dot
269 file_name_keyword = file_name_keyword.lower()
270 file_ext = file_ext.lower()
271 if not file_ext.startswith('.'):
272 file_ext = '.' + file_ext
# Collect every file in search_dir whose lowercase name contains the keyword
# and carries the extension (accumulator lines not visible here)
275 for file_name in os.listdir(search_dir):
276 file_name_lower = file_name.lower()
277 if file_name_keyword in file_name_lower and file_name_lower.endswith(file_ext):
# Ambiguous match is reported as an error (exit/return lines not visible here)
279 self.logger.error('Multiple files found for *{0}*.{1} in '
280 'directory {2}'.format(file_name_keyword, file_ext, search_dir))
# Unique match: presumably returned as an absolute path — confirm
282 filenamepath = os.path.abspath(os.path.join(search_dir, file_name))
287 self.logger.error("Cannot find *{0}*{1} in directory {2}".format(file_name_keyword, file_ext, search_dir))
def network_name_to_subnet_name(network_name):
    """
    Derive the subnet name from a network name by inserting 'subnet'
    before the trailing timestamp field.
    :param network_name: example: vcpe_net_cpe_signal_201711281221
    :return: vcpe_net_cpe_signal_subnet_201711281221
    """
    # Split on '_' and splice 'subnet' in just before the last field
    fields = network_name.split('_')
    fields.insert(-1, 'subnet')
    return '_'.join(fields)
300 def set_network_name(self, network_name):
# Rename the OpenStack network 'ONAP-NW1' to network_name using the CLI;
# self.cloud holds '--os-*' option/value pairs for authentication
301 param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
302 openstackcmd = 'openstack ' + param
303 cmd = ' '.join([openstackcmd, 'network set --name', network_name, 'ONAP-NW1'])
# NOTE(review): cmd is built but its execution is not visible in these lines —
# confirm it is actually run (e.g. via os.popen)
306 def set_subnet_name(self, network_name):
308 Example: network_name = vcpe_net_cpe_signal_201711281221
309 set subnet name to vcpe_net_cpe_signal_subnet_201711281221
312 param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
313 openstackcmd = 'openstack ' + param
315 # expected results: | subnets | subnet_id |
# Parse the subnet id out of 'openstack network show' pipe-delimited output
316 subnet_info = os.popen(openstackcmd + ' network show ' + network_name + ' |grep subnets').read().split('|')
317 if len(subnet_info) > 2 and subnet_info[1].strip() == 'subnets':
318 subnet_id = subnet_info[2].strip()
319 subnet_name = self.network_name_to_subnet_name(network_name)
# Rename the subnet (the command execution line is not visible here — confirm)
320 cmd = ' '.join([openstackcmd, 'subnet set --name', subnet_name, subnet_id])
322 self.logger.info("Subnet name set to: " + subnet_name)
325 self.logger.error("Can't get subnet info from network name: " + network_name)
328 def set_closed_loop_policy(self, policy_template_file):
# Push the vCPE operational closed-loop policy: create it via policy-api,
# then deploy it to the PDP groups via policy-pap. Skips work if the policy
# already exists. (Several try/except/return lines are not visible here.)
329 # Gather policy services cluster ips
330 p_api_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_api_service_name)
331 p_pap_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_pap_service_name)
333 # Read policy json from file
334 with open(policy_template_file) as f:
336 policy_json = json.load(f)
338 self.logger.error(policy_template_file + " doesn't seem to contain valid JSON data")
341 # Check policy already applied
342 policy_exists_req = requests.get(self.policy_pap_get_url.format(
343 p_pap_cluster_ip), auth=self.policy_userpass,
344 verify=False, headers=self.policy_headers)
345 if policy_exists_req.status_code != 200:
346 self.logger.error('Failure in checking CL policy existence. '
347 'Policy-pap responded with HTTP code {0}'.format(
348 policy_exists_req.status_code))
352 policy_exists_json = policy_exists_req.json()
353 except ValueError as e:
# NOTE(review): e.message is Python 2 only
354 self.logger.error('Policy-pap request failed: ' + e.message)
# Probe the PDP group listing for the 'operational.vcpe' policy name;
# AssertionError here means the policy is already deployed
358 assert policy_exists_json['groups'][0]['pdpSubgroups'] \
359 [1]['policies'][0]['name'] != 'operational.vcpe'
360 except AssertionError:
361 self.logger.info('vCPE closed loop policy already exists, not applying')
364 pass  # policy doesn't exist
# Create the policy via policy-api
367 policy_create_req = requests.post(self.policy_api_url.format(
368 p_api_cluster_ip), auth=self.policy_userpass,
369 json=policy_json, verify=False,
370 headers=self.policy_headers)
371 # Get the policy id from policy-api response
372 if policy_create_req.status_code != 200:
373 self.logger.error('Failed creating policy. Policy-api responded'
374 ' with HTTP code {0}'.format(policy_create_req.status_code))
378 policy_version = json.loads(policy_create_req.text)['policy-version']
379 except (KeyError, ValueError):
380 self.logger.error('Policy API response not understood:')
381 self.logger.debug('\n' + str(policy_create_req.text))
383 # Inject the policy into Policy PAP
384 self.policy_pap_json['policies'].append({'policy-version': policy_version})
385 policy_insert_req = requests.post(self.policy_pap_post_url.format(
386 p_pap_cluster_ip), auth=self.policy_userpass,
387 json=self.policy_pap_json, verify=False,
388 headers=self.policy_headers)
389 if policy_insert_req.status_code != 200:
390 self.logger.error('Policy PAP request failed with HTTP code'
391 '{0}'.format(policy_insert_req.status_code))
393 self.logger.info('Successully pushed closed loop Policy')
395 def is_node_in_aai(self, node_type, node_uuid):
# Query the AAI nodes-query search API to check whether a service instance
# or generic VNF with the given UUID exists.
# :param node_type: 'service' or 'vnf'
# :param node_uuid: UUID to look up
# :return: True if AAI returned 'result-data'
397 search_node_type = None
398 if node_type == 'service':
399 search_node_type = 'service-instance'
400 key = 'service-instance-id'
401 elif node_type == 'vnf':
402 search_node_type = 'generic-vnf'
# NOTE(review): the 'key' assignment for the vnf branch is not visible in
# these lines — confirm it exists in the full source
405 logging.error('Invalid node_type: ' + node_type)
408 url = 'https://{0}:{1}/aai/v11/search/nodes-query?search-node-type={2}&filter={3}:EQUALS:{4}'.format(
409 self.hosts['aai-inst1'], self.aai_query_port, search_node_type, key, node_uuid)
411 headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-FromAppID': 'vCPE-Robot', 'X-TransactionId': 'get_aai_subscr'}
412 r = requests.get(url, headers=headers, auth=self.aai_userpass, verify=False)
# 'response' presumably comes from r.json() on a line not visible here — confirm
414 self.logger.debug('aai query: ' + url)
415 self.logger.debug('aai response:\n' + json.dumps(response, indent=4, sort_keys=True))
416 return 'result-data' in response
419 def extract_ip_from_str(net_addr, net_addr_len, sz):
421 :param net_addr: e.g. 10.5.12.0
422 :param net_addr_len: e.g. 24
424 :return: the first IP address matching the network, e.g. 10.5.12.3
# NOTE: unicode() is Python 2 only; ipaddress there requires unicode input
426 network = ipaddress.ip_network(unicode('{0}/{1}'.format(net_addr, net_addr_len)), strict=False)
# Find every dotted-quad substring in sz, then return the first one that
# falls inside the target network (loop header/return lines not visible here)
427 ip_list = re.findall(r'[0-9]+(?:\.[0-9]+){3}', sz)
429 this_net = ipaddress.ip_network(unicode('{0}/{1}'.format(ip, net_addr_len)), strict=False)
430 if this_net == network:
434 def get_pod_node_oam_ip(self, pod):
436 :Assuming kubectl is available and configured by default config (~/.kube/config)
437 :param pod: pod name substring, e.g. 'sdnc-sdnc-0'
438 :return pod's cluster node oam ip (10.0.0.0/16)
441 config.load_kube_config()
442 api = client.CoreV1Api()
# Quiet the verbose kubernetes client logger
443 kslogger = logging.getLogger('kubernetes')
444 kslogger.setLevel(logging.INFO)
445 res = api.list_pod_for_all_namespaces()
# Scan all pods for one whose name contains the substring and take its
# node's host IP (the loop header is not visible here)
447 if pod in i.metadata.name:
448 self.logger.debug("found {0}\t{1}\t{2}".format(i.metadata.name, i.status.host_ip, i.spec.node_name))
449 ret = i.status.host_ip
# Fall back to interactive prompt when the pod was not found
# (raw_input is Python 2 only)
453 ret = raw_input("Enter " + self.sdnc_controller_pod + " pod cluster node OAM IP address(10.0.0.0/16): ")
456 def get_pod_node_public_ip(self, pod):
458 :Assuming kubectl is available and configured by default config (~/.kube/config)
459 :param pod: pod name substring, e.g. 'sdnc-sdnc-0'
460 :return pod's cluster node public ip (i.e. 10.12.0.0/16)
463 config.load_kube_config()
464 api = client.CoreV1Api()
# Quiet the verbose kubernetes client logger
465 kslogger = logging.getLogger('kubernetes')
466 kslogger.setLevel(logging.INFO)
467 res = api.list_pod_for_all_namespaces()
# Find the pod by name substring, then resolve its node's public IP via nova
# (the loop header is not visible here)
469 if pod in i.metadata.name:
470 ret = self.get_vm_public_ip_by_nova(i.spec.node_name)
471 self.logger.debug("found node {0} public ip: {1}".format(i.spec.node_name, ret))
# Fall back to interactive prompt when the pod was not found
475 ret = raw_input("Enter " + self.sdnc_controller_pod + " pod cluster node public IP address(i.e. " + self.external_net_addr + "): ")
478 def get_vm_public_ip_by_nova(self, vm):
480 This method uses openstack nova api to retrieve vm public ip
# Match a server's addresses against the configured external subnet and
# return the one that falls inside it (inner loop/return lines not visible here)
484 subnet = IPNetwork('{0}/{1}'.format(self.external_net_addr, self.external_net_prefix_len))
485 nova = openstackclient.Client(2, self.cloud['--os-username'], self.cloud['--os-password'], self.cloud['--os-tenant-id'], self.cloud['--os-auth-url'])
486 for i in nova.servers.list():
# Only servers whose name matches 'vm' are considered, presumably via a
# guard on a line not visible here — confirm
488 for k, v in i.networks.items():
490 if IPAddress(ip) in subnet:
494 def get_vm_ip(self, keywords, net_addr=None, net_addr_len=None):
496 :param keywords: list of keywords to search for vm, e.g. ['bng', 'gmux', 'brg']
497 :param net_addr: e.g. 10.12.5.0
498 :param net_addr_len: e.g. 24
499 :return: dictionary {keyword: ip}
# Default to the configured external network when no subnet is given
# (the guarding 'if' lines are not visible here)
502 net_addr = self.external_net_addr
505 net_addr_len = self.external_net_prefix_len
# Build 'nova <auth options> list' from the cloud credentials,
# excluding identity-api options
507 param = ' '.join([k + ' ' + v for k, v in self.cloud.items() if 'identity' not in k])
508 openstackcmd = 'nova ' + param + ' list'
509 self.logger.debug(openstackcmd)
511 results = os.popen(openstackcmd).read()
512 all_vm_ip_dict = self.extract_vm_ip_as_dict(results, net_addr, net_addr_len)
# Drop stale timestamped duplicates, keeping the newest instance of each vCPE VM
513 latest_vm_list = self.remove_old_vms(all_vm_ip_dict.keys(), self.cpe_vm_prefix)
514 latest_vm_ip_dict = {vm: all_vm_ip_dict[vm] for vm in latest_vm_list}
515 ip_dict = self.select_subset_vm_ip(latest_vm_ip_dict, keywords)
# ONAP components in OOM mode resolve to the k8s host IP instead of a VM IP
517 ip_dict.update(self.get_oom_onap_vm_ip(keywords))
519 if len(ip_dict) != len(keywords):
520 self.logger.error('Cannot find all desired IP addresses for %s.', keywords)
521 self.logger.error(json.dumps(ip_dict, indent=4, sort_keys=True))
522 self.logger.error('Temporarily continue.. remember to check back vcpecommon.py line: 396')
526 def get_oom_onap_vm_ip(self, keywords):
# For keywords that are ONAP component names (in self.host_names), map them
# to the shared k8s host public IP (dict init, loop header and return lines
# are not visible here)
529 if vm in self.host_names:
530 vm_ip[vm] = self.oom_so_sdnc_aai_ip
533 def get_k8s_service_cluster_ip(self, service):
535 Returns cluster IP for a given service
536 :param service: name of the service
539 config.load_kube_config()
540 api = client.CoreV1Api()
# Quiet the verbose kubernetes client logger
541 kslogger = logging.getLogger('kubernetes')
542 kslogger.setLevel(logging.INFO)
# Look the service up in the ONAP namespace (the 'try:' line is not visible here)
544 resp = api.read_namespaced_service(service, self.onap_namespace)
545 except client.rest.ApiException as e:
546 self.logger.error('Error while making k8s API request: ' + e.body)
549 return resp.spec.cluster_ip
551 def get_k8s_service_endpoint_info(self, service, subset):
553 Returns endpoint data for a given service and subset. If there
554 is more than one endpoint returns data for the first one from
555 the list that API returned.
556 :param service: name of the service
557 :param subset: subset name, one of "ip","port"
560 config.load_kube_config()
561 api = client.CoreV1Api()
# Quiet the verbose kubernetes client logger
562 kslogger = logging.getLogger('kubernetes')
563 kslogger.setLevel(logging.INFO)
# Fetch the endpoints object (the 'try:' line is not visible here)
565 resp = api.read_namespaced_endpoints(service, self.onap_namespace)
566 except client.rest.ApiException as e:
567 self.logger.error('Error while making k8s API request: ' + e.body)
# Return the first address/port of the first subset
# (the 'if subset == "ip":' line is not visible here)
571 return resp.subsets[0].addresses[0].ip
572 elif subset == "port":
573 return resp.subsets[0].ports[0].port
575 self.logger.error("Unsupported subset type")
577 def extract_vm_ip_as_dict(self, novalist_results, net_addr, net_addr_len):
# Parse 'nova list' table output into {vm_name: ip-in-subnet}
# (dict init, field guards and return lines are not visible here)
579 for line in novalist_results.split('\n'):
580 fields = line.split('|')
# 'vm_name' and 'ip_info' presumably come from specific table columns on
# lines not visible here — confirm column indices
584 ip = self.extract_ip_from_str(net_addr, net_addr_len, ip_info)
585 vm_ip_dict[vm_name] = ip
589 def remove_old_vms(self, vm_list, prefix):
591 For vms with format name_timestamp, only keep the one with the latest timestamp.
593 zdcpe1cpe01brgemu01_201805222148 (drop this)
594 zdcpe1cpe01brgemu01_201805222229 (keep this)
595 zdcpe1cpe01gw01_201805162201
# Bucket timestamped vCPE VMs by base name, keeping only the lexicographically
# largest (= newest) entry per base name; everything else passes through
# (list init, loop header and return lines are not visible here)
598 same_type_vm_dict = {}
600 fields = vm.split('_')
# Timestamp suffix must be exactly 12 digits, e.g. 201805222148
601 if vm.startswith(prefix) and len(fields) == 2 and len(fields[-1]) == len('201805222148') and fields[-1].isdigit():
602 if vm > same_type_vm_dict.get(fields[0], '0'):
603 same_type_vm_dict[fields[0]] = vm
605 new_vm_list.append(vm)
607 new_vm_list.extend(same_type_vm_dict.values())
610 def select_subset_vm_ip(self, all_vm_ip_dict, vm_name_keyword_list):
# For each keyword, pick the IP of a VM whose name matches it
# (dict init, matching condition and return lines are not visible here)
612 for keyword in vm_name_keyword_list:
613 for vm, ip in all_vm_ip_dict.items():
615 vm_ip_dict[keyword] = ip
def del_vgmux_ves_mode(self):
    """Remove the VES 'mode' configuration from the vGMUX via its REST API."""
    target = self.vpp_ves_url.format(self.hosts['mux']) + '/mode'
    response = requests.delete(target,
                               headers=self.vpp_api_headers,
                               auth=self.vpp_api_userpass)
    self.logger.debug('%s', response)
def del_vgmux_ves_collector(self):
    """Remove the VES collector configuration from the vGMUX via its REST API."""
    target = self.vpp_ves_url.format(self.hosts['mux']) + '/config'
    response = requests.delete(target,
                               headers=self.vpp_api_headers,
                               auth=self.vpp_api_userpass)
    self.logger.debug('%s', response)
629 def set_vgmux_ves_collector(self ):
# Point the vGMUX VES agent at the DCAE VES collector (the 'data = {...'
# opener and the remaining payload fields are not visible here)
630 url = self.vpp_ves_url.format(self.hosts['mux'])
632 {'server-addr': self.hosts[self.dcae_ves_collector_name],
# 30235 is the collector's k8s NodePort in OOM mode, 8081 otherwise
633 'server-port': '30235' if self.oom_mode else '8081',
634 'read-interval': '10',
638 r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
639 self.logger.debug('%s', r)
641 def set_vgmux_packet_loss_rate(self, lossrate, vg_vnf_instance_name):
# Configure the vGMUX VES agent to emulate packet loss in demo mode
# (the 'data = {...' opener is not visible here)
642 url = self.vpp_ves_url.format(self.hosts['mux'])
644 {"working-mode": "demo",
645 "base-packet-loss": str(lossrate),
646 "source-name": vg_vnf_instance_name
649 r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
650 self.logger.debug('%s', r)
652 # return all the VxLAN interface names of BRG or vGMUX based on the IP address
653 def get_vxlan_interfaces(self, ip, print_info=False):
# :param ip: device IP to query via the honeycomb REST API
# :param print_info: when True, pretty-print each vxlan tunnel entry
# :return: list of interface names whose type is 'v3po:vxlan-tunnel'
654 url = self.vpp_inf_url.format(ip)
655 self.logger.debug('url is this: %s', url)
656 r = requests.get(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
657 data = r.json()['interfaces']['interface']
# Optional debug dump of each tunnel entry (the 'if print_info:' guard and
# loop header are not visible here)
660 if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel':
661 print(json.dumps(inf, indent=4, sort_keys=True))
663 return [inf['name'] for inf in data if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel']
665 # delete all VxLAN interfaces of each hosts
666 def delete_vxlan_interfaces(self, host_dic):
# For each host, delete every vxlan crossconnect then every vxlan interface,
# and verify nothing is left (several loop/guard lines are not visible here)
667 for host, ip in host_dic.items():
669 self.logger.info('{0}: Getting VxLAN interfaces'.format(host))
670 inf_list = self.get_vxlan_interfaces(ip)
# First pass: remove the l2 crossconnect from each interface
674 self.logger.info("{0}: Deleting VxLAN crossconnect {1}".format(host, inf))
675 url = self.vpp_inf_url.format(ip) + '/interface/' + inf + '/v3po:l2'
676 requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
# Second pass: delete the interface itself
681 self.logger.info("{0}: Deleting VxLAN interface {1}".format(host, inf))
682 url = self.vpp_inf_url.format(ip) + '/interface/' + inf
683 requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
# Verify deletion succeeded; a leftover interface usually means the VM is stuck
685 if len(self.get_vxlan_interfaces(ip)) > 0:
686 self.logger.error("Error deleting VxLAN from {0}, try to restart the VM, IP is {1}.".format(host, ip))
690 self.logger.info("{0}: no VxLAN interface found, nothing to delete".format(host))
def save_object(obj, filepathname):
    """Serialize *obj* into the file at *filepathname* using pickle."""
    with open(filepathname, 'wb') as out_file:
        pickle.dump(obj, out_file)
def load_object(filepathname):
    """Load and return a previously pickled object from *filepathname*."""
    with open(filepathname, 'rb') as in_file:
        return pickle.load(in_file)
704 def increase_ip_address_or_vni_in_template(vnf_template_file, vnf_parameter_name_list):
# Bump the last dotted field of each listed VNF parameter (an IP address or a
# VNI) in the preload template, then rewrite the file in place. The actual
# increment logic between reading and writing 'number' is not visible here —
# presumably it adds a fixed offset; confirm against the full source.
705 with open(vnf_template_file) as json_input:
706 json_data = json.load(json_input)
707 param_list = json_data['VNF-API:input']['VNF-API:vnf-topology-information']['VNF-API:vnf-parameters']
708 for param in param_list:
709 if param['vnf-parameter-name'] in vnf_parameter_name_list:
710 ipaddr_or_vni = param['vnf-parameter-value'].split('.')
711 number = int(ipaddr_or_vni[-1])
716 ipaddr_or_vni[-1] = str(number)
717 param['vnf-parameter-value'] = '.'.join(ipaddr_or_vni)
719 assert json_data is not None
720 with open(vnf_template_file, 'w') as json_output:
721 json.dump(json_data, json_output, indent=4, sort_keys=True)
def save_preload_data(self, preload_data):
    """Persist the preload data to the configured pickle state file."""
    target_file = self.preload_dict_file
    self.save_object(preload_data, target_file)
def load_preload_data(self):
    """Restore the preload data from the configured pickle state file."""
    source_file = self.preload_dict_file
    return self.load_object(source_file)
def save_vgmux_vnf_name(self, vgmux_vnf_name):
    """Persist the vGMUX VNF name to the configured pickle state file."""
    target_file = self.vgmux_vnf_name_file
    self.save_object(vgmux_vnf_name, target_file)
def load_vgmux_vnf_name(self):
    """Restore the vGMUX VNF name from the configured pickle state file."""
    source_file = self.vgmux_vnf_name_file
    return self.load_object(source_file)