11 import mysql.connector
15 from novaclient import client as openstackclient
16 from kubernetes import client, config
17 from netaddr import IPAddress, IPNetwork
19 ######################################################################
20 # Parts which must be updated / cross-checked during each deployment #
21 # are marked as CHANGEME #
22 ######################################################################
25 #############################################################################################
26 # Set network prefix of k8s host external address; it's used for pod public IP autodetection
27 # but can be overriden from user in case of autodetection failure
28 external_net_addr = '10.12.0.0'
29 external_net_prefix_len = 16
31 #############################################################################################
32 # set the openstack cloud access credentials here
35 #############################################################################################
36 # set the gra_api flag
37 # Mustn't be set to True until Frankfurt DGs are updated for GRA-API infrastructure
40 ###########################
41 # set Openstack credentials
44 '--os-auth-url': 'http://10.12.25.2:5000',
45 '--os-username': 'kxi',
46 '--os-user-domain-id': 'default',
47 '--os-project-domain-id': 'default',
48 '--os-tenant-id': '712b6016580e410b9abfec9ca34953ce' if oom_mode else '1e097c6713e74fd7ac8e4295e605ee1e',
49 '--os-region-name': 'RegionOne',
50 '--os-password': 'n3JhGMGuDzD8',
51 '--os-project-domain-name': 'Integration-Release-Daily' if oom_mode else 'Integration-SB-07',
52 '--os-identity-api-version': '3'
55 ############################################################################
56 # set oam and public network which must exist in openstack before deployment
58 common_preload_config = {
59 'oam_onap_net': 'oam_network_exxC' if oom_mode else 'oam_onap_lAky',
60 'oam_onap_subnet': 'oam_network_exxC' if oom_mode else 'oam_onap_lAky',
61 'public_net': 'external',
62 'public_net_id': '971040b2-7059-49dc-b220-4fab50cb2ad4'
65 #############################################################################
66 # Set name of Onap's k8s namespace and sdnc controller pod
68 onap_namespace = 'onap'
69 onap_environment = 'dev'
70 sdnc_controller_pod = '-'.join([onap_environment, 'sdnc-sdnc-0'])
72 template_variable_symbol = '${'
73 cpe_vm_prefix = 'zdcpe'
75 #############################################################################################
76 # preloading network config
78 # value = [subnet_start_ip, subnet_gateway_ip]
79 preload_network_config = {
80 'cpe_public': ['10.2.0.2', '10.2.0.1'],
81 'cpe_signal': ['10.4.0.2', '10.4.0.1'],
82 'brg_bng': ['10.3.0.2', '10.3.0.1'],
83 'bng_mux': ['10.1.0.10', '10.1.0.1'],
84 'mux_gw': ['10.5.0.10', '10.5.0.1']
87 dcae_ves_collector_name = 'dcae-bootstrap'
88 global_subscriber_id = 'SDN-ETHERNET-INTERNET'
89 project_name = 'Project-Demonstration'
90 owning_entity_id = '520cc603-a3c4-4ec2-9ef4-ca70facd79c0'
91 owning_entity_name = 'OE-Demonstration1'
    def __init__(self, extra_host_names=None):
        """
        Build the shared vCPE test configuration: discover SDNC/SO/AAI/DCAE
        endpoint addresses, resolve VM external IPs, and assemble all service
        URLs, credentials and state-file paths used by the test scripts.
        :param extra_host_names: optional list of additional VM name keywords
            whose external IPs should also be resolved into self.hosts
        """
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
        self.logger.info('Initializing configuration')
        # NOTE(review): self.oom_mode and self.cloud are read below, but their
        # assignments are on lines not visible in this chunk -- TODO confirm.
        # OOM: this is the address that the brg and bng will nat for sdnc access - 10.0.0.x address of k8 host for sdnc-0 container
        self.sdnc_oam_ip = self.get_pod_node_oam_ip(self.sdnc_controller_pod)
        # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
        self.oom_so_sdnc_aai_ip = self.get_pod_node_public_ip(self.sdnc_controller_pod)
        # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
        self.oom_dcae_ves_collector = self.oom_so_sdnc_aai_ip
        # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
        self.mr_ip_addr = self.oom_so_sdnc_aai_ip
        self.mr_ip_port = '30227'
        # k8s NodePort values under OOM, classic service ports otherwise
        self.so_nbi_port = '30277' if self.oom_mode else '8080'
        self.sdnc_preloading_port = '30267' if self.oom_mode else '8282'
        self.aai_query_port = '30233' if self.oom_mode else '8443'
        self.sniro_port = '30288' if self.oom_mode else '8080'
        # VM name keywords whose external IPs are resolved into self.hosts
        self.host_names = ['sdc', 'so', 'sdnc', 'robot', 'aai-inst1', self.dcae_ves_collector_name, 'mariadb-galera']
        self.host_names.extend(extra_host_names)
        self.hosts = self.get_vm_ip(self.host_names, self.external_net_addr, self.external_net_prefix_len)
        # this is the keyword used to name vgw stack, must not be used in other stacks
        self.vgw_name_keyword = 'base_vcpe_vgw'
        # this is the file that will keep the index of last assigned SO name
        self.vgw_vfmod_name_index_file= '__var/vgw_vfmod_name_index'
        self.svc_instance_uuid_file = '__var/svc_instance_uuid'
        self.preload_dict_file = '__var/preload_dict'
        self.vgmux_vnf_name_file = '__var/vgmux_vnf_name'
        self.product_family_id = 'f9457e8c-4afd-45da-9389-46acd9bf5116'
        self.custom_product_family_id = 'a9a77d5a-123e-4ca2-9eb9-0b015d2ee0fb'
        # name prefixes for instances created during the test run
        self.instance_name_prefix = {
            'service': 'vcpe_svc',
            'network': 'vcpe_net',
            'vfmodule': 'vcpe_vfmodule'
        self.aai_userpass = 'AAI', 'AAI'
        ############################################################################################################
        # following key is overriding public key from vCPE heat templates, it's important to use correct one in here
        self.pub_key = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh'
        self.os_tenant_id = self.cloud['--os-tenant-id']
        self.os_region_name = self.cloud['--os-region-name']
        self.common_preload_config['pub_key'] = self.pub_key
        self.sniro_url = 'http://' + self.hosts['robot'] + ':' + self.sniro_port + '/__admin/mappings'
        self.sniro_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        self.homing_solution = 'sniro'  # value is either 'sniro' or 'oof'
        # self.homing_solution = 'oof'
        self.customer_location_used_by_oof = {
            "customerLatitude": "32.897480",
            "customerLongitude": "-97.040443",
            "customerName": "some_company"
        #############################################################################################
        # SDC backend: service catalog queries
        self.sdc_be_port = '30204'
        self.sdc_be_request_userpass = 'vid', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
        self.sdc_be_request_headers = {'X-ECOMP-InstanceID': 'VID'}
        self.sdc_be_url_prefix = 'https://' + self.hosts['sdc'] + ':' + self.sdc_be_port
        self.sdc_service_list_url = self.sdc_be_url_prefix + '/sdc/v1/catalog/services'
        # SDC frontend proxy: category/subcategory management
        self.sdc_fe_port = '30207'
        self.sdc_fe_request_userpass = 'beep', 'boop'
        self.sdc_fe_request_headers = {'USER_ID': 'demo', 'Content-Type': 'application/json'}
        self.sdc_fe_url_prefix = 'https://' + self.hosts['sdc'] + ':' + self.sdc_fe_port
        self.sdc_get_category_list_url = self.sdc_fe_url_prefix + '/sdc1/feProxy/rest/v1/categories'
        self.sdc_create_allotted_resource_subcategory_url = self.sdc_fe_url_prefix + '/sdc1/feProxy/rest/v1/category/resources/resourceNewCategory.allotted%20resource/subCategory'
        #############################################################################################
        # SDNC REST endpoints, credentials and DB access parameters
        self.sdnc_userpass = 'admin', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
        self.sdnc_db_name = 'sdnctl'
        self.sdnc_db_user = 'sdnctl'
        self.sdnc_db_pass = 'gamma'
        self.sdnc_db_port = self.get_k8s_service_endpoint_info('mariadb-galera','port') if self.oom_mode else '3306'
        self.sdnc_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        self.sdnc_preload_network_url = 'https://' + self.hosts['sdnc'] + \
                                        ':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-network-topology-operation'
        self.sdnc_preload_network_gra_url = 'https://' + self.hosts['sdnc'] + \
                                        ':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-network-topology-operation'
        self.sdnc_preload_vnf_url = 'https://' + self.hosts['sdnc'] + \
                                    ':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-vnf-topology-operation'
        self.sdnc_preload_gra_url = 'https://' + self.hosts['sdnc'] + \
                                    ':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-vf-module-topology-operation'
        self.sdnc_ar_cleanup_url = 'https://' + self.hosts['sdnc'] + ':' + self.sdnc_preloading_port + \
                                   '/restconf/config/GENERIC-RESOURCE-API:'
        #############################################################################################
        # MARIADB-GALERA settings
        self.mariadb_galera_endpoint_ip = self.get_k8s_service_endpoint_info('mariadb-galera','ip')
        self.mariadb_galera_endpoint_port = self.get_k8s_service_endpoint_info('mariadb-galera','port')
        #############################################################################################
        # SO urls, note: do NOT add a '/' at the end of the url
        self.so_req_api_url = {'v4': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances',
                               'v5': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances'}
        self.so_check_progress_api_url = 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/orchestrationRequests/v6'
        self.so_userpass = 'InfraPortalClient', 'password1$'
        self.so_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        self.so_db_name = 'catalogdb'
        self.so_db_user = 'root'
        self.so_db_pass = 'secretpassword'
        self.so_db_host = self.mariadb_galera_endpoint_ip if self.oom_mode else self.hosts['so']
        self.so_db_port = self.mariadb_galera_endpoint_port if self.oom_mode else '3306'
        # VPP RESTCONF endpoints on BRG/vG-MUX; '{0}' is later filled with a VM IP
        self.vpp_inf_url = 'http://{0}:8183/restconf/config/ietf-interfaces:interfaces'
        self.vpp_api_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        self.vpp_api_userpass = ('admin', 'admin')
        self.vpp_ves_url= 'http://{0}:8183/restconf/config/vesagent:vesagent'
        #############################################################################################
        # Policy framework (policy-api / policy-pap) endpoints and credentials
        self.policy_userpass = ('healthcheck', 'zb!XztG34')
        self.policy_headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
        self.policy_api_url = 'https://{0}:6969/policy/api/v1/policytypes/onap.policies.controlloop.Operational/versions/1.0.0/policies'
        self.policy_pap_get_url = 'https://{0}:6969/policy/pap/v1/pdps'
        self.policy_pap_json = {'policies': [{'policy-id': 'operational.vcpe'}]}
        self.policy_pap_post_url = self.policy_pap_get_url + '/policies'
        self.policy_api_service_name = 'policy-api'
        self.policy_pap_service_name = 'policy-pap'
        #############################################################################################
        # AAI cloud-region query endpoint and request headers
        self.aai_region_query_url = 'https://' + self.oom_so_sdnc_aai_ip + ':' +\
                                    self.aai_query_port +\
                                    '/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/' +\
                                    self.cloud['--os-region-name']
        self.aai_headers = {'Accept': 'application/json',
                            'Content-Type': 'application/json',
                            'X-FromAppId': 'postman', 'X-TransactionId': '9999'}
    def heatbridge(self, openstack_stack_name, svc_instance_uuid):
        """
        Add vserver information to AAI
        :param openstack_stack_name: name of the heat stack to bridge into AAI
        :param svc_instance_uuid: service instance UUID the vservers belong to
        """
        self.logger.info('Adding vServer information to AAI for {0}'.format(openstack_stack_name))
        if not self.oom_mode:
            # non-OOM: run demo.sh heatbridge remotely on the robot VM
            # (Python 2 'commands' module)
            cmd = '/opt/demo.sh heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid)
            ret = commands.getstatusoutput("ssh -i onap_dev root@{0} '{1}'".format(self.hosts['robot'], cmd))
            self.logger.debug('%s', ret)
        # NOTE(review): an 'else:' line is not visible in this chunk; the prints
        # below appear to be the OOM-mode manual instructions -- TODO confirm
        print('To add vGMUX vserver info to AAI, do the following:')
        print('- ssh to rancher')
        print('- cd /root/oom/kubernetes/robot')
        print('- ./demo-k8s.sh onap heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid))
    def get_brg_mac_from_sdnc(self):
        """
        Check table DHCP_MAP in the SDNC DB. Find the newly instantiated BRG MAC address.
        Note that there might be multiple BRGs, the most recently instantiated BRG always has the largest IP address.
        """
        # NOTE(review): the if/else selecting between the OOM endpoint and the
        # classic host is not fully visible in this chunk -- TODO confirm
        db_host=self.mariadb_galera_endpoint_ip
        db_host=self.hosts['mariadb-galera']
        # NOTE(review): the 'host=db_host' keyword line of this connect() call is
        # not visible in this chunk
        cnx = mysql.connector.connect(user=self.sdnc_db_user,
                                      password=self.sdnc_db_pass,
                                      database=self.sdnc_db_name,
                                      port=self.sdnc_db_port)
        cursor = cnx.cursor()
        query = "SELECT * from DHCP_MAP"
        cursor.execute(query)
        self.logger.debug('DHCP_MAP table in SDNC')
        # scan all rows; the row whose IP has the largest last octet is the
        # most recently instantiated BRG
        for mac, ip in cursor:
            self.logger.debug(mac + ' - ' + ip)
            this_host = int(ip.split('.')[-1])
        # NOTE(review): the matching 'try:' and the max-tracking/return lines are
        # not visible in this chunk
        except AssertionError:
            self.logger.error('Failed to obtain BRG MAC address from database')
285 def execute_cmds_mariadb(self, cmds):
286 self.execute_cmds_db(cmds, self.sdnc_db_user, self.sdnc_db_pass,
287 self.sdnc_db_name, self.mariadb_galera_endpoint_ip,
288 self.mariadb_galera_endpoint_port)
290 def execute_cmds_sdnc_db(self, cmds):
291 self.execute_cmds_db(cmds, self.sdnc_db_user, self.sdnc_db_pass, self.sdnc_db_name,
292 self.hosts['sdnc'], self.sdnc_db_port)
294 def execute_cmds_so_db(self, cmds):
295 self.execute_cmds_db(cmds, self.so_db_user, self.so_db_pass, self.so_db_name,
296 self.so_db_host, self.so_db_port)
    def execute_cmds_db(self, cmds, dbuser, dbpass, dbname, host, port):
        """
        Open a MySQL connection and execute each SQL statement in cmds.
        :param cmds: iterable of SQL statement strings
        :param dbuser/dbpass/dbname/host/port: connection parameters
        """
        cnx = mysql.connector.connect(user=dbuser, password=dbpass, database=dbname, host=host, port=port)
        cursor = cnx.cursor()
        # NOTE(review): the 'for cmd in cmds:' loop header and the
        # execute/commit/close lines are not visible in this chunk
        self.logger.debug(cmd)
        self.logger.debug('%s', cursor)
    def find_file(self, file_name_keyword, file_ext, search_dir):
        """
        :param file_name_keyword: keyword used to look for the csar file, case insensitive matching, e.g, infra
        :param file_ext: e.g., csar, json
        :param search_dir path to search
        :return: path name of the file
        """
        file_name_keyword = file_name_keyword.lower()
        file_ext = file_ext.lower()
        if not file_ext.startswith('.'):
            file_ext = '.' + file_ext
        for file_name in os.listdir(search_dir):
            file_name_lower = file_name.lower()
            if file_name_keyword in file_name_lower and file_name_lower.endswith(file_ext):
                # NOTE(review): the duplicate-match check guarding this error (and
                # the associated exit/return lines) is not visible in this chunk
                self.logger.error('Multiple files found for *{0}*.{1} in '
                                  'directory {2}'.format(file_name_keyword, file_ext, search_dir))
                filenamepath = os.path.abspath(os.path.join(search_dir, file_name))
        # no match found after scanning the whole directory
        self.logger.error("Cannot find *{0}*{1} in directory {2}".format(file_name_keyword, file_ext, search_dir))
338 def network_name_to_subnet_name(network_name):
340 :param network_name: example: vcpe_net_cpe_signal_201711281221
341 :return: vcpe_net_cpe_signal_subnet_201711281221
343 fields = network_name.split('_')
344 fields.insert(-1, 'subnet')
345 return '_'.join(fields)
    def set_network_name(self, network_name):
        """Rename the openstack network 'ONAP-NW1' to the given network_name."""
        # build 'openstack <credential options> network set ...' from self.cloud
        param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
        openstackcmd = 'openstack ' + param
        cmd = ' '.join([openstackcmd, 'network set --name', network_name, 'ONAP-NW1'])
        # NOTE(review): the line that actually executes 'cmd' is not visible in
        # this chunk
    def set_subnet_name(self, network_name):
        """
        Example: network_name = vcpe_net_cpe_signal_201711281221
        set subnet name to vcpe_net_cpe_signal_subnet_201711281221
        """
        param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
        openstackcmd = 'openstack ' + param
        # expected results: | subnets | subnet_id |
        subnet_info = os.popen(openstackcmd + ' network show ' + network_name + ' |grep subnets').read().split('|')
        if len(subnet_info) > 2 and subnet_info[1].strip() == 'subnets':
            subnet_id = subnet_info[2].strip()
            subnet_name = self.network_name_to_subnet_name(network_name)
            cmd = ' '.join([openstackcmd, 'subnet set --name', subnet_name, subnet_id])
            # NOTE(review): the line executing 'cmd' is not visible in this chunk
            self.logger.info("Subnet name set to: " + subnet_name)
            # NOTE(review): the 'else:' introducing this error path is not visible
            # in this chunk
            self.logger.error("Can't get subnet info from network name: " + network_name)
    def set_closed_loop_policy(self, policy_template_file):
        """
        Create the vCPE closed-loop operational policy from a JSON template via
        policy-api, then deploy it to the PDP groups via policy-pap, unless
        policy-pap already knows a policy named 'operational.vcpe'.
        :param policy_template_file: path to the policy JSON template
        """
        # Gather policy services cluster ips
        p_api_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_api_service_name)
        p_pap_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_pap_service_name)
        # Read policy json from file
        with open(policy_template_file) as f:
            # NOTE(review): the try/except wrapping this load (and its exit path)
            # is not fully visible in this chunk
            policy_json = json.load(f)
            self.logger.error(policy_template_file + " doesn't seem to contain valid JSON data")
        # Check policy already applied
        policy_exists_req = requests.get(self.policy_pap_get_url.format(
            p_pap_cluster_ip), auth=self.policy_userpass,
            verify=False, headers=self.policy_headers)
        if policy_exists_req.status_code != 200:
            self.logger.error('Failure in checking CL policy existence. '
                              'Policy-pap responded with HTTP code {0}'.format(
                              policy_exists_req.status_code))
            # NOTE(review): the 'try:' matching the except below is not visible
            policy_exists_json = policy_exists_req.json()
        except ValueError as e:
            # Python 2 style exception message access
            self.logger.error('Policy-pap request failed: ' + e.message)
        # probe the known PDP subgroup for an already-deployed vCPE policy
        assert policy_exists_json['groups'][0]['pdpSubgroups'] \
            [1]['policies'][0]['name'] != 'operational.vcpe'
        except AssertionError:
            self.logger.info('vCPE closed loop policy already exists, not applying')
            pass  # policy doesn't exist
        # Create the policy through policy-api
        policy_create_req = requests.post(self.policy_api_url.format(
            p_api_cluster_ip), auth=self.policy_userpass,
            json=policy_json, verify=False,
            headers=self.policy_headers)
        # Get the policy id from policy-api response
        if policy_create_req.status_code != 200:
            self.logger.error('Failed creating policy. Policy-api responded'
                              ' with HTTP code {0}'.format(policy_create_req.status_code))
            # NOTE(review): the 'try:' matching the except below is not visible
            policy_version = json.loads(policy_create_req.text)['policy-version']
        except (KeyError, ValueError):
            self.logger.error('Policy API response not understood:')
            self.logger.debug('\n' + str(policy_create_req.text))
        # Inject the policy into Policy PAP
        self.policy_pap_json['policies'].append({'policy-version': policy_version})
        policy_insert_req = requests.post(self.policy_pap_post_url.format(
            p_pap_cluster_ip), auth=self.policy_userpass,
            json=self.policy_pap_json, verify=False,
            headers=self.policy_headers)
        if policy_insert_req.status_code != 200:
            self.logger.error('Policy PAP request failed with HTTP code'
                              '{0}'.format(policy_insert_req.status_code))
        # NOTE(review): typo 'Successully' is part of the runtime log string;
        # left unchanged deliberately
        self.logger.info('Successully pushed closed loop Policy')
    def is_node_in_aai(self, node_type, node_uuid):
        """
        Query AAI's nodes-query API to check whether a node exists.
        :param node_type: 'service' or 'vnf'
        :param node_uuid: UUID used as the filter value
        :return: True if AAI returned 'result-data' for the query
        """
        search_node_type = None
        if node_type == 'service':
            search_node_type = 'service-instance'
            key = 'service-instance-id'
        elif node_type == 'vnf':
            search_node_type = 'generic-vnf'
            # NOTE(review): the 'key = ...' line for the vnf branch and the final
            # 'else:'/exit lines are not visible in this chunk
            logging.error('Invalid node_type: ' + node_type)
        url = 'https://{0}:{1}/aai/v11/search/nodes-query?search-node-type={2}&filter={3}:EQUALS:{4}'.format(
            self.hosts['aai-inst1'], self.aai_query_port, search_node_type, key, node_uuid)
        headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-FromAppID': 'vCPE-Robot', 'X-TransactionId': 'get_aai_subscr'}
        r = requests.get(url, headers=headers, auth=self.aai_userpass, verify=False)
        # NOTE(review): the 'response = r.json()' line is not visible in this chunk
        self.logger.debug('aai query: ' + url)
        self.logger.debug('aai response:\n' + json.dumps(response, indent=4, sort_keys=True))
        return 'result-data' in response
466 def extract_ip_from_str(net_addr, net_addr_len, sz):
468 :param net_addr: e.g. 10.5.12.0
469 :param net_addr_len: e.g. 24
471 :return: the first IP address matching the network, e.g. 10.5.12.3
473 network = ipaddress.ip_network(unicode('{0}/{1}'.format(net_addr, net_addr_len)), strict=False)
474 ip_list = re.findall(r'[0-9]+(?:\.[0-9]+){3}', sz)
476 this_net = ipaddress.ip_network(unicode('{0}/{1}'.format(ip, net_addr_len)), strict=False)
477 if this_net == network:
    def get_pod_node_oam_ip(self, pod):
        """
        :Assuming kubectl is available and configured by default config (~/.kube/config)
        :param pod: pod name substring, e.g. 'sdnc-sdnc-0'
        :return pod's cluster node oam ip (10.0.0.0/16)
        """
        config.load_kube_config()
        api = client.CoreV1Api()
        # raise kubernetes client log threshold to INFO to cut debug noise
        kslogger = logging.getLogger('kubernetes')
        kslogger.setLevel(logging.INFO)
        res = api.list_pod_for_all_namespaces()
        # NOTE(review): the 'for i in res.items:' loop header is not visible in
        # this chunk
        if pod in i.metadata.name:
            self.logger.debug("found {0}\t{1}\t{2}".format(i.metadata.name, i.status.host_ip, i.spec.node_name))
            ret = i.status.host_ip
        # fallback: ask the user when autodetection fails (Python 2 raw_input)
        ret = raw_input("Enter " + self.sdnc_controller_pod + " pod cluster node OAM IP address(10.0.0.0/16): ")
    def get_pod_node_public_ip(self, pod):
        """
        :Assuming kubectl is available and configured by default config (~/.kube/config)
        :param pod: pod name substring, e.g. 'sdnc-sdnc-0'
        :return pod's cluster node public ip (i.e. 10.12.0.0/16)
        """
        config.load_kube_config()
        api = client.CoreV1Api()
        # raise kubernetes client log threshold to INFO to cut debug noise
        kslogger = logging.getLogger('kubernetes')
        kslogger.setLevel(logging.INFO)
        res = api.list_pod_for_all_namespaces()
        # NOTE(review): the 'for i in res.items:' loop header is not visible in
        # this chunk
        if pod in i.metadata.name:
            # resolve the hosting node's public IP through the nova API
            ret = self.get_vm_public_ip_by_nova(i.spec.node_name)
            self.logger.debug("found node {0} public ip: {1}".format(i.spec.node_name, ret))
        # fallback: ask the user when autodetection fails (Python 2 raw_input)
        ret = raw_input("Enter " + self.sdnc_controller_pod + " pod cluster node public IP address(i.e. " + self.external_net_addr + "): ")
    def get_vm_public_ip_by_nova(self, vm):
        """
        This method uses openstack nova api to retrieve vm public ip
        :param vm: VM name to look up
        """
        # external subnet built from the class-level network settings
        subnet = IPNetwork('{0}/{1}'.format(self.external_net_addr, self.external_net_prefix_len))
        nova = openstackclient.Client(2, self.cloud['--os-username'], self.cloud['--os-password'], self.cloud['--os-tenant-id'], self.cloud['--os-auth-url'])
        for i in nova.servers.list():
            # NOTE(review): the VM-name match check, the inner loop binding 'ip',
            # and the return statement are not visible in this chunk
            for k, v in i.networks.items():
                if IPAddress(ip) in subnet:
    def get_vm_ip(self, keywords, net_addr=None, net_addr_len=None):
        """
        :param keywords: list of keywords to search for vm, e.g. ['bng', 'gmux', 'brg']
        :param net_addr: e.g. 10.12.5.0
        :param net_addr_len: e.g. 24
        :return: dictionary {keyword: ip}
        """
        # NOTE(review): the 'if not net_addr:' / 'if not net_addr_len:' guards
        # around these defaults are not visible in this chunk
        net_addr = self.external_net_addr
        net_addr_len = self.external_net_prefix_len
        # drop options whose key contains 'identity' before building the nova CLI
        param = ' '.join([k + ' ' + v for k, v in self.cloud.items() if 'identity' not in k])
        openstackcmd = 'nova ' + param + ' list'
        self.logger.debug(openstackcmd)
        results = os.popen(openstackcmd).read()
        all_vm_ip_dict = self.extract_vm_ip_as_dict(results, net_addr, net_addr_len)
        # keep only the newest VM of each name_timestamp family
        latest_vm_list = self.remove_old_vms(all_vm_ip_dict.keys(), self.cpe_vm_prefix)
        latest_vm_ip_dict = {vm: all_vm_ip_dict[vm] for vm in latest_vm_list}
        ip_dict = self.select_subset_vm_ip(latest_vm_ip_dict, keywords)
        # NOTE(review): an 'if self.oom_mode:' guard around this update is not
        # visible in this chunk -- TODO confirm
        ip_dict.update(self.get_oom_onap_vm_ip(keywords))
        if len(ip_dict) != len(keywords):
            self.logger.error('Cannot find all desired IP addresses for %s.', keywords)
            self.logger.error(json.dumps(ip_dict, indent=4, sort_keys=True))
            self.logger.error('Temporarily continue.. remember to check back vcpecommon.py line: 396')
        # NOTE(review): the return statement is not visible in this chunk
    def get_oom_onap_vm_ip(self, keywords):
        # Map every keyword that is a known ONAP host name to the single k8s
        # host external IP (under OOM all ONAP components share that node IP).
        # NOTE(review): the dict initialization, the 'for vm in keywords:' loop
        # header and the return statement are not visible in this chunk
        if vm in self.host_names:
            vm_ip[vm] = self.oom_so_sdnc_aai_ip
    def get_k8s_service_cluster_ip(self, service):
        """
        Returns cluster IP for a given service
        :param service: name of the service
        """
        config.load_kube_config()
        api = client.CoreV1Api()
        # raise kubernetes client log threshold to INFO to cut debug noise
        kslogger = logging.getLogger('kubernetes')
        kslogger.setLevel(logging.INFO)
        # NOTE(review): the 'try:' matching the except below (and its failure
        # return) is not visible in this chunk
        resp = api.read_namespaced_service(service, self.onap_namespace)
        except client.rest.ApiException as e:
            self.logger.error('Error while making k8s API request: ' + e.body)
        return resp.spec.cluster_ip
    def get_k8s_service_endpoint_info(self, service, subset):
        """
        Returns endpoint data for a given service and subset. If there
        is more than one endpoint returns data for the first one from
        the list that API returned.
        :param service: name of the service
        :param subset: subset name, one of "ip","port"
        """
        config.load_kube_config()
        api = client.CoreV1Api()
        # raise kubernetes client log threshold to INFO to cut debug noise
        kslogger = logging.getLogger('kubernetes')
        kslogger.setLevel(logging.INFO)
        # NOTE(review): the 'try:' matching the except below (and its failure
        # return) is not visible in this chunk
        resp = api.read_namespaced_endpoints(service, self.onap_namespace)
        except client.rest.ApiException as e:
            self.logger.error('Error while making k8s API request: ' + e.body)
        # NOTE(review): the 'if subset == "ip":' line is not visible in this chunk
        return resp.subsets[0].addresses[0].ip
        elif subset == "port":
            return resp.subsets[0].ports[0].port
        self.logger.error("Unsupported subset type")
    def extract_vm_ip_as_dict(self, novalist_results, net_addr, net_addr_len):
        # Parse 'nova list' table output into {vm_name: ip-on-given-network}.
        # NOTE(review): the result-dict initialization, the field-count check,
        # the vm_name/ip_info bindings and the return statement are not visible
        # in this chunk
        for line in novalist_results.split('\n'):
            fields = line.split('|')
            ip = self.extract_ip_from_str(net_addr, net_addr_len, ip_info)
            vm_ip_dict[vm_name] = ip
    def remove_old_vms(self, vm_list, prefix):
        """
        For vms with format name_timestamp, only keep the one with the latest timestamp.
        zdcpe1cpe01brgemu01_201805222148 (drop this)
        zdcpe1cpe01brgemu01_201805222229 (keep this)
        zdcpe1cpe01gw01_201805162201
        """
        same_type_vm_dict = {}
        # NOTE(review): the 'for vm in vm_list:' loop header, the new_vm_list
        # initialization, the 'else:' branch and the return statement are not
        # visible in this chunk
        fields = vm.split('_')
        # only timestamped vCPE VMs (prefix + 12-digit timestamp) compete;
        # lexicographic comparison of equal-length digit suffixes picks the newest
        if vm.startswith(prefix) and len(fields) == 2 and len(fields[-1]) == len('201805222148') and fields[-1].isdigit():
            if vm > same_type_vm_dict.get(fields[0], '0'):
                same_type_vm_dict[fields[0]] = vm
        new_vm_list.append(vm)
        new_vm_list.extend(same_type_vm_dict.values())
    def select_subset_vm_ip(self, all_vm_ip_dict, vm_name_keyword_list):
        # For each keyword, pick the IP of a VM whose name contains it.
        # NOTE(review): the result-dict initialization, the keyword-in-name
        # check and the return statement are not visible in this chunk
        for keyword in vm_name_keyword_list:
            for vm, ip in all_vm_ip_dict.items():
                vm_ip_dict[keyword] = ip
666 def del_vgmux_ves_mode(self):
667 url = self.vpp_ves_url.format(self.hosts['mux']) + '/mode'
668 r = requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
669 self.logger.debug('%s', r)
671 def del_vgmux_ves_collector(self):
672 url = self.vpp_ves_url.format(self.hosts['mux']) + '/config'
673 r = requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
674 self.logger.debug('%s', r)
    def set_vgmux_ves_collector(self ):
        # Point the vG-MUX VES agent at the DCAE VES collector.
        url = self.vpp_ves_url.format(self.hosts['mux'])
        # NOTE(review): the 'data =' opening line and the closing braces of this
        # payload are not visible in this chunk
            {'server-addr': self.hosts[self.dcae_ves_collector_name],
             # NodePort under OOM, classic collector port otherwise
             'server-port': '30235' if self.oom_mode else '8081',
             'read-interval': '10',
        r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
        self.logger.debug('%s', r)
    def set_vgmux_packet_loss_rate(self, lossrate, vg_vnf_instance_name):
        # Configure the vG-MUX VES agent demo mode with the given packet loss rate.
        url = self.vpp_ves_url.format(self.hosts['mux'])
        # NOTE(review): the 'data =' opening line and the closing braces of this
        # payload are not visible in this chunk
            {"working-mode": "demo",
             "base-packet-loss": str(lossrate),
             "source-name": vg_vnf_instance_name
        r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
        self.logger.debug('%s', r)
    # return all the VxLAN interface names of BRG or vGMUX based on the IP address
    def get_vxlan_interfaces(self, ip, print_info=False):
        url = self.vpp_inf_url.format(ip)
        self.logger.debug('url is this: %s', url)
        r = requests.get(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
        data = r.json()['interfaces']['interface']
        # NOTE(review): the 'if print_info:'/'for inf in data:' lines guarding
        # this dump are not visible in this chunk
        if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel':
            print(json.dumps(inf, indent=4, sort_keys=True))
        # collect the names of all vxlan-tunnel interfaces
        return [inf['name'] for inf in data if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel']
    # delete all VxLAN interfaces of each hosts
    def delete_vxlan_interfaces(self, host_dic):
        for host, ip in host_dic.items():
            self.logger.info('{0}: Getting VxLAN interfaces'.format(host))
            inf_list = self.get_vxlan_interfaces(ip)
            # NOTE(review): the loop headers iterating 'inf_list' around the two
            # delete passes are not visible in this chunk
            # first pass: remove the L2 crossconnect from each tunnel
            self.logger.info("{0}: Deleting VxLAN crossconnect {1}".format(host, inf))
            url = self.vpp_inf_url.format(ip) + '/interface/' + inf + '/v3po:l2'
            requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
            # second pass: remove the tunnel interface itself
            self.logger.info("{0}: Deleting VxLAN interface {1}".format(host, inf))
            url = self.vpp_inf_url.format(ip) + '/interface/' + inf
            requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
            # verify deletion; leftover interfaces usually require a VM restart
            if len(self.get_vxlan_interfaces(ip)) > 0:
                self.logger.error("Error deleting VxLAN from {0}, try to restart the VM, IP is {1}.".format(host, ip))
            # NOTE(review): the 'else:'/return lines around this message are not
            # visible in this chunk
            self.logger.info("{0}: no VxLAN interface found, nothing to delete".format(host))
741 def save_object(obj, filepathname):
742 with open(filepathname, 'wb') as fout:
743 pickle.dump(obj, fout)
746 def load_object(filepathname):
747 with open(filepathname, 'rb') as fin:
748 return pickle.load(fin)
    def increase_ip_address_or_vni_in_template(self, vnf_template_file, vnf_parameter_name_list):
        # Bump the last dotted component (last IP octet, or a VNI) of selected
        # VNF parameters in a VNF-API preload template, in place.
        with open(vnf_template_file) as json_input:
            json_data = json.load(json_input)
        param_list = json_data['VNF-API:input']['VNF-API:vnf-topology-information']['VNF-API:vnf-parameters']
        for param in param_list:
            if param['vnf-parameter-name'] in vnf_parameter_name_list:
                ipaddr_or_vni = param['vnf-parameter-value'].split('.')
                number = int(ipaddr_or_vni[-1])
                # NOTE(review): the lines that actually increment/wrap 'number'
                # are not visible in this chunk
                ipaddr_or_vni[-1] = str(number)
                param['vnf-parameter-value'] = '.'.join(ipaddr_or_vni)
        # write the modified template back to the same file
        assert json_data is not None
        with open(vnf_template_file, 'w') as json_output:
            json.dump(json_data, json_output, indent=4, sort_keys=True)
770 def save_preload_data(self, preload_data):
771 self.save_object(preload_data, self.preload_dict_file)
773 def load_preload_data(self):
774 return self.load_object(self.preload_dict_file)
776 def save_vgmux_vnf_name(self, vgmux_vnf_name):
777 self.save_object(vgmux_vnf_name, self.vgmux_vnf_name_file)
779 def load_vgmux_vnf_name(self):
780 return self.load_object(self.vgmux_vnf_name_file)