11 import mysql.connector
15 from novaclient import client as openstackclient
16 from kubernetes import client, config
17 from netaddr import IPAddress, IPNetwork
######################################################################
# Parts which must be updated / cross-checked during each deployment #
# are marked as CHANGEME                                             #
######################################################################

#############################################################################################
# Set network prefix of k8s host external address; it's used for pod public IP autodetection
# but can be overriden from user in case of autodetection failure
external_net_addr = '10.12.0.0'
external_net_prefix_len = 16

#############################################################################################
# set the openstack cloud access credentials here
# deployment type flag: True = OOM (k8s) deployment, False = HEAT deployment
# NOTE(review): default reconstructed from surrounding usage -- confirm per deployment
oom_mode = True

#############################################################################################
# set the gra_api flag
# Mustn't be set to True until Frankfurt DGs are updated for GRA-API infrastructure
gra_api_flag = False

###########################
# set Openstack credentials
# CHANGEME part
cloud = {
    '--os-auth-url': 'http://10.12.25.2:5000',
    '--os-username': 'kxi',
    '--os-user-domain-id': 'default',
    '--os-project-domain-id': 'default',
    '--os-tenant-id': '712b6016580e410b9abfec9ca34953ce' if oom_mode else '1e097c6713e74fd7ac8e4295e605ee1e',
    '--os-region-name': 'RegionOne',
    '--os-password': 'n3JhGMGuDzD8',
    '--os-project-domain-name': 'Integration-Release-Daily' if oom_mode else 'Integration-SB-07',
    '--os-identity-api-version': '3'
}

############################################################################
# set oam and public network which must exist in openstack before deployment
# CHANGEME part
common_preload_config = {
    'oam_onap_net': 'oam_network_exxC' if oom_mode else 'oam_onap_lAky',
    'oam_onap_subnet': 'oam_network_exxC' if oom_mode else 'oam_onap_lAky',
    'public_net': 'external',
    'public_net_id': '971040b2-7059-49dc-b220-4fab50cb2ad4'
}

#############################################################################
# Set name of Onap's k8s namespace and sdnc controller pod
onap_namespace = 'onap'
onap_environment = 'dev'
sdnc_controller_pod = '-'.join([onap_environment, 'sdnc-sdnc-0'])

# marker that identifies substitutable variables inside preload templates
template_variable_symbol = '${'
# name prefix shared by all vCPE emulator VMs (used to filter nova listings)
cpe_vm_prefix = 'zdcpe'

#############################################################################################
# preloading network config
# key = network role
# value = [subnet_start_ip, subnet_gateway_ip]
preload_network_config = {
    'cpe_public': ['10.2.0.2', '10.2.0.1'],
    'cpe_signal': ['10.4.0.2', '10.4.0.1'],
    'brg_bng': ['10.3.0.2', '10.3.0.1'],
    'bng_mux': ['10.1.0.10', '10.1.0.1'],
    'mux_gw': ['10.5.0.10', '10.5.0.1']
}

dcae_ves_collector_name = 'dcae-bootstrap'
global_subscriber_id = 'SDN-ETHERNET-INTERNET'
project_name = 'Project-Demonstration'
owning_entity_id = '520cc603-a3c4-4ec2-9ef4-ca70facd79c0'
owning_entity_name = 'OE-Demonstration1'
def __init__(self, extra_host_names=None):
    """
    Collect all deployment-specific endpoints, credentials and working-file
    paths used by the vCPE test scripts.
    :param extra_host_names: optional list of additional VM name keywords
                             whose external IPs are also resolved into self.hosts
    """
    self.logger = logging.getLogger(__name__)
    self.logger.setLevel(logging.DEBUG)
    self.logger.info('Initializing configuration')

    ##################################################################################################################################
    # following param must be updated e.g. from csar file (grep for VfModuleModelInvariantUuid string) before vcpe.py customer call !!
    # vgw_VfModuleModelInvariantUuid is in rescust service csar,
    # look in service-VcpesvcRescust1118-template.yml for groups vgw module metadata. TODO: read this value automatically
    self.vgw_VfModuleModelInvariantUuid = '26d6a718-17b2-4ba8-8691-c44343b2ecd2'

    # OOM: this is the address that the brg and bng will nat for sdnc access - 10.0.0.x address of k8 host for sdnc-0 container
    self.sdnc_oam_ip = self.get_pod_node_oam_ip(self.sdnc_controller_pod)
    # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
    self.oom_so_sdnc_aai_ip = self.get_pod_node_public_ip(self.sdnc_controller_pod)
    # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
    self.oom_dcae_ves_collector = self.oom_so_sdnc_aai_ip
    # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
    self.mr_ip_addr = self.oom_so_sdnc_aai_ip
    self.mr_ip_port = '30227'
    # NodePort values apply in OOM mode, legacy service ports in HEAT mode
    self.so_nbi_port = '30277' if self.oom_mode else '8080'
    self.sdnc_preloading_port = '30267' if self.oom_mode else '8282'
    self.aai_query_port = '30233' if self.oom_mode else '8443'
    self.sniro_port = '30288' if self.oom_mode else '8080'

    self.host_names = ['sdc', 'so', 'sdnc', 'robot', 'aai-inst1', self.dcae_ves_collector_name, 'mariadb-galera']
    if extra_host_names:
        self.host_names.extend(extra_host_names)
    # get IP addresses of the above VMs from openstack
    self.hosts = self.get_vm_ip(self.host_names, self.external_net_addr, self.external_net_prefix_len)
    # this is the keyword used to name vgw stack, must not be used in other stacks
    self.vgw_name_keyword = 'base_vcpe_vgw'
    # this is the file that will keep the index of last assigned SO name
    self.vgw_vfmod_name_index_file = '__var/vgw_vfmod_name_index'
    self.svc_instance_uuid_file = '__var/svc_instance_uuid'
    self.preload_dict_file = '__var/preload_dict'
    self.vgmux_vnf_name_file = '__var/vgmux_vnf_name'
    self.product_family_id = 'f9457e8c-4afd-45da-9389-46acd9bf5116'
    self.custom_product_family_id = 'a9a77d5a-123e-4ca2-9eb9-0b015d2ee0fb'
    self.instance_name_prefix = {
        'service': 'vcpe_svc',
        'network': 'vcpe_net',
        'vnf': 'vcpe_vnf',
        'vfmodule': 'vcpe_vfmodule'
    }
    self.aai_userpass = 'AAI', 'AAI'

    ############################################################################################################
    # following key is overriding public key from vCPE heat templates, it's important to use correct one in here
    # CHANGEME part
    self.pub_key = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh'

    self.os_tenant_id = self.cloud['--os-tenant-id']
    self.os_region_name = self.cloud['--os-region-name']
    self.common_preload_config['pub_key'] = self.pub_key
    self.sniro_url = 'http://' + self.hosts['robot'] + ':' + self.sniro_port + '/__admin/mappings'
    self.sniro_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    self.homing_solution = 'sniro'  # value is either 'sniro' or 'oof'
    # self.homing_solution = 'oof'
    self.customer_location_used_by_oof = {
        "customerLatitude": "32.897480",
        "customerLongitude": "-97.040443",
        "customerName": "some_company"
    }

    #############################################################################################
    # SDC urls
    self.sdc_be_port = '30204'
    self.sdc_be_request_userpass = 'vid', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
    self.sdc_be_request_headers = {'X-ECOMP-InstanceID': 'VID'}
    self.sdc_be_url_prefix = 'https://' + self.hosts['sdc'] + ':' + self.sdc_be_port
    self.sdc_service_list_url = self.sdc_be_url_prefix + '/sdc/v1/catalog/services'

    self.sdc_fe_port = '30207'
    self.sdc_fe_request_userpass = 'beep', 'boop'
    self.sdc_fe_request_headers = {'USER_ID': 'demo', 'Content-Type': 'application/json'}
    self.sdc_fe_url_prefix = 'https://' + self.hosts['sdc'] + ':' + self.sdc_fe_port
    self.sdc_get_category_list_url = self.sdc_fe_url_prefix + '/sdc1/feProxy/rest/v1/categories'
    self.sdc_create_allotted_resource_subcategory_url = self.sdc_fe_url_prefix + '/sdc1/feProxy/rest/v1/category/resources/resourceNewCategory.allotted%20resource/subCategory'

    #############################################################################################
    # SDNC urls
    self.sdnc_userpass = 'admin', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
    self.sdnc_db_name = 'sdnctl'
    self.sdnc_db_user = 'sdnctl'
    self.sdnc_db_pass = 'gamma'
    self.sdnc_db_port = self.get_k8s_service_endpoint_info('mariadb-galera', 'port') if self.oom_mode else '3306'
    self.sdnc_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    self.sdnc_preload_network_url = 'https://' + self.hosts['sdnc'] + \
        ':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-network-topology-operation'
    self.sdnc_preload_network_gra_url = 'https://' + self.hosts['sdnc'] + \
        ':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-network-topology-operation'
    self.sdnc_preload_vnf_url = 'https://' + self.hosts['sdnc'] + \
        ':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-vnf-topology-operation'
    self.sdnc_preload_gra_url = 'https://' + self.hosts['sdnc'] + \
        ':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-vf-module-topology-operation'
    self.sdnc_ar_cleanup_url = 'https://' + self.hosts['sdnc'] + ':' + self.sdnc_preloading_port + \
        '/restconf/config/GENERIC-RESOURCE-API:'

    #############################################################################################
    # SO urls, note: do NOT add a '/' at the end of the url
    self.so_req_api_url = {'v4': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances',
                           'v5': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances'}
    self.so_check_progress_api_url = 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/orchestrationRequests/v6'
    self.so_userpass = 'InfraPortalClient', 'password1$'
    self.so_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    self.so_db_name = 'catalogdb'
    self.so_db_user = 'root'
    self.so_db_pass = 'secretpassword'
    self.so_db_port = '30252' if self.oom_mode else '32769'

    self.vpp_inf_url = 'http://{0}:8183/restconf/config/ietf-interfaces:interfaces'
    self.vpp_api_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    self.vpp_api_userpass = ('admin', 'admin')
    self.vpp_ves_url = 'http://{0}:8183/restconf/config/vesagent:vesagent'

    #############################################################################################
    # POLICY urls
    self.policy_userpass = ('healthcheck', 'zb!XztG34')
    self.policy_headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
    self.policy_api_url = 'https://{0}:6969/policy/api/v1/policytypes/onap.policies.controlloop.Operational/versions/1.0.0/policies'
    self.policy_pap_get_url = 'https://{0}:6969/policy/pap/v1/pdps'
    self.policy_pap_json = {'policies': [{'policy-id': 'operational.vcpe'}]}
    self.policy_pap_post_url = self.policy_pap_get_url + '/policies'
    self.policy_api_service_name = 'policy-api'
    self.policy_pap_service_name = 'policy-pap'

    #############################################################################################
    # MARIADB-GALERA settings
    self.mariadb_galera_endpoint_ip = self.get_k8s_service_endpoint_info('mariadb-galera', 'ip')
    self.mariadb_galera_endpoint_port = self.get_k8s_service_endpoint_info('mariadb-galera', 'port')

    #############################################################################################
    # AAI urls
    self.aai_region_query_url = 'https://' + self.oom_so_sdnc_aai_ip + ':' +\
                                self.aai_query_port +\
                                '/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/' +\
                                self.cloud['--os-region-name']
    self.aai_headers = {'Accept': 'application/json',
                        'Content-Type': 'application/json',
                        'X-FromAppId': 'postman', 'X-TransactionId': '9999'}
def heatbridge(self, openstack_stack_name, svc_instance_uuid):
    """
    Add vserver information to AAI
    :param openstack_stack_name: name of the heat stack whose vservers to register
    :param svc_instance_uuid: service instance UUID the vservers belong to
    """
    self.logger.info('Adding vServer information to AAI for {0}'.format(openstack_stack_name))
    if not self.oom_mode:
        # HEAT deployment: run the demo helper remotely on the robot VM
        # (the 'commands' module is Python 2 only)
        cmd = '/opt/demo.sh heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid)
        ret = commands.getstatusoutput("ssh -i onap_dev root@{0} '{1}'".format(self.hosts['robot'], cmd))
        self.logger.debug('%s', ret)
    else:
        # OOM deployment: heatbridge has to be run manually on the rancher node
        print('To add vGMUX vserver info to AAI, do the following:')
        print('- ssh to rancher')
        print('- cd /root/oom/kubernetes/robot')
        print('- ./demo-k8s.sh onap heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid))
def get_brg_mac_from_sdnc(self):
    """
    Check table DHCP_MAP in the SDNC DB. Find the newly instantiated BRG MAC address.
    Note that there might be multiple BRGs, the most recently instantiated BRG always has the largest IP address.
    :return: MAC address string of the most recent BRG
    """
    if self.oom_mode:
        db_host = self.mariadb_galera_endpoint_ip
    else:
        db_host = self.hosts['mariadb-galera']

    cnx = mysql.connector.connect(user=self.sdnc_db_user,
                                  password=self.sdnc_db_pass,
                                  database=self.sdnc_db_name,
                                  host=db_host,
                                  port=self.sdnc_db_port)
    cursor = cnx.cursor()
    query = "SELECT * from DHCP_MAP"
    cursor.execute(query)

    self.logger.debug('DHCP_MAP table in SDNC')
    # track the row with the largest last IP octet -- that is the newest BRG
    mac_recent = None
    host = -1
    for mac, ip in cursor:
        self.logger.debug(mac + ' - ' + ip)
        this_host = int(ip.split('.')[-1])
        if host < this_host:
            host = this_host
            mac_recent = mac

    cnx.close()

    try:
        assert mac_recent
    except AssertionError:
        self.logger.error('Failed to obtain BRG MAC address from database')
        sys.exit(1)

    return mac_recent
def execute_cmds_mariadb(self, cmds):
    """Run SQL commands against the MariaDB-Galera endpoint using SDNC credentials."""
    self.execute_cmds_db(cmds,
                         dbuser=self.sdnc_db_user,
                         dbpass=self.sdnc_db_pass,
                         dbname=self.sdnc_db_name,
                         host=self.mariadb_galera_endpoint_ip,
                         port=self.mariadb_galera_endpoint_port)
def execute_cmds_sdnc_db(self, cmds):
    """Run SQL commands against the SDNC database on the sdnc host."""
    self.execute_cmds_db(cmds,
                         dbuser=self.sdnc_db_user,
                         dbpass=self.sdnc_db_pass,
                         dbname=self.sdnc_db_name,
                         host=self.hosts['sdnc'],
                         port=self.sdnc_db_port)
def execute_cmds_so_db(self, cmds):
    """Run SQL commands against the SO catalog database on the so host."""
    self.execute_cmds_db(cmds,
                         dbuser=self.so_db_user,
                         dbpass=self.so_db_pass,
                         dbname=self.so_db_name,
                         host=self.hosts['so'],
                         port=self.so_db_port)
def execute_cmds_db(self, cmds, dbuser, dbpass, dbname, host, port):
    """
    Execute a list of SQL statements against a MySQL/MariaDB database and commit.
    :param cmds: iterable of SQL statement strings
    :param dbuser/dbpass/dbname/host/port: connection parameters
    """
    cnx = mysql.connector.connect(user=dbuser, password=dbpass, database=dbname, host=host, port=port)
    cursor = cnx.cursor()
    for cmd in cmds:
        self.logger.debug(cmd)
        cursor.execute(cmd)
        self.logger.debug('%s', cursor)
    cnx.commit()
    cursor.close()
    cnx.close()
def find_file(self, file_name_keyword, file_ext, search_dir):
    """
    Find exactly one file matching *keyword* and extension in a directory.
    :param file_name_keyword: keyword used to look for the csar file, case insensitive matching, e.g, infra
    :param file_ext: e.g., csar, json
    :param search_dir: path to search
    :return: path name of the file (exits the process on zero or multiple matches)
    """
    file_name_keyword = file_name_keyword.lower()
    file_ext = file_ext.lower()
    if not file_ext.startswith('.'):
        file_ext = '.' + file_ext

    filenamepath = None
    for file_name in os.listdir(search_dir):
        file_name_lower = file_name.lower()
        if file_name_keyword in file_name_lower and file_name_lower.endswith(file_ext):
            if filenamepath:
                # ambiguous match is a hard error
                self.logger.error('Multiple files found for *{0}*.{1} in '
                                  'directory {2}'.format(file_name_keyword, file_ext, search_dir))
                sys.exit(1)
            filenamepath = os.path.abspath(os.path.join(search_dir, file_name))

    if filenamepath:
        return filenamepath
    else:
        self.logger.error("Cannot find *{0}*{1} in directory {2}".format(file_name_keyword, file_ext, search_dir))
        sys.exit(1)
@staticmethod
def network_name_to_subnet_name(network_name):
    """
    Derive the subnet name by inserting 'subnet' before the trailing timestamp.
    :param network_name: example: vcpe_net_cpe_signal_201711281221
    :return: vcpe_net_cpe_signal_subnet_201711281221
    """
    fields = network_name.split('_')
    fields.insert(-1, 'subnet')
    return '_'.join(fields)
def set_network_name(self, network_name):
    """
    Rename the openstack network 'ONAP-NW1' to the given name via the CLI.
    :param network_name: new network name
    """
    param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
    openstackcmd = 'openstack ' + param
    cmd = ' '.join([openstackcmd, 'network set --name', network_name, 'ONAP-NW1'])
    os.popen(cmd)
def set_subnet_name(self, network_name):
    """
    Set the subnet name based on the network name.
    Example: network_name = vcpe_net_cpe_signal_201711281221
    set subnet name to vcpe_net_cpe_signal_subnet_201711281221
    :param network_name: name of the (already renamed) network
    """
    param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
    openstackcmd = 'openstack ' + param

    # expected results: | subnets | subnet_id |
    subnet_info = os.popen(openstackcmd + ' network show ' + network_name + ' |grep subnets').read().split('|')
    if len(subnet_info) > 2 and subnet_info[1].strip() == 'subnets':
        subnet_id = subnet_info[2].strip()
        subnet_name = self.network_name_to_subnet_name(network_name)
        cmd = ' '.join([openstackcmd, 'subnet set --name', subnet_name, subnet_id])
        os.popen(cmd)
        self.logger.info("Subnet name set to: " + subnet_name)
        return True
    else:
        self.logger.error("Can't get subnet info from network name: " + network_name)
        return False
def set_closed_loop_policy(self, policy_template_file):
    """
    Create the vCPE operational closed-loop policy (if absent) and push it to
    the PDP group via policy-api and policy-pap.
    :param policy_template_file: path to the policy JSON template
    """
    # Gather policy services cluster ips
    p_api_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_api_service_name)
    p_pap_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_pap_service_name)

    # Read policy json from file
    with open(policy_template_file) as f:
        try:
            policy_json = json.load(f)
        except ValueError:
            self.logger.error(policy_template_file + " doesn't seem to contain valid JSON data")
            sys.exit(1)

    # Check policy already applied
    policy_exists_req = requests.get(self.policy_pap_get_url.format(
                        p_pap_cluster_ip), auth=self.policy_userpass,
                        verify=False, headers=self.policy_headers)
    if policy_exists_req.status_code != 200:
        self.logger.error('Failure in checking CL policy existence. '
                          'Policy-pap responded with HTTP code {0}'.format(
                          policy_exists_req.status_code))
        sys.exit(1)

    try:
        policy_exists_json = policy_exists_req.json()
    except ValueError as e:
        # NOTE: e.message is Python 2 only
        self.logger.error('Policy-pap request failed: ' + e.message)
        sys.exit(1)

    try:
        assert policy_exists_json['groups'][0]['pdpSubgroups'] \
                                 [1]['policies'][0]['name'] != 'operational.vcpe'
    except AssertionError:
        self.logger.info('vCPE closed loop policy already exists, not applying')
        return
    except IndexError:
        pass  # policy doesn't exist

    # Create the policy via policy-api
    policy_create_req = requests.post(self.policy_api_url.format(
                        p_api_cluster_ip), auth=self.policy_userpass,
                        json=policy_json, verify=False,
                        headers=self.policy_headers)
    # Get the policy id from policy-api response
    if policy_create_req.status_code != 200:
        self.logger.error('Failed creating policy. Policy-api responded'
                          ' with HTTP code {0}'.format(policy_create_req.status_code))
        sys.exit(1)
    try:
        policy_version = json.loads(policy_create_req.text)['policy-version']
    except (KeyError, ValueError):
        self.logger.error('Policy API response not understood:')
        self.logger.debug('\n' + str(policy_create_req.text))
        sys.exit(1)

    # Inject the policy into Policy PAP
    self.policy_pap_json['policies'].append({'policy-version': policy_version})
    policy_insert_req = requests.post(self.policy_pap_post_url.format(
                        p_pap_cluster_ip), auth=self.policy_userpass,
                        json=self.policy_pap_json, verify=False,
                        headers=self.policy_headers)
    if policy_insert_req.status_code != 200:
        self.logger.error('Policy PAP request failed with HTTP code'
                          '{0}'.format(policy_insert_req.status_code))
        sys.exit(1)
    # fixed typo: 'Successully' -> 'Successfully'
    self.logger.info('Successfully pushed closed loop Policy')
def is_node_in_aai(self, node_type, node_uuid):
    """
    Query AAI's nodes-query API for a service instance or generic VNF.
    :param node_type: 'service' or 'vnf'
    :param node_uuid: UUID of the node to look up
    :return: True if AAI returned result data for the node
    """
    key = None
    search_node_type = None
    if node_type == 'service':
        search_node_type = 'service-instance'
        key = 'service-instance-id'
    elif node_type == 'vnf':
        search_node_type = 'generic-vnf'
        key = 'vnf-id'
    else:
        logging.error('Invalid node_type: ' + node_type)
        sys.exit(1)

    url = 'https://{0}:{1}/aai/v11/search/nodes-query?search-node-type={2}&filter={3}:EQUALS:{4}'.format(
        self.hosts['aai-inst1'], self.aai_query_port, search_node_type, key, node_uuid)

    headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-FromAppID': 'vCPE-Robot', 'X-TransactionId': 'get_aai_subscr'}
    r = requests.get(url, headers=headers, auth=self.aai_userpass, verify=False)
    response = r.json()
    self.logger.debug('aai query: ' + url)
    self.logger.debug('aai response:\n' + json.dumps(response, indent=4, sort_keys=True))
    return 'result-data' in response
@staticmethod
def extract_ip_from_str(net_addr, net_addr_len, sz):
    """
    Extract from a free-form string the first IPv4 address that belongs to the given network.
    :param net_addr: e.g. 10.5.12.0
    :param net_addr_len: e.g. 24
    :param sz: arbitrary string possibly containing IPv4 addresses
    :return: the first IP address matching the network, e.g. 10.5.12.3, or None
    """
    # u'' literal instead of unicode() keeps this working on both Python 2
    # (ipaddress backport requires unicode) and Python 3
    network = ipaddress.ip_network(u'{0}/{1}'.format(net_addr, net_addr_len), strict=False)
    ip_list = re.findall(r'[0-9]+(?:\.[0-9]+){3}', sz)
    for ip in ip_list:
        this_net = ipaddress.ip_network(u'{0}/{1}'.format(ip, net_addr_len), strict=False)
        if this_net == network:
            return ip
    return None
def get_pod_node_oam_ip(self, pod):
    """
    :Assuming kubectl is available and configured by default config (~/.kube/config)
    :param pod: pod name substring, e.g. 'sdnc-sdnc-0'
    :return: pod's cluster node oam ip (10.0.0.0/16)
    """
    ret = None
    config.load_kube_config()
    api = client.CoreV1Api()
    # quieten the very verbose kubernetes client logger
    kslogger = logging.getLogger('kubernetes')
    kslogger.setLevel(logging.INFO)
    res = api.list_pod_for_all_namespaces()
    for i in res.items:
        if pod in i.metadata.name:
            self.logger.debug("found {0}\t{1}\t{2}".format(i.metadata.name, i.status.host_ip, i.spec.node_name))
            ret = i.status.host_ip
            break

    if ret is None:
        # autodetection failed -- ask the operator (raw_input is Python 2)
        ret = raw_input("Enter " + self.sdnc_controller_pod + " pod cluster node OAM IP address(10.0.0.0/16): ")
    return ret
def get_pod_node_public_ip(self, pod):
    """
    :Assuming kubectl is available and configured by default config (~/.kube/config)
    :param pod: pod name substring, e.g. 'sdnc-sdnc-0'
    :return: pod's cluster node public ip (i.e. 10.12.0.0/16)
    """
    ret = None
    config.load_kube_config()
    api = client.CoreV1Api()
    # quieten the very verbose kubernetes client logger
    kslogger = logging.getLogger('kubernetes')
    kslogger.setLevel(logging.INFO)
    res = api.list_pod_for_all_namespaces()
    for i in res.items:
        if pod in i.metadata.name:
            # resolve the pod's node name to its public IP via nova
            ret = self.get_vm_public_ip_by_nova(i.spec.node_name)
            self.logger.debug("found node {0} public ip: {1}".format(i.spec.node_name, ret))
            break

    if ret is None:
        # autodetection failed -- ask the operator (raw_input is Python 2)
        ret = raw_input("Enter " + self.sdnc_controller_pod + " pod cluster node public IP address(i.e. " + self.external_net_addr + "): ")
    return ret
def get_vm_public_ip_by_nova(self, vm):
    """
    This method uses openstack nova api to retrieve vm public ip
    :param vm: vm name
    :return: vm public ip, or None when no address in the external subnet is found
    """
    subnet = IPNetwork('{0}/{1}'.format(self.external_net_addr, self.external_net_prefix_len))
    nova = openstackclient.Client(2, self.cloud['--os-username'], self.cloud['--os-password'], self.cloud['--os-tenant-id'], self.cloud['--os-auth-url'])
    for i in nova.servers.list():
        if i.name == vm:
            # i.networks maps network name -> list of addresses
            for k, v in i.networks.items():
                for ip in v:
                    if IPAddress(ip) in subnet:
                        return ip
    return None
def get_vm_ip(self, keywords, net_addr=None, net_addr_len=None):
    """
    Resolve external IPs of the newest VM matching each keyword via 'nova list'.
    :param keywords: list of keywords to search for vm, e.g. ['bng', 'gmux', 'brg']
    :param net_addr: e.g. 10.12.5.0
    :param net_addr_len: e.g. 24
    :return: dictionary {keyword: ip}
    """
    if not net_addr:
        net_addr = self.external_net_addr

    if not net_addr_len:
        net_addr_len = self.external_net_prefix_len

    # identity-api-version is a CLI-only option that nova does not accept
    param = ' '.join([k + ' ' + v for k, v in self.cloud.items() if 'identity' not in k])
    openstackcmd = 'nova ' + param + ' list'
    self.logger.debug(openstackcmd)

    results = os.popen(openstackcmd).read()
    all_vm_ip_dict = self.extract_vm_ip_as_dict(results, net_addr, net_addr_len)
    latest_vm_list = self.remove_old_vms(all_vm_ip_dict.keys(), self.cpe_vm_prefix)
    latest_vm_ip_dict = {vm: all_vm_ip_dict[vm] for vm in latest_vm_list}
    ip_dict = self.select_subset_vm_ip(latest_vm_ip_dict, keywords)
    if self.oom_mode:
        # ONAP component hosts all live on the single k8s host in OOM mode
        ip_dict.update(self.get_oom_onap_vm_ip(keywords))

    if len(ip_dict) != len(keywords):
        self.logger.error('Cannot find all desired IP addresses for %s.', keywords)
        self.logger.error(json.dumps(ip_dict, indent=4, sort_keys=True))
        self.logger.error('Temporarily continue.. remember to check back vcpecommon.py line: 396')
    return ip_dict
def get_oom_onap_vm_ip(self, keywords):
    """
    Map each requested ONAP component host name to the single k8s host
    external IP (OOM deployments run all components on that host).
    :param keywords: list of host-name keywords
    :return: dictionary {keyword: ip} for keywords that are known host names
    """
    vm_ip = {}
    for vm in keywords:
        if vm in self.host_names:
            vm_ip[vm] = self.oom_so_sdnc_aai_ip
    return vm_ip
def get_k8s_service_cluster_ip(self, service):
    """
    Returns cluster IP for a given service
    :param service: name of the service
    :return: cluster ip string
    """
    config.load_kube_config()
    api = client.CoreV1Api()
    # quieten the very verbose kubernetes client logger
    kslogger = logging.getLogger('kubernetes')
    kslogger.setLevel(logging.INFO)
    try:
        resp = api.read_namespaced_service(service, self.onap_namespace)
    except client.rest.ApiException as e:
        self.logger.error('Error while making k8s API request: ' + e.body)
        sys.exit(1)

    return resp.spec.cluster_ip
def get_k8s_service_endpoint_info(self, service, subset):
    """
    Returns endpoint data for a given service and subset. If there
    is more than one endpoint returns data for the first one from
    the list that API returned.
    :param service: name of the service
    :param subset: subset name, one of "ip","port"
    :return: endpoint ip or port (None for an unsupported subset)
    """
    config.load_kube_config()
    api = client.CoreV1Api()
    # quieten the very verbose kubernetes client logger
    kslogger = logging.getLogger('kubernetes')
    kslogger.setLevel(logging.INFO)
    try:
        resp = api.read_namespaced_endpoints(service, self.onap_namespace)
    except client.rest.ApiException as e:
        self.logger.error('Error while making k8s API request: ' + e.body)
        sys.exit(1)

    if subset == "ip":
        return resp.subsets[0].addresses[0].ip
    elif subset == "port":
        return resp.subsets[0].ports[0].port
    else:
        self.logger.error("Unsupported subset type")
def extract_vm_ip_as_dict(self, novalist_results, net_addr, net_addr_len):
    """
    Parse 'nova list' table output into {vm_name: external_ip}.
    :param novalist_results: raw text output of 'nova list'
    :param net_addr: network address used to select the external IP
    :param net_addr_len: network prefix length
    :return: dictionary {vm_name: ip-or-None}
    """
    vm_ip_dict = {}
    for line in novalist_results.split('\n'):
        fields = line.split('|')
        # a data row of the nova table splits into exactly 8 fields:
        # '', ID, Name, Status, Task State, Power State, Networks, ''
        if len(fields) == 8:
            vm_name = fields[2].strip()
            ip_info = fields[-2]
            ip = self.extract_ip_from_str(net_addr, net_addr_len, ip_info)
            vm_ip_dict[vm_name] = ip
    return vm_ip_dict
def remove_old_vms(self, vm_list, prefix):
    """
    For vms with format name_timestamp, only keep the one with the latest timestamp.
    E.g.,
        zdcpe1cpe01brgemu01_201805222148 (drop this)
        zdcpe1cpe01brgemu01_201805222229 (keep this)
        zdcpe1cpe01gw01_201805162201
    :param vm_list: iterable of VM names
    :param prefix: only names starting with this prefix are deduplicated
    :return: list of VM names with stale duplicates removed
    """
    new_vm_list = []
    same_type_vm_dict = {}
    for vm in vm_list:
        fields = vm.split('_')
        if vm.startswith(prefix) and len(fields) == 2 and len(fields[-1]) == len('201805222148') and fields[-1].isdigit():
            # timestamps sort lexically, so plain string compare picks the newest
            if vm > same_type_vm_dict.get(fields[0], '0'):
                same_type_vm_dict[fields[0]] = vm
        else:
            new_vm_list.append(vm)

    new_vm_list.extend(same_type_vm_dict.values())
    return new_vm_list
def select_subset_vm_ip(self, all_vm_ip_dict, vm_name_keyword_list):
    """
    Pick, for each keyword, the IP of the first VM whose name contains it.
    :param all_vm_ip_dict: dictionary {vm_name: ip}
    :param vm_name_keyword_list: keywords, e.g. ['bng', 'gmux', 'brg']
    :return: dictionary {keyword: ip} for keywords with a match
    """
    vm_ip_dict = {}
    for keyword in vm_name_keyword_list:
        for vm, ip in all_vm_ip_dict.items():
            if keyword in vm:
                vm_ip_dict[keyword] = ip
                break
    return vm_ip_dict
def del_vgmux_ves_mode(self):
    """Delete the VES 'mode' configuration from the vGMUX VPP agent."""
    target = self.vpp_ves_url.format(self.hosts['mux']) + '/mode'
    resp = requests.delete(target, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
    self.logger.debug('%s', resp)
def del_vgmux_ves_collector(self):
    """Delete the VES collector configuration from the vGMUX VPP agent."""
    target = self.vpp_ves_url.format(self.hosts['mux']) + '/config'
    resp = requests.delete(target, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
    self.logger.debug('%s', resp)
def set_vgmux_ves_collector(self):
    """
    Point the vGMUX VES agent at the DCAE VES collector.
    NOTE(review): payload envelope key reconstructed -- confirm against the
    vesagent restconf model.
    """
    url = self.vpp_ves_url.format(self.hosts['mux'])
    data = {'config':
                {'server-addr': self.hosts[self.dcae_ves_collector_name],
                 'server-port': '30235' if self.oom_mode else '8081',
                 'read-interval': '10',
                 'is-add': '1'
                 }
            }
    r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
    self.logger.debug('%s', r)
def set_vgmux_packet_loss_rate(self, lossrate, vg_vnf_instance_name):
    """
    Configure the vGMUX demo packet-loss mode used by the closed-loop test.
    NOTE(review): payload envelope key reconstructed -- confirm against the
    vesagent restconf model.
    :param lossrate: base packet loss percentage (converted to string)
    :param vg_vnf_instance_name: VNF instance name reported as source-name
    """
    url = self.vpp_ves_url.format(self.hosts['mux'])
    data = {"mode":
                {"working-mode": "demo",
                 "base-packet-loss": str(lossrate),
                 "source-name": vg_vnf_instance_name
                 }
            }
    r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
    self.logger.debug('%s', r)
# return all the VxLAN interface names of BRG or vGMUX based on the IP address
def get_vxlan_interfaces(self, ip, print_info=False):
    """
    :param ip: management IP of the BRG or vGMUX VPP agent
    :param print_info: when True, pretty-print each VxLAN interface record
    :return: list of VxLAN tunnel interface names
    """
    url = self.vpp_inf_url.format(ip)
    self.logger.debug('url is this: %s', url)
    r = requests.get(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
    data = r.json()['interfaces']['interface']
    if print_info:
        for inf in data:
            if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel':
                print(json.dumps(inf, indent=4, sort_keys=True))

    return [inf['name'] for inf in data if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel']
# delete all VxLAN interfaces of each hosts
def delete_vxlan_interfaces(self, host_dic):
    """
    Delete every VxLAN crossconnect and interface on each host.
    :param host_dic: dictionary {host_name: ip}
    :return: False if any host still reports VxLAN interfaces afterwards, else True
    """
    for host, ip in host_dic.items():
        deleted = False
        self.logger.info('{0}: Getting VxLAN interfaces'.format(host))
        inf_list = self.get_vxlan_interfaces(ip)
        # first remove the L2 crossconnects, then the interfaces themselves
        for inf in inf_list:
            deleted = True
            time.sleep(2)
            self.logger.info("{0}: Deleting VxLAN crossconnect {1}".format(host, inf))
            url = self.vpp_inf_url.format(ip) + '/interface/' + inf + '/v3po:l2'
            requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)

        for inf in inf_list:
            time.sleep(2)
            self.logger.info("{0}: Deleting VxLAN interface {1}".format(host, inf))
            url = self.vpp_inf_url.format(ip) + '/interface/' + inf
            requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)

        if len(self.get_vxlan_interfaces(ip)) > 0:
            self.logger.error("Error deleting VxLAN from {0}, try to restart the VM, IP is {1}.".format(host, ip))
            return False

        if not deleted:
            self.logger.info("{0}: no VxLAN interface found, nothing to delete".format(host))
    return True
@staticmethod
def save_object(obj, filepathname):
    """
    Pickle *obj* to the given file path.
    :param obj: any picklable object
    :param filepathname: destination file path
    """
    with open(filepathname, 'wb') as fout:
        pickle.dump(obj, fout)
@staticmethod
def load_object(filepathname):
    """
    Unpickle and return the object stored at the given file path.
    :param filepathname: source file path
    :return: the deserialized object
    """
    with open(filepathname, 'rb') as fin:
        return pickle.load(fin)
@staticmethod
def increase_ip_address_or_vni_in_template(vnf_template_file, vnf_parameter_name_list):
    """
    Increment the last dotted component of selected VNF parameter values
    (an IP's last octet, or a VNI) in a VNF-API preload template, in place.
    The value wraps from 254 back to 10 to stay inside the usable range.
    :param vnf_template_file: path to the JSON preload template (rewritten in place)
    :param vnf_parameter_name_list: names of the parameters to increment
    """
    with open(vnf_template_file) as json_input:
        json_data = json.load(json_input)
        param_list = json_data['VNF-API:input']['VNF-API:vnf-topology-information']['VNF-API:vnf-parameters']
        for param in param_list:
            if param['vnf-parameter-name'] in vnf_parameter_name_list:
                ipaddr_or_vni = param['vnf-parameter-value'].split('.')
                number = int(ipaddr_or_vni[-1])
                if 254 == number:
                    number = 10
                else:
                    number = number + 1
                ipaddr_or_vni[-1] = str(number)
                param['vnf-parameter-value'] = '.'.join(ipaddr_or_vni)

    assert json_data is not None
    with open(vnf_template_file, 'w') as json_output:
        json.dump(json_data, json_output, indent=4, sort_keys=True)
def save_preload_data(self, preload_data):
    """Persist the preload data dictionary to its pickle file."""
    destination = self.preload_dict_file
    self.save_object(preload_data, destination)
def load_preload_data(self):
    """Load and return the preload data dictionary from its pickle file."""
    source = self.preload_dict_file
    return self.load_object(source)
def save_vgmux_vnf_name(self, vgmux_vnf_name):
    """Persist the vGMUX VNF name to its pickle file."""
    destination = self.vgmux_vnf_name_file
    self.save_object(vgmux_vnf_name, destination)
def load_vgmux_vnf_name(self):
    """Load and return the vGMUX VNF name from its pickle file."""
    source = self.vgmux_vnf_name_file
    return self.load_object(source)