11 import mysql.connector
15 from novaclient import client as openstackclient
16 from kubernetes import client, config
17 from netaddr import IPAddress, IPNetwork
19 ######################################################################
20 # Parts which must be updated / cross-checked during each deployment #
21 # are marked as CHANGEME #
22 ######################################################################
25 #############################################################################################
26 # Set network prefix of k8s host external address; it's used for pod public IP autodetection
27 # but can be overriden from user in case of autodetection failure
28 external_net_addr = '10.12.0.0'
29 external_net_prefix_len = 16
31 #############################################################################################
32 # set the openstack cloud access credentials here
35 #############################################################################################
36 # set the gra_api flag
37 # Mustn't be set to True until Frankfurt DGs are updated for GRA-API infrastructure
40 ###########################
41 # set Openstack credentials
44 '--os-auth-url': 'http://10.12.25.2:5000',
45 '--os-username': 'kxi',
46 '--os-user-domain-id': 'default',
47 '--os-project-domain-id': 'default',
48 '--os-tenant-id': '712b6016580e410b9abfec9ca34953ce' if oom_mode else '1e097c6713e74fd7ac8e4295e605ee1e',
49 '--os-region-name': 'RegionOne',
50 '--os-password': 'n3JhGMGuDzD8',
51 '--os-project-domain-name': 'Integration-Release-Daily' if oom_mode else 'Integration-SB-07',
52 '--os-identity-api-version': '3'
55 ############################################################################
56 # set oam and public network which must exist in openstack before deployment
58 common_preload_config = {
59 'oam_onap_net': 'oam_network_exxC' if oom_mode else 'oam_onap_lAky',
60 'oam_onap_subnet': 'oam_network_exxC' if oom_mode else 'oam_onap_lAky',
61 'public_net': 'external',
62 'public_net_id': '971040b2-7059-49dc-b220-4fab50cb2ad4'
65 #############################################################################
66 # Set name of Onap's k8s namespace and sdnc controller pod
68 onap_namespace = 'onap'
69 onap_environment = 'dev'
70 sdnc_controller_pod = '-'.join([onap_environment, 'sdnc-sdnc-0'])
72 template_variable_symbol = '${'
73 cpe_vm_prefix = 'zdcpe'
75 #############################################################################################
76 # preloading network config
78 # value = [subnet_start_ip, subnet_gateway_ip]
79 preload_network_config = {
80 'cpe_public': ['10.2.0.2', '10.2.0.1'],
81 'cpe_signal': ['10.4.0.2', '10.4.0.1'],
82 'brg_bng': ['10.3.0.2', '10.3.0.1'],
83 'bng_mux': ['10.1.0.10', '10.1.0.1'],
84 'mux_gw': ['10.5.0.10', '10.5.0.1']
87 dcae_ves_collector_name = 'dcae-bootstrap'
88 global_subscriber_id = 'SDN-ETHERNET-INTERNET'
89 project_name = 'Project-Demonstration'
90 owning_entity_id = '520cc603-a3c4-4ec2-9ef4-ca70facd79c0'
91 owning_entity_name = 'OE-Demonstration1'
def __init__(self, extra_host_names=None):
"""
Resolve every endpoint, credential and port used by the vCPE test flows:
k8s pod/node IPs, OpenStack VM IPs, SDC/SDNC/SO/Policy/AAI urls and the
database settings. Requires a reachable k8s cluster and OpenStack cloud.
:param extra_host_names: optional list of extra VM name keywords whose
                         IPs should also be resolved into self.hosts
"""
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
self.logger.info('Initializing configuration')
##################################################################################################################################
# following param must be updated e.g. from csar file (grep for VfModuleModelInvariantUuid string) before vcpe.py customer call !!
# vgw_VfModuleModelInvariantUuid is in rescust service csar,
# look in service-VcpesvcRescust1118-template.yml for groups vgw module metadata. TODO: read this value automatically
self.vgw_VfModuleModelInvariantUuid = '26d6a718-17b2-4ba8-8691-c44343b2ecd2'
# OOM: this is the address that the brg and bng will nat for sdnc access - 10.0.0.x address of k8 host for sdnc-0 container
self.sdnc_oam_ip = self.get_pod_node_oam_ip(self.sdnc_controller_pod)
# OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
self.oom_so_sdnc_aai_ip = self.get_pod_node_public_ip(self.sdnc_controller_pod)
# OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
self.oom_dcae_ves_collector = self.oom_so_sdnc_aai_ip
# OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
self.mr_ip_addr = self.oom_so_sdnc_aai_ip
self.mr_ip_port = '30227'
# NodePort numbers in OOM mode, in-VM service ports otherwise
self.so_nbi_port = '30277' if self.oom_mode else '8080'
self.sdnc_preloading_port = '30267' if self.oom_mode else '8282'
self.aai_query_port = '30233' if self.oom_mode else '8443'
self.sniro_port = '30288' if self.oom_mode else '8080'
self.host_names = ['sdc', 'so', 'sdnc', 'robot', 'aai-inst1', self.dcae_ves_collector_name, 'mariadb-galera']
# NOTE(review): presumably guarded by 'if extra_host_names:' in the full file —
# confirm; extend(None) would raise TypeError with the default argument.
self.host_names.extend(extra_host_names)
self.hosts = self.get_vm_ip(self.host_names, self.external_net_addr, self.external_net_prefix_len)
# this is the keyword used to name vgw stack, must not be used in other stacks
self.vgw_name_keyword = 'base_vcpe_vgw'
# this is the file that will keep the index of last assigned SO name
self.vgw_vfmod_name_index_file= '__var/vgw_vfmod_name_index'
self.svc_instance_uuid_file = '__var/svc_instance_uuid'
self.preload_dict_file = '__var/preload_dict'
self.vgmux_vnf_name_file = '__var/vgmux_vnf_name'
self.product_family_id = 'f9457e8c-4afd-45da-9389-46acd9bf5116'
self.custom_product_family_id = 'a9a77d5a-123e-4ca2-9eb9-0b015d2ee0fb'
self.instance_name_prefix = {
'service': 'vcpe_svc',
'network': 'vcpe_net',
'vfmodule': 'vcpe_vfmodule'
self.aai_userpass = 'AAI', 'AAI'
############################################################################################################
# following key is overriding public key from vCPE heat templates, it's important to use correct one in here
self.pub_key = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh'
self.os_tenant_id = self.cloud['--os-tenant-id']
self.os_region_name = self.cloud['--os-region-name']
self.common_preload_config['pub_key'] = self.pub_key
self.sniro_url = 'http://' + self.hosts['robot'] + ':' + self.sniro_port + '/__admin/mappings'
self.sniro_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
self.homing_solution = 'sniro' # value is either 'sniro' or 'oof'
# self.homing_solution = 'oof'
self.customer_location_used_by_oof = {
"customerLatitude": "32.897480",
"customerLongitude": "-97.040443",
"customerName": "some_company"
#############################################################################################
# SDC urls and credentials (back-end and front-end proxy)
self.sdc_be_port = '30204'
self.sdc_be_request_userpass = 'vid', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
self.sdc_be_request_headers = {'X-ECOMP-InstanceID': 'VID'}
self.sdc_be_url_prefix = 'https://' + self.hosts['sdc'] + ':' + self.sdc_be_port
self.sdc_service_list_url = self.sdc_be_url_prefix + '/sdc/v1/catalog/services'
self.sdc_fe_port = '30207'
self.sdc_fe_request_userpass = 'beep', 'boop'
self.sdc_fe_request_headers = {'USER_ID': 'demo', 'Content-Type': 'application/json'}
self.sdc_fe_url_prefix = 'https://' + self.hosts['sdc'] + ':' + self.sdc_fe_port
self.sdc_get_category_list_url = self.sdc_fe_url_prefix + '/sdc1/feProxy/rest/v1/categories'
self.sdc_create_allotted_resource_subcategory_url = self.sdc_fe_url_prefix + '/sdc1/feProxy/rest/v1/category/resources/resourceNewCategory.allotted%20resource/subCategory'
#############################################################################################
# SDNC credentials, database settings and RESTCONF preload urls
self.sdnc_userpass = 'admin', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
self.sdnc_db_name = 'sdnctl'
self.sdnc_db_user = 'sdnctl'
self.sdnc_db_pass = 'gamma'
self.sdnc_db_port = self.get_k8s_service_endpoint_info('mariadb-galera','port') if self.oom_mode else '3306'
self.sdnc_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
self.sdnc_preload_network_url = 'https://' + self.hosts['sdnc'] + \
':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-network-topology-operation'
self.sdnc_preload_network_gra_url = 'https://' + self.hosts['sdnc'] + \
':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-network-topology-operation'
self.sdnc_preload_vnf_url = 'https://' + self.hosts['sdnc'] + \
':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-vnf-topology-operation'
self.sdnc_preload_gra_url = 'https://' + self.hosts['sdnc'] + \
':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-vf-module-topology-operation'
self.sdnc_ar_cleanup_url = 'https://' + self.hosts['sdnc'] + ':' + self.sdnc_preloading_port + \
'/restconf/config/GENERIC-RESOURCE-API:'
#############################################################################################
# MARIADB-GALERA settings
self.mariadb_galera_endpoint_ip = self.get_k8s_service_endpoint_info('mariadb-galera','ip')
self.mariadb_galera_endpoint_port = self.get_k8s_service_endpoint_info('mariadb-galera','port')
#############################################################################################
# SO urls, note: do NOT add a '/' at the end of the url
self.so_req_api_url = {'v4': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances',
'v5': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances'}
self.so_check_progress_api_url = 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/orchestrationRequests/v6'
self.so_userpass = 'InfraPortalClient', 'password1$'
self.so_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
self.so_db_name = 'catalogdb'
self.so_db_user = 'root'
self.so_db_pass = 'secretpassword'
self.so_db_host = self.mariadb_galera_endpoint_ip if self.oom_mode else self.hosts['so']
self.so_db_port = self.mariadb_galera_endpoint_port if self.oom_mode else '3306'
# VPP honeycomb agent urls; '{0}' placeholders are filled with a BRG/vGMUX IP
self.vpp_inf_url = 'http://{0}:8183/restconf/config/ietf-interfaces:interfaces'
self.vpp_api_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
self.vpp_api_userpass = ('admin', 'admin')
self.vpp_ves_url= 'http://{0}:8183/restconf/config/vesagent:vesagent'
#############################################################################################
# Policy framework (api + pap) endpoints; '{0}' is a service cluster IP
self.policy_userpass = ('healthcheck', 'zb!XztG34')
self.policy_headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
self.policy_api_url = 'https://{0}:6969/policy/api/v1/policytypes/onap.policies.controlloop.Operational/versions/1.0.0/policies'
self.policy_pap_get_url = 'https://{0}:6969/policy/pap/v1/pdps'
self.policy_pap_json = {'policies': [{'policy-id': 'operational.vcpe'}]}
self.policy_pap_post_url = self.policy_pap_get_url + '/policies'
self.policy_api_service_name = 'policy-api'
self.policy_pap_service_name = 'policy-pap'
#############################################################################################
# AAI cloud-region query url and default request headers
self.aai_region_query_url = 'https://' + self.oom_so_sdnc_aai_ip + ':' +\
self.aai_query_port +\
'/aai/v14/cloud-infrastructure/cloud-regions/cloud-region/CloudOwner/' +\
self.cloud['--os-region-name']
self.aai_headers = {'Accept': 'application/json',
'Content-Type': 'application/json',
'X-FromAppId': 'postman', 'X-TransactionId': '9999'}
def heatbridge(self, openstack_stack_name, svc_instance_uuid):
"""
Add vserver information to AAI
:param openstack_stack_name: name of the deployed heat stack
:param svc_instance_uuid: AAI service instance uuid the vservers belong to
"""
self.logger.info('Adding vServer information to AAI for {0}'.format(openstack_stack_name))
if not self.oom_mode:
cmd = '/opt/demo.sh heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid)
# runs heatbridge remotely on the robot VM via the legacy python2 'commands' module
ret = commands.getstatusoutput("ssh -i onap_dev root@{0} '{1}'".format(self.hosts['robot'], cmd))
self.logger.debug('%s', ret)
# NOTE(review): the prints below are presumably the 'else:' (OOM-mode) branch —
# the else line is missing in this view; confirm.
print('To add vGMUX vserver info to AAI, do the following:')
print('- ssh to rancher')
print('- cd /root/oom/kubernetes/robot')
print('- ./demo-k8s.sh onap heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid))
def get_brg_mac_from_sdnc(self):
"""
Check table DHCP_MAP in the SDNC DB. Find the newly instantiated BRG MAC address.
Note that there might be multiple BRGs, the most recently instantiated BRG always has the largest IP address.
"""
# NOTE(review): the two assignments below are presumably the
# 'if self.oom_mode: / else:' branches — guard lines missing in this view; confirm.
db_host=self.mariadb_galera_endpoint_ip
db_host=self.hosts['mariadb-galera']
cnx = mysql.connector.connect(user=self.sdnc_db_user,
password=self.sdnc_db_pass,
database=self.sdnc_db_name,
# NOTE(review): 'host=db_host,' line appears to be missing in this view — confirm
port=self.sdnc_db_port)
cursor = cnx.cursor()
query = "SELECT * from DHCP_MAP"
cursor.execute(query)
self.logger.debug('DHCP_MAP table in SDNC')
# rows are (mac, ip); the row whose last IP octet is largest is the newest BRG
for mac, ip in cursor:
self.logger.debug(mac + ' - ' + ip)
this_host = int(ip.split('.')[-1])
except AssertionError:
self.logger.error('Failed to obtain BRG MAC address from database')
def execute_cmds_mariadb(self, cmds):
    """Run SQL statements against the SDNC schema via the mariadb-galera k8s endpoint."""
    self.execute_cmds_db(cmds,
                         dbuser=self.sdnc_db_user,
                         dbpass=self.sdnc_db_pass,
                         dbname=self.sdnc_db_name,
                         host=self.mariadb_galera_endpoint_ip,
                         port=self.mariadb_galera_endpoint_port)
def execute_cmds_sdnc_db(self, cmds):
    """Run SQL statements against the SDNC database on the sdnc host."""
    self.execute_cmds_db(cmds,
                         dbuser=self.sdnc_db_user,
                         dbpass=self.sdnc_db_pass,
                         dbname=self.sdnc_db_name,
                         host=self.hosts['sdnc'],
                         port=self.sdnc_db_port)
def execute_cmds_so_db(self, cmds):
    """Run SQL statements against the SO catalog database."""
    self.execute_cmds_db(cmds,
                         dbuser=self.so_db_user,
                         dbpass=self.so_db_pass,
                         dbname=self.so_db_name,
                         host=self.so_db_host,
                         port=self.so_db_port)
def execute_cmds_db(self, cmds, dbuser, dbpass, dbname, host, port):
"""
Execute a list of SQL statements against a MySQL/MariaDB database.
:param cmds: iterable of SQL statement strings
:param dbuser/dbpass/dbname: database credentials and schema
:param host/port: database endpoint
"""
cnx = mysql.connector.connect(user=dbuser, password=dbpass, database=dbname, host=host, port=port)
cursor = cnx.cursor()
# NOTE(review): 'for cmd in cmds:' and the cursor.execute/commit/close lines are
# missing in this view — confirm against the full file.
self.logger.debug(cmd)
self.logger.debug('%s', cursor)
def find_file(self, file_name_keyword, file_ext, search_dir):
"""
:param file_name_keyword: keyword used to look for the csar file, case insensitive matching, e.g, infra
:param file_ext: e.g., csar, json
:param search_dir path to search
:return: path name of the file
"""
file_name_keyword = file_name_keyword.lower()
file_ext = file_ext.lower()
if not file_ext.startswith('.'):
file_ext = '.' + file_ext
for file_name in os.listdir(search_dir):
file_name_lower = file_name.lower()
if file_name_keyword in file_name_lower and file_name_lower.endswith(file_ext):
# multiple matches are ambiguous — report and presumably abort
# (the duplicate-detection guard line is missing in this view; confirm)
self.logger.error('Multiple files found for *{0}*.{1} in '
'directory {2}'.format(file_name_keyword, file_ext, search_dir))
filenamepath = os.path.abspath(os.path.join(search_dir, file_name))
# NOTE(review): the 'return filenamepath' / exit path lines are missing in this view
self.logger.error("Cannot find *{0}*{1} in directory {2}".format(file_name_keyword, file_ext, search_dir))
@staticmethod
def network_name_to_subnet_name(network_name):
    """
    Derive the OpenStack subnet name from a network name by inserting
    'subnet' before the trailing timestamp field.
    :param network_name: example: vcpe_net_cpe_signal_201711281221
    :return: vcpe_net_cpe_signal_subnet_201711281221
    """
    # restore @staticmethod: this helper takes no 'self' but is invoked as
    # self.network_name_to_subnet_name(...) elsewhere in this file; without the
    # decorator the instance would bind to 'network_name'
    fields = network_name.split('_')
    fields.insert(-1, 'subnet')
    return '_'.join(fields)
def set_network_name(self, network_name):
"""Rename the OpenStack network 'ONAP-NW1' to *network_name* using the openstack CLI."""
param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
openstackcmd = 'openstack ' + param
cmd = ' '.join([openstackcmd, 'network set --name', network_name, 'ONAP-NW1'])
# NOTE(review): the line actually executing 'cmd' (os.popen) is missing in this view — confirm
def set_subnet_name(self, network_name):
"""
Example: network_name = vcpe_net_cpe_signal_201711281221
set subnet name to vcpe_net_cpe_signal_subnet_201711281221
"""
param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
openstackcmd = 'openstack ' + param
# expected results: | subnets | subnet_id |
subnet_info = os.popen(openstackcmd + ' network show ' + network_name + ' |grep subnets').read().split('|')
if len(subnet_info) > 2 and subnet_info[1].strip() == 'subnets':
subnet_id = subnet_info[2].strip()
subnet_name = self.network_name_to_subnet_name(network_name)
cmd = ' '.join([openstackcmd, 'subnet set --name', subnet_name, subnet_id])
# NOTE(review): the line executing 'cmd' is missing in this view — confirm
self.logger.info("Subnet name set to: " + subnet_name)
# presumably the 'else:' branch of the check above (guard line missing in this view)
self.logger.error("Can't get subnet info from network name: " + network_name)
def set_closed_loop_policy(self, policy_template_file):
"""
Create the vCPE operational (closed-loop) policy from a JSON template
and push it to the Policy PAP, unless it already exists.
:param policy_template_file: path of the policy JSON template
"""
# Gather policy services cluster ips
p_api_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_api_service_name)
p_pap_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_pap_service_name)
# Read policy json from file
with open(policy_template_file) as f:
# NOTE(review): a try/except ValueError presumably wraps this load —
# the try line is missing in this view; confirm.
policy_json = json.load(f)
self.logger.error(policy_template_file + " doesn't seem to contain valid JSON data")
# Check policy already applied
policy_exists_req = requests.get(self.policy_pap_get_url.format(
p_pap_cluster_ip), auth=self.policy_userpass,
verify=False, headers=self.policy_headers)
if policy_exists_req.status_code != 200:
self.logger.error('Failure in checking CL policy existence. '
'Policy-pap responded with HTTP code {0}'.format(
policy_exists_req.status_code))
# NOTE(review): early 'return' lines after the error paths are missing in this view
policy_exists_json = policy_exists_req.json()
except ValueError as e:
# 'e.message' is a python2-only attribute
self.logger.error('Policy-pap request failed: ' + e.message)
assert policy_exists_json['groups'][0]['pdpSubgroups'] \
[1]['policies'][0]['name'] != 'operational.vcpe'
except AssertionError:
self.logger.info('vCPE closed loop policy already exists, not applying')
pass # policy doesn't exist
# Create policy via policy-api
policy_create_req = requests.post(self.policy_api_url.format(
p_api_cluster_ip), auth=self.policy_userpass,
json=policy_json, verify=False,
headers=self.policy_headers)
# Get the policy id from policy-api response
if policy_create_req.status_code != 200:
self.logger.error('Failed creating policy. Policy-api responded'
' with HTTP code {0}'.format(policy_create_req.status_code))
policy_version = json.loads(policy_create_req.text)['policy-version']
except (KeyError, ValueError):
self.logger.error('Policy API response not understood:')
self.logger.debug('\n' + str(policy_create_req.text))
# Inject the policy into Policy PAP
self.policy_pap_json['policies'].append({'policy-version': policy_version})
policy_insert_req = requests.post(self.policy_pap_post_url.format(
p_pap_cluster_ip), auth=self.policy_userpass,
json=self.policy_pap_json, verify=False,
headers=self.policy_headers)
if policy_insert_req.status_code != 200:
self.logger.error('Policy PAP request failed with HTTP code'
'{0}'.format(policy_insert_req.status_code))
self.logger.info('Successully pushed closed loop Policy')
def is_node_in_aai(self, node_type, node_uuid):
"""
Query AAI's nodes-query API for a node of the given type and uuid.
:param node_type: 'service' or 'vnf'
:param node_uuid: uuid of the node to look for
:return: True when AAI returns 'result-data' for the query
"""
search_node_type = None
if node_type == 'service':
search_node_type = 'service-instance'
key = 'service-instance-id'
elif node_type == 'vnf':
search_node_type = 'generic-vnf'
# NOTE(review): "key = 'vnf-id'" presumably assigned here (line missing in this view) — confirm
# presumably the 'else:' invalid-type branch (guard line missing in this view)
logging.error('Invalid node_type: ' + node_type)
url = 'https://{0}:{1}/aai/v11/search/nodes-query?search-node-type={2}&filter={3}:EQUALS:{4}'.format(
self.hosts['aai-inst1'], self.aai_query_port, search_node_type, key, node_uuid)
headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-FromAppID': 'vCPE-Robot', 'X-TransactionId': 'get_aai_subscr'}
r = requests.get(url, headers=headers, auth=self.aai_userpass, verify=False)
# NOTE(review): 'response = r.json()' presumably assigned here (line missing in this view) — confirm
self.logger.debug('aai query: ' + url)
self.logger.debug('aai response:\n' + json.dumps(response, indent=4, sort_keys=True))
return 'result-data' in response
@staticmethod
def extract_ip_from_str(net_addr, net_addr_len, sz):
    """
    Find the first dotted-quad IP address in *sz* that belongs to the given network.
    :param net_addr: e.g. 10.5.12.0
    :param net_addr_len: e.g. 24
    :param sz: arbitrary text to scan for IPv4 addresses
    :return: the first IP address matching the network, e.g. 10.5.12.3, or None
    """
    # u'' literals instead of unicode(...): ipaddress needs unicode on python2,
    # and the name 'unicode' does not exist on python3 (NameError there before)
    network = ipaddress.ip_network(u'{0}/{1}'.format(net_addr, net_addr_len), strict=False)
    for ip in re.findall(r'[0-9]+(?:\.[0-9]+){3}', sz):
        this_net = ipaddress.ip_network(u'{0}/{1}'.format(ip, net_addr_len), strict=False)
        if this_net == network:
            return ip
    return None
def get_pod_node_oam_ip(self, pod):
"""
:Assuming kubectl is available and configured by default config (~/.kube/config)
:param pod: pod name substring, e.g. 'sdnc-sdnc-0'
:return pod's cluster node oam ip (10.0.0.0/16)
"""
config.load_kube_config()
api = client.CoreV1Api()
# quieten the kubernetes client's own debug logging
kslogger = logging.getLogger('kubernetes')
kslogger.setLevel(logging.INFO)
res = api.list_pod_for_all_namespaces()
# NOTE(review): 'for i in res.items:' presumably precedes this (line missing in this view)
if pod in i.metadata.name:
self.logger.debug("found {0}\t{1}\t{2}".format(i.metadata.name, i.status.host_ip, i.spec.node_name))
ret = i.status.host_ip
# fallback: prompt the operator when autodetection fails (python2 raw_input)
ret = raw_input("Enter " + self.sdnc_controller_pod + " pod cluster node OAM IP address(10.0.0.0/16): ")
def get_pod_node_public_ip(self, pod):
"""
:Assuming kubectl is available and configured by default config (~/.kube/config)
:param pod: pod name substring, e.g. 'sdnc-sdnc-0'
:return pod's cluster node public ip (i.e. 10.12.0.0/16)
"""
config.load_kube_config()
api = client.CoreV1Api()
# quieten the kubernetes client's own debug logging
kslogger = logging.getLogger('kubernetes')
kslogger.setLevel(logging.INFO)
res = api.list_pod_for_all_namespaces()
# NOTE(review): 'for i in res.items:' presumably precedes this (line missing in this view)
if pod in i.metadata.name:
ret = self.get_vm_public_ip_by_nova(i.spec.node_name)
self.logger.debug("found node {0} public ip: {1}".format(i.spec.node_name, ret))
# fallback: prompt the operator when autodetection fails (python2 raw_input)
ret = raw_input("Enter " + self.sdnc_controller_pod + " pod cluster node public IP address(i.e. " + self.external_net_addr + "): ")
def get_vm_public_ip_by_nova(self, vm):
"""
This method uses openstack nova api to retrieve vm public ip
:param vm: vm name to look up
"""
subnet = IPNetwork('{0}/{1}'.format(self.external_net_addr, self.external_net_prefix_len))
nova = openstackclient.Client(2, self.cloud['--os-username'], self.cloud['--os-password'], self.cloud['--os-tenant-id'], self.cloud['--os-auth-url'])
for i in nova.servers.list():
# NOTE(review): a name match ('if i.name == vm:') and 'for ip in v:' presumably
# appear around here — those lines are missing in this view; confirm.
for k, v in i.networks.items():
if IPAddress(ip) in subnet:
def get_vm_ip(self, keywords, net_addr=None, net_addr_len=None):
"""
:param keywords: list of keywords to search for vm, e.g. ['bng', 'gmux', 'brg']
:param net_addr: e.g. 10.12.5.0
:param net_addr_len: e.g. 24
:return: dictionary {keyword: ip}
"""
# NOTE(review): the two defaults below are presumably guarded by
# 'if not net_addr:' / 'if not net_addr_len:' — guard lines missing in this view.
net_addr = self.external_net_addr
net_addr_len = self.external_net_prefix_len
# '--os-identity-api-version' is not a valid nova CLI flag, hence filtered out
param = ' '.join([k + ' ' + v for k, v in self.cloud.items() if 'identity' not in k])
openstackcmd = 'nova ' + param + ' list'
self.logger.debug(openstackcmd)
results = os.popen(openstackcmd).read()
all_vm_ip_dict = self.extract_vm_ip_as_dict(results, net_addr, net_addr_len)
latest_vm_list = self.remove_old_vms(all_vm_ip_dict.keys(), self.cpe_vm_prefix)
latest_vm_ip_dict = {vm: all_vm_ip_dict[vm] for vm in latest_vm_list}
ip_dict = self.select_subset_vm_ip(latest_vm_ip_dict, keywords)
# NOTE(review): presumably guarded by 'if self.oom_mode:' (line missing in this view)
ip_dict.update(self.get_oom_onap_vm_ip(keywords))
if len(ip_dict) != len(keywords):
self.logger.error('Cannot find all desired IP addresses for %s.', keywords)
self.logger.error(json.dumps(ip_dict, indent=4, sort_keys=True))
self.logger.error('Temporarily continue.. remember to check back vcpecommon.py line: 396')
def get_oom_onap_vm_ip(self, keywords):
"""Map each known ONAP host keyword to the shared k8s host external IP (OOM mode)."""
# NOTE(review): 'vm_ip = {}' and 'for vm in keywords:' presumably precede this,
# and 'return vm_ip' follows — those lines are missing in this view; confirm.
if vm in self.host_names:
vm_ip[vm] = self.oom_so_sdnc_aai_ip
def get_k8s_service_cluster_ip(self, service):
"""
Returns cluster IP for a given service
:param service: name of the service
"""
config.load_kube_config()
api = client.CoreV1Api()
# quieten the kubernetes client's own debug logging
kslogger = logging.getLogger('kubernetes')
kslogger.setLevel(logging.INFO)
# NOTE(review): a 'try:' presumably wraps the call below (line missing in this view)
resp = api.read_namespaced_service(service, self.onap_namespace)
except client.rest.ApiException as e:
self.logger.error('Error while making k8s API request: ' + e.body)
return resp.spec.cluster_ip
def get_k8s_service_endpoint_info(self, service, subset):
"""
Returns endpoint data for a given service and subset. If there
is more than one endpoint returns data for the first one from
the list that API returned.
:param service: name of the service
:param subset: subset name, one of "ip","port"
"""
config.load_kube_config()
api = client.CoreV1Api()
# quieten the kubernetes client's own debug logging
kslogger = logging.getLogger('kubernetes')
kslogger.setLevel(logging.INFO)
# NOTE(review): a 'try:' presumably wraps the call below (line missing in this view)
resp = api.read_namespaced_endpoints(service, self.onap_namespace)
except client.rest.ApiException as e:
self.logger.error('Error while making k8s API request: ' + e.body)
# NOTE(review): "if subset == 'ip':" presumably precedes this (line missing in this view)
return resp.subsets[0].addresses[0].ip
elif subset == "port":
return resp.subsets[0].ports[0].port
# presumably the 'else:' unsupported-subset branch
self.logger.error("Unsupported subset type")
def extract_vm_ip_as_dict(self, novalist_results, net_addr, net_addr_len):
"""Parse 'nova list' table output into {vm_name: ip} for IPs inside the given network."""
# NOTE(review): 'vm_ip_dict = {}', the field-count check assigning vm_name/ip_info,
# and the final 'return vm_ip_dict' are missing in this view — confirm.
for line in novalist_results.split('\n'):
fields = line.split('|')
ip = self.extract_ip_from_str(net_addr, net_addr_len, ip_info)
vm_ip_dict[vm_name] = ip
def remove_old_vms(self, vm_list, prefix):
"""
For vms with format name_timestamp, only keep the one with the latest timestamp.
zdcpe1cpe01brgemu01_201805222148 (drop this)
zdcpe1cpe01brgemu01_201805222229 (keep this)
zdcpe1cpe01gw01_201805162201
"""
same_type_vm_dict = {}
# NOTE(review): 'new_vm_list = []' and 'for vm in vm_list:' presumably precede this
# (lines missing in this view); confirm.
fields = vm.split('_')
# only vCPE VMs named '<prefix...>_<12-digit timestamp>' are deduplicated
if vm.startswith(prefix) and len(fields) == 2 and len(fields[-1]) == len('201805222148') and fields[-1].isdigit():
# string comparison works here because equal-length digit suffixes sort chronologically
if vm > same_type_vm_dict.get(fields[0], '0'):
same_type_vm_dict[fields[0]] = vm
# presumably the 'else:' branch keeping non-timestamped names as-is
new_vm_list.append(vm)
new_vm_list.extend(same_type_vm_dict.values())
def select_subset_vm_ip(self, all_vm_ip_dict, vm_name_keyword_list):
"""Pick, for each keyword, the IP of the first VM whose name contains that keyword."""
# NOTE(review): 'vm_ip_dict = {}', the 'if keyword in vm:' test, a 'break', and the
# final 'return vm_ip_dict' are missing in this view — confirm.
for keyword in vm_name_keyword_list:
for vm, ip in all_vm_ip_dict.items():
vm_ip_dict[keyword] = ip
def del_vgmux_ves_mode(self):
    """Remove the VES 'mode' configuration from the vGMUX VPP agent."""
    endpoint = self.vpp_ves_url.format(self.hosts['mux']) + '/mode'
    resp = requests.delete(endpoint,
                           headers=self.vpp_api_headers,
                           auth=self.vpp_api_userpass)
    self.logger.debug('%s', resp)
def del_vgmux_ves_collector(self):
    """Remove the VES collector configuration from the vGMUX VPP agent."""
    endpoint = self.vpp_ves_url.format(self.hosts['mux']) + '/config'
    resp = requests.delete(endpoint,
                           headers=self.vpp_api_headers,
                           auth=self.vpp_api_userpass)
    self.logger.debug('%s', resp)
def set_vgmux_ves_collector(self ):
"""Point the vGMUX VPP agent's VES config at the DCAE collector host/port."""
url = self.vpp_ves_url.format(self.hosts['mux'])
# NOTE(review): the 'data = {...}' wrapper around this dict and its closing
# braces are missing in this view — confirm against the full file.
{'server-addr': self.hosts[self.dcae_ves_collector_name],
# collector NodePort in OOM mode, in-VM port otherwise
'server-port': '30235' if self.oom_mode else '8081',
'read-interval': '10',
r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
self.logger.debug('%s', r)
def set_vgmux_packet_loss_rate(self, lossrate, vg_vnf_instance_name):
"""
Configure the vGMUX demo packet-loss rate (used to trigger the closed loop).
:param lossrate: packet loss percentage to simulate
:param vg_vnf_instance_name: VNF instance name reported as the VES source
"""
url = self.vpp_ves_url.format(self.hosts['mux'])
# NOTE(review): the 'data = {...}' wrapper around this dict and its closing
# braces are missing in this view — confirm against the full file.
{"working-mode": "demo",
"base-packet-loss": str(lossrate),
"source-name": vg_vnf_instance_name
r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
self.logger.debug('%s', r)
# return all the VxLAN interface names of BRG or vGMUX based on the IP address
def get_vxlan_interfaces(self, ip, print_info=False):
"""
:param ip: BRG/vGMUX honeycomb agent IP
:param print_info: when True, pretty-print each vxlan-tunnel interface
:return: list of interface names whose type is v3po:vxlan-tunnel
"""
url = self.vpp_inf_url.format(ip)
self.logger.debug('url is this: %s', url)
r = requests.get(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
data = r.json()['interfaces']['interface']
# NOTE(review): 'if print_info:' and 'for inf in data:' presumably precede this
# (lines missing in this view); confirm.
if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel':
print(json.dumps(inf, indent=4, sort_keys=True))
return [inf['name'] for inf in data if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel']
# delete all VxLAN interfaces of each hosts
def delete_vxlan_interfaces(self, host_dic):
"""
Delete every VxLAN crossconnect and interface on each host in host_dic.
:param host_dic: {host_keyword: honeycomb agent ip}
:return: presumably False on failure, True otherwise (return lines missing in this view)
"""
for host, ip in host_dic.items():
self.logger.info('{0}: Getting VxLAN interfaces'.format(host))
inf_list = self.get_vxlan_interfaces(ip)
# NOTE(review): a 'for inf in inf_list:' presumably wraps each deletion pass
# (loop lines missing in this view); confirm.
self.logger.info("{0}: Deleting VxLAN crossconnect {1}".format(host, inf))
# the l2 crossconnect must be removed before the interface itself
url = self.vpp_inf_url.format(ip) + '/interface/' + inf + '/v3po:l2'
requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
self.logger.info("{0}: Deleting VxLAN interface {1}".format(host, inf))
url = self.vpp_inf_url.format(ip) + '/interface/' + inf
requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
# verify nothing is left behind
if len(self.get_vxlan_interfaces(ip)) > 0:
self.logger.error("Error deleting VxLAN from {0}, try to restart the VM, IP is {1}.".format(host, ip))
# presumably the 'else:' branch when inf_list was empty
self.logger.info("{0}: no VxLAN interface found, nothing to delete".format(host))
@staticmethod
def save_object(obj, filepathname):
    """
    Pickle *obj* to the given file path (binary mode).
    :param obj: any picklable python object
    :param filepathname: destination file path
    """
    # restore @staticmethod: this helper takes no 'self' but is invoked as
    # self.save_object(...) elsewhere in this file; without the decorator the
    # instance would bind to 'obj' and the payload to 'filepathname'
    with open(filepathname, 'wb') as fout:
        pickle.dump(obj, fout)
@staticmethod
def load_object(filepathname):
    """
    Unpickle and return the object stored at *filepathname*.
    :param filepathname: path of a file previously written by save_object
    :return: the deserialized object
    """
    # restore @staticmethod: invoked as self.load_object(...) elsewhere in this
    # file; without the decorator the instance would bind to 'filepathname'
    with open(filepathname, 'rb') as fin:
        return pickle.load(fin)
def increase_ip_address_or_vni_in_template(vnf_template_file, vnf_parameter_name_list):
"""
Rewrite a VNF-API preload template in place, bumping the last numeric field
of each listed parameter (last octet of an IP address, or a VNI).
:param vnf_template_file: path of the JSON preload template (modified in place)
:param vnf_parameter_name_list: parameter names whose values should be incremented
"""
with open(vnf_template_file) as json_input:
json_data = json.load(json_input)
param_list = json_data['VNF-API:input']['VNF-API:vnf-topology-information']['VNF-API:vnf-parameters']
for param in param_list:
if param['vnf-parameter-name'] in vnf_parameter_name_list:
ipaddr_or_vni = param['vnf-parameter-value'].split('.')
number = int(ipaddr_or_vni[-1])
# NOTE(review): the increment / wrap-around logic between these two lines is
# missing in this view (presumably number+1 with a reset near 254) — confirm.
ipaddr_or_vni[-1] = str(number)
param['vnf-parameter-value'] = '.'.join(ipaddr_or_vni)
assert json_data is not None
with open(vnf_template_file, 'w') as json_output:
json.dump(json_data, json_output, indent=4, sort_keys=True)
def save_preload_data(self, preload_data):
    """Persist the preload dictionary to its well-known pickle file."""
    target = self.preload_dict_file
    self.save_object(preload_data, target)
def load_preload_data(self):
    """Load and return the previously saved preload dictionary."""
    source = self.preload_dict_file
    return self.load_object(source)
def save_vgmux_vnf_name(self, vgmux_vnf_name):
    """Persist the vGMUX VNF name so later runs can reference it."""
    target = self.vgmux_vnf_name_file
    self.save_object(vgmux_vnf_name, target)
def load_vgmux_vnf_name(self):
    """Load and return the previously saved vGMUX VNF name."""
    source = self.vgmux_vnf_name_file
    return self.load_object(source)