Get the BRG MAC address automatically
[integration.git] / test / vcpe / vcpecommon.py
1 #!/usr/bin/env python
2
3 import json
4 import logging
5 import os
6 import pickle
7 import re
8 import sys
9
10 import ipaddress
11 import mysql.connector
12 import requests
13 import commands
14 import time
15 from novaclient import client as openstackclient
16 from kubernetes import client, config
17 from netaddr import IPAddress, IPNetwork
18
19 ######################################################################
20 # Parts which must be updated / cross-checked during each deployment #
21 # are marked as CHANGEME                                             #
22 ######################################################################
23
24 class VcpeCommon:
25     #############################################################################################
26     # Set the network prefix of the k8s host external address; it is used for pod public IP autodetection
27     # but can be overridden by the user in case autodetection fails
28     external_net_addr = '10.12.0.0'
29     external_net_prefix_len = 16
30
31     #############################################################################################
32     # set the deployment mode; True means an OOM (k8s) based ONAP deployment
33     oom_mode = True
34
35     #############################################################################################
36     # set the gra_api flag
37     # Must not be set to True until the Frankfurt DGs are updated for the GRA-API infrastructure
38     gra_api_flag = False
39
40     ###########################
41     # set Openstack credentials
42     # CHANGEME part
43     cloud = {
44         '--os-auth-url': 'http://10.12.25.2:5000',
45         '--os-username': 'kxi',
46         '--os-user-domain-id': 'default',
47         '--os-project-domain-id': 'default',
48         '--os-tenant-id': '712b6016580e410b9abfec9ca34953ce' if oom_mode else '1e097c6713e74fd7ac8e4295e605ee1e',
49         '--os-region-name': 'RegionOne',
50         '--os-password': 'n3JhGMGuDzD8',
51         '--os-project-domain-name': 'Integration-Release-Daily' if oom_mode else 'Integration-SB-07',
52         '--os-identity-api-version': '3'
53     }
54
55     ############################################################################
56     # set the oam and public networks, which must exist in openstack before deployment
57     # CHANGEME part
58     common_preload_config = {
59         'oam_onap_net': 'oam_network_exxC' if oom_mode else 'oam_onap_lAky',
60         'oam_onap_subnet': 'oam_network_exxC' if oom_mode else 'oam_onap_lAky',
61         'public_net': 'external',
62         'public_net_id': '971040b2-7059-49dc-b220-4fab50cb2ad4'
63     }
64
65     #############################################################################
66     # Set the name of ONAP's k8s namespace and the sdnc controller pod
67     # CHANGEME part
68     onap_namespace = 'onap'
69     onap_environment = 'dev'
70     sdnc_controller_pod = '-'.join([onap_environment, 'sdnc-sdnc-0'])
71
72     template_variable_symbol = '${'
73     cpe_vm_prefix = 'zdcpe'
74
75     #############################################################################################
76     # preloading network config
77     #  key=network role
78     #  value = [subnet_start_ip, subnet_gateway_ip]
79     preload_network_config = {
80         'cpe_public': ['10.2.0.2', '10.2.0.1'],
81         'cpe_signal': ['10.4.0.2', '10.4.0.1'],
82         'brg_bng': ['10.3.0.2', '10.3.0.1'],
83         'bng_mux': ['10.1.0.10', '10.1.0.1'],
84         'mux_gw': ['10.5.0.10', '10.5.0.1']
85     }
86
87     dcae_ves_collector_name = 'dcae-bootstrap'
88     global_subscriber_id = 'SDN-ETHERNET-INTERNET'
89     project_name = 'Project-Demonstration'
90     owning_entity_id = '520cc603-a3c4-4ec2-9ef4-ca70facd79c0'
91     owning_entity_name = 'OE-Demonstration1'
92
93     def __init__(self, extra_host_names=None):
94         self.logger = logging.getLogger(__name__)
95         self.logger.setLevel(logging.DEBUG)
96         self.logger.info('Initializing configuration')
97
98         ##################################################################################################################################
99         # The following parameter must be updated (e.g. from the csar file; grep for the VfModuleModelInvariantUuid string) before the vcpe.py customer call!
100         # vgw_VfModuleModelInvariantUuid is in the rescust service csar;
101         # look in service-VcpesvcRescust1118-template.yml for the vgw module group metadata. TODO: read this value automatically
102         # CHANGEME part
103         self.vgw_VfModuleModelInvariantUuid = '26d6a718-17b2-4ba8-8691-c44343b2ecd2'
104
105         # OOM: this is the address that the BRG and BNG will NAT for SDNC access - the 10.0.0.x address of the k8s host running the sdnc-0 container
106         self.sdnc_oam_ip = self.get_pod_node_oam_ip(self.sdnc_controller_pod)
107         # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
108         self.oom_so_sdnc_aai_ip = self.get_pod_node_public_ip(self.sdnc_controller_pod)
109         # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
110         self.oom_dcae_ves_collector = self.oom_so_sdnc_aai_ip
111         # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
112         self.mr_ip_addr = self.oom_so_sdnc_aai_ip
113         self.mr_ip_port = '30227'
114         self.so_nbi_port = '30277' if self.oom_mode else '8080'
115         self.sdnc_preloading_port = '30267' if self.oom_mode else '8282'
116         self.aai_query_port = '30233' if self.oom_mode else '8443'
117         self.sniro_port = '30288' if self.oom_mode else '8080'
118
119         self.host_names = ['sdc', 'so', 'sdnc', 'robot', 'aai-inst1', self.dcae_ves_collector_name, 'mariadb-galera']
120         if extra_host_names:
121             self.host_names.extend(extra_host_names)
122         # get IP addresses
123         self.hosts = self.get_vm_ip(self.host_names, self.external_net_addr, self.external_net_prefix_len)
124         # this is the keyword used to name the vgw stack; it must not be used in other stacks
125         self.vgw_name_keyword = 'base_vcpe_vgw'
126         # this is the file that will keep the index of the last assigned SO name
127         self.vgw_vfmod_name_index_file = '__var/vgw_vfmod_name_index'
128         self.svc_instance_uuid_file = '__var/svc_instance_uuid'
129         self.preload_dict_file = '__var/preload_dict'
130         self.vgmux_vnf_name_file = '__var/vgmux_vnf_name'
131         self.product_family_id = 'f9457e8c-4afd-45da-9389-46acd9bf5116'
132         self.custom_product_family_id = 'a9a77d5a-123e-4ca2-9eb9-0b015d2ee0fb'
133         self.instance_name_prefix = {
134             'service': 'vcpe_svc',
135             'network': 'vcpe_net',
136             'vnf': 'vcpe_vnf',
137             'vfmodule': 'vcpe_vfmodule'
138         }
139         self.aai_userpass = 'AAI', 'AAI'
140
141         ############################################################################################################
142         # The following key overrides the public key from the vCPE Heat templates; it is important to use the correct one here
143         # CHANGEME part
144         self.pub_key = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh'
145
146         self.os_tenant_id = self.cloud['--os-tenant-id']
147         self.os_region_name = self.cloud['--os-region-name']
148         self.common_preload_config['pub_key'] = self.pub_key
149         self.sniro_url = 'http://' + self.hosts['robot'] + ':' + self.sniro_port + '/__admin/mappings'
150         self.sniro_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
151         self.homing_solution = 'sniro'  # value is either 'sniro' or 'oof'
152 #        self.homing_solution = 'oof'
153         self.customer_location_used_by_oof = {
154             "customerLatitude": "32.897480",
155             "customerLongitude": "-97.040443",
156             "customerName": "some_company"
157         }
158
159         #############################################################################################
160         # SDC urls
161         self.sdc_be_port = '30204'
162         self.sdc_be_request_userpass = 'vid', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
163         self.sdc_be_request_headers = {'X-ECOMP-InstanceID': 'VID'}
164         self.sdc_be_url_prefix = 'https://' + self.hosts['sdc'] + ':' + self.sdc_be_port
165         self.sdc_service_list_url = self.sdc_be_url_prefix + '/sdc/v1/catalog/services'
166
167         self.sdc_fe_port = '30207'
168         self.sdc_fe_request_userpass = 'beep', 'boop'
169         self.sdc_fe_request_headers = {'USER_ID': 'demo', 'Content-Type': 'application/json'}
170         self.sdc_fe_url_prefix = 'https://' + self.hosts['sdc'] + ':' + self.sdc_fe_port
171         self.sdc_get_category_list_url = self.sdc_fe_url_prefix + '/sdc1/feProxy/rest/v1/categories'
172         self.sdc_create_allotted_resource_subcategory_url = self.sdc_fe_url_prefix + '/sdc1/feProxy/rest/v1/category/resources/resourceNewCategory.allotted%20resource/subCategory'
173
174         #############################################################################################
175         # SDNC urls
176         self.sdnc_userpass = 'admin', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
177         self.sdnc_db_name = 'sdnctl'
178         self.sdnc_db_user = 'sdnctl'
179         self.sdnc_db_pass = 'gamma'
180         self.sdnc_db_port = self.get_k8s_service_endpoint_info('mariadb-galera','port') if self.oom_mode else '3306'
181         self.sdnc_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
182         self.sdnc_preload_network_url = 'https://' + self.hosts['sdnc'] + \
183                                         ':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-network-topology-operation'
184         self.sdnc_preload_network_gra_url = 'https://' + self.hosts['sdnc'] + \
185                                         ':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-network-topology-operation'
186         self.sdnc_preload_vnf_url = 'https://' + self.hosts['sdnc'] + \
187                                     ':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-vnf-topology-operation'
188         self.sdnc_preload_gra_url = 'https://' + self.hosts['sdnc'] + \
189                                     ':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-vf-module-topology-operation'
190         self.sdnc_ar_cleanup_url = 'https://' + self.hosts['sdnc'] + ':' + self.sdnc_preloading_port + \
191                                    '/restconf/config/GENERIC-RESOURCE-API:'
192
193         #############################################################################################
194         # SO urls, note: do NOT add a '/' at the end of the url
195         self.so_req_api_url = {'v4': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances',
196                            'v5': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances'}
197         self.so_check_progress_api_url = 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/orchestrationRequests/v6'
198         self.so_userpass = 'InfraPortalClient', 'password1$'
199         self.so_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
200         self.so_db_name = 'catalogdb'
201         self.so_db_user = 'root'
202         self.so_db_pass = 'secretpassword'
203         self.so_db_port = '30252' if self.oom_mode else '32769'
204
205         self.vpp_inf_url = 'http://{0}:8183/restconf/config/ietf-interfaces:interfaces'
206         self.vpp_api_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
207         self.vpp_api_userpass = ('admin', 'admin')
208         self.vpp_ves_url = 'http://{0}:8183/restconf/config/vesagent:vesagent'
209
210         #############################################################################################
211         # POLICY urls
212         self.policy_userpass = ('healthcheck', 'zb!XztG34')
213         self.policy_headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
214         self.policy_api_url = 'https://{0}:6969/policy/api/v1/policytypes/onap.policies.controlloop.Operational/versions/1.0.0/policies'
215         self.policy_pap_get_url = 'https://{0}:6969/policy/pap/v1/pdps'
216         self.policy_pap_json = {'policies': [{'policy-id': 'operational.vcpe'}]}
217         self.policy_pap_post_url = self.policy_pap_get_url + '/policies'
218         self.policy_api_service_name = 'policy-api'
219         self.policy_pap_service_name = 'policy-pap'
220
221         #############################################################################################
222         # MARIADB-GALERA settings
223         self.mariadb_galera_endpoint_ip = self.get_k8s_service_endpoint_info('mariadb-galera','ip')
224         self.mariadb_galera_endpoint_port = self.get_k8s_service_endpoint_info('mariadb-galera','port')
225
226     def heatbridge(self, openstack_stack_name, svc_instance_uuid):
227         """
228         Add vserver information to AAI
229         """
230         self.logger.info('Adding vServer information to AAI for {0}'.format(openstack_stack_name))
231         if not self.oom_mode:
232             cmd = '/opt/demo.sh heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid)
233             ret = commands.getstatusoutput("ssh -i onap_dev root@{0} '{1}'".format(self.hosts['robot'], cmd))
234             self.logger.debug('%s', ret)
235         else:
236             print('To add vGMUX vserver info to AAI, do the following:')
237             print('- ssh to rancher')
238             print('- sudo su -')
239             print('- cd /root/oom/kubernetes/robot')
240             print('- ./demo-k8s.sh onap heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid))
241
242     def get_brg_mac_from_sdnc(self):
243         """
244         Check table DHCP_MAP in the SDNC DB. Find the newly instantiated BRG MAC address.
245         Note that there might be multiple BRGs; the most recently instantiated BRG always has the largest IP address.
246         """
247         if self.oom_mode:
248             db_host=self.mariadb_galera_endpoint_ip
249         else:
250             db_host=self.hosts['mariadb-galera']
251
252         cnx = mysql.connector.connect(user=self.sdnc_db_user,
253                                       password=self.sdnc_db_pass,
254                                       database=self.sdnc_db_name,
255                                       host=db_host,
256                                       port=self.sdnc_db_port)
257         cursor = cnx.cursor()
258         query = "SELECT * from DHCP_MAP"
259         cursor.execute(query)
260
261         self.logger.debug('DHCP_MAP table in SDNC')
262         mac_recent = None
263         host = -1
264         for mac, ip in cursor:
265             self.logger.debug(mac + ' - ' + ip)
266             this_host = int(ip.split('.')[-1])
267             if host < this_host:
268                 host = this_host
269                 mac_recent = mac
270
271         cnx.close()
272
273         try:
274             assert mac_recent
275         except AssertionError:
276             self.logger.error('Failed to obtain BRG MAC address from database')
277             sys.exit(1)
278
279         return mac_recent
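    # Illustrative sketch of the selection above (the DHCP_MAP rows below are
    # hypothetical, not real deployment data):
    #   fa:16:3e:aa:aa:aa - 10.3.0.2
    #   fa:16:3e:bb:bb:bb - 10.3.0.5
    # The row whose IP has the largest last octet (10.3.0.5) belongs to the most
    # recently instantiated BRG, so fa:16:3e:bb:bb:bb would be returned.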
280
281     def execute_cmds_mariadb(self, cmds):
282         self.execute_cmds_db(cmds, self.sdnc_db_user, self.sdnc_db_pass,
283                              self.sdnc_db_name, self.mariadb_galera_endpoint_ip,
284                              self.mariadb_galera_endpoint_port)
285
286     def execute_cmds_sdnc_db(self, cmds):
287         self.execute_cmds_db(cmds, self.sdnc_db_user, self.sdnc_db_pass, self.sdnc_db_name,
288                              self.hosts['sdnc'], self.sdnc_db_port)
289
290     def execute_cmds_so_db(self, cmds):
291         self.execute_cmds_db(cmds, self.so_db_user, self.so_db_pass, self.so_db_name,
292                              self.hosts['so'], self.so_db_port)
293
294     def execute_cmds_db(self, cmds, dbuser, dbpass, dbname, host, port):
295         cnx = mysql.connector.connect(user=dbuser, password=dbpass, database=dbname, host=host, port=port)
296         cursor = cnx.cursor()
297         for cmd in cmds:
298             self.logger.debug(cmd)
299             cursor.execute(cmd)
300             self.logger.debug('%s', cursor)
301         cnx.commit()
302         cursor.close()
303         cnx.close()
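    # Usage sketch for the execute_cmds_* helpers above (the SQL statement and the
    # DHCP_MAP column name are hypothetical, for illustration only):
    #   self.execute_cmds_sdnc_db(["DELETE FROM DHCP_MAP WHERE mac = 'fa:16:3e:aa:aa:aa'"])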
304
305     def find_file(self, file_name_keyword, file_ext, search_dir):
306         """
307         :param file_name_keyword: keyword used to look for the csar file, case-insensitive matching, e.g., infra
308         :param file_ext: e.g., csar, json
309         :param search_dir: path to search
310         :return: path name of the file
311         """
312         file_name_keyword = file_name_keyword.lower()
313         file_ext = file_ext.lower()
314         if not file_ext.startswith('.'):
315             file_ext = '.' + file_ext
316
317         filenamepath = None
318         for file_name in os.listdir(search_dir):
319             file_name_lower = file_name.lower()
320             if file_name_keyword in file_name_lower and file_name_lower.endswith(file_ext):
321                 if filenamepath:
322                     self.logger.error('Multiple files found for *{0}*.{1} in '
323                                       'directory {2}'.format(file_name_keyword, file_ext, search_dir))
324                     sys.exit(1)
325                 filenamepath = os.path.abspath(os.path.join(search_dir, file_name))
326
327         if filenamepath:
328             return filenamepath
329         else:
330             self.logger.error("Cannot find *{0}*{1} in directory {2}".format(file_name_keyword, file_ext, search_dir))
331             sys.exit(1)
332
333     @staticmethod
334     def network_name_to_subnet_name(network_name):
335         """
336         :param network_name: example: vcpe_net_cpe_signal_201711281221
337         :return: vcpe_net_cpe_signal_subnet_201711281221
338         """
339         fields = network_name.split('_')
340         fields.insert(-1, 'subnet')
341         return '_'.join(fields)
342
343     def set_network_name(self, network_name):
344         param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
345         openstackcmd = 'openstack ' + param
346         cmd = ' '.join([openstackcmd, 'network set --name', network_name, 'ONAP-NW1'])
347         os.popen(cmd)
348
349     def set_subnet_name(self, network_name):
350         """
351         Example: network_name = vcpe_net_cpe_signal_201711281221
352         sets the subnet name to vcpe_net_cpe_signal_subnet_201711281221
353         :return: True on success, False otherwise
354         """
355         param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
356         openstackcmd = 'openstack ' + param
357
358         # expected results: | subnets | subnet_id |
359         subnet_info = os.popen(openstackcmd + ' network show ' + network_name + ' |grep subnets').read().split('|')
360         if len(subnet_info) > 2 and subnet_info[1].strip() == 'subnets':
361             subnet_id = subnet_info[2].strip()
362             subnet_name = self.network_name_to_subnet_name(network_name)
363             cmd = ' '.join([openstackcmd, 'subnet set --name', subnet_name, subnet_id])
364             os.popen(cmd)
365             self.logger.info("Subnet name set to: " + subnet_name)
366             return True
367         else:
368             self.logger.error("Can't get subnet info from network name: " + network_name)
369             return False
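    # The shell commands assembled above are roughly equivalent to the following
    # (credential flags omitted, names illustrative):
    #   openstack <credential flags> network show vcpe_net_cpe_signal_201711281221 |grep subnets
    #   openstack <credential flags> subnet set --name vcpe_net_cpe_signal_subnet_201711281221 <subnet_id>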
370
371     def set_closed_loop_policy(self, policy_template_file):
372         # Gather policy services cluster ips
373         p_api_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_api_service_name)
374         p_pap_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_pap_service_name)
375
376         # Read policy json from file
377         with open(policy_template_file) as f:
378             try:
379                 policy_json = json.load(f)
380             except ValueError:
381                 self.logger.error(policy_template_file + " doesn't seem to contain valid JSON data")
382                 sys.exit(1)
383
384         # Check policy already applied
385         policy_exists_req = requests.get(self.policy_pap_get_url.format(
386                             p_pap_cluster_ip), auth=self.policy_userpass,
387                             verify=False, headers=self.policy_headers)
388         if policy_exists_req.status_code != 200:
389             self.logger.error('Failure in checking CL policy existence. '
390                                'Policy-pap responded with HTTP code {0}'.format(
391                                policy_exists_req.status_code))
392             sys.exit(1)
393
394         try:
395             policy_exists_json = policy_exists_req.json()
396         except ValueError as e:
397             self.logger.error('Policy-pap request failed: ' + e.message)
398             sys.exit(1)
399
400         try:
401             assert policy_exists_json['groups'][0]['pdpSubgroups'] \
402                                [1]['policies'][0]['name'] != 'operational.vcpe'
403         except AssertionError:
404             self.logger.info('vCPE closed loop policy already exists, not applying')
405             return
406         except IndexError:
407             pass # policy doesn't exist
408
409         # Create policy
410         policy_create_req = requests.post(self.policy_api_url.format(
411                             p_api_cluster_ip), auth=self.policy_userpass,
412                             json=policy_json, verify=False,
413                             headers=self.policy_headers)
414         # Get the policy id from policy-api response
415         if policy_create_req.status_code != 200:
416             self.logger.error('Failed creating policy. Policy-api responded'
417                               ' with HTTP code {0}'.format(policy_create_req.status_code))
418             sys.exit(1)
419
420         try:
421             policy_version = json.loads(policy_create_req.text)['policy-version']
422         except (KeyError, ValueError):
423             self.logger.error('Policy API response not understood: ' + str(policy_create_req.text))
424             sys.exit(1)
425
426         # Inject the policy into Policy PAP
427         self.policy_pap_json['policies'].append({'policy-version': policy_version})
428         policy_insert_req = requests.post(self.policy_pap_post_url.format(
429                             p_pap_cluster_ip), auth=self.policy_userpass,
430                             json=self.policy_pap_json, verify=False,
431                             headers=self.policy_headers)
432         if policy_insert_req.status_code != 200:
433             self.logger.error('Policy PAP request failed with HTTP code '
434                               '{0}'.format(policy_insert_req.status_code))
435             sys.exit(1)
436         self.logger.info('Successfully pushed closed loop Policy')
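    # Usage sketch (the template path is a placeholder; any file containing the
    # operational closed-loop policy JSON accepted by policy-api will do):
    #   vcpecommon.set_closed_loop_policy('<path to operational vCPE policy JSON>')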
437
438     def is_node_in_aai(self, node_type, node_uuid):
439         key = None
440         search_node_type = None
441         if node_type == 'service':
442             search_node_type = 'service-instance'
443             key = 'service-instance-id'
444         elif node_type == 'vnf':
445             search_node_type = 'generic-vnf'
446             key = 'vnf-id'
447         else:
448             logging.error('Invalid node_type: ' + node_type)
449             sys.exit(1)
450
451         url = 'https://{0}:{1}/aai/v11/search/nodes-query?search-node-type={2}&filter={3}:EQUALS:{4}'.format(
452             self.hosts['aai-inst1'], self.aai_query_port, search_node_type, key, node_uuid)
453
454         headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-FromAppID': 'vCPE-Robot', 'X-TransactionId': 'get_aai_subscr'}
455         r = requests.get(url, headers=headers, auth=self.aai_userpass, verify=False)
456         response = r.json()
457         self.logger.debug('aai query: ' + url)
458         self.logger.debug('aai response:\n' + json.dumps(response, indent=4, sort_keys=True))
459         return 'result-data' in response
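    # Example of the query URL built above (host, port and UUID are placeholders):
    #   https://<aai-inst1 ip>:30233/aai/v11/search/nodes-query?search-node-type=generic-vnf&filter=vnf-id:EQUALS:<vnf uuid>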
460
461     @staticmethod
462     def extract_ip_from_str(net_addr, net_addr_len, sz):
463         """
464         :param net_addr:  e.g. 10.5.12.0
465         :param net_addr_len: e.g. 24
466         :param sz: a string
467         :return: the first IP address matching the network, e.g. 10.5.12.3
468         """
469         network = ipaddress.ip_network(unicode('{0}/{1}'.format(net_addr, net_addr_len)), strict=False)
470         ip_list = re.findall(r'[0-9]+(?:\.[0-9]+){3}', sz)
471         for ip in ip_list:
472             this_net = ipaddress.ip_network(unicode('{0}/{1}'.format(ip, net_addr_len)), strict=False)
473             if this_net == network:
474                 return str(ip)
475         return None
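    # Example (the input string is illustrative): with net_addr='10.5.12.0',
    # net_addr_len=24 and sz='inet 10.5.12.3/24 brd 10.5.12.255', the first address
    # falling inside 10.5.12.0/24 is returned, i.e. '10.5.12.3'.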
476
477     def get_pod_node_oam_ip(self, pod):
478         """
479         Assumes a kubeconfig is available at the default location (~/.kube/config)
480         :param pod: pod name substring, e.g. 'sdnc-sdnc-0'
481         :return: the pod's cluster node OAM IP (10.0.0.0/16)
482         """
483         ret = None
484         config.load_kube_config()
485         api = client.CoreV1Api()
486         kslogger = logging.getLogger('kubernetes')
487         kslogger.setLevel(logging.INFO)
488         res = api.list_pod_for_all_namespaces()
489         for i in res.items:
490             if pod in i.metadata.name:
491                 self.logger.debug("found {0}\t{1}\t{2}".format(i.metadata.name, i.status.host_ip, i.spec.node_name))
492                 ret = i.status.host_ip
493                 break
494
495         if ret is None:
496             ret = raw_input("Enter " + self.sdnc_controller_pod + " pod cluster node OAM IP address(10.0.0.0/16): ")
497         return ret
498
499     def get_pod_node_public_ip(self, pod):
500         """
501         Assumes a kubeconfig is available at the default location (~/.kube/config)
502         :param pod: pod name substring, e.g. 'sdnc-sdnc-0'
503         :return: the pod's cluster node public IP (i.e. 10.12.0.0/16)
504         """
505         ret = None
506         config.load_kube_config()
507         api = client.CoreV1Api()
508         kslogger = logging.getLogger('kubernetes')
509         kslogger.setLevel(logging.INFO)
510         res = api.list_pod_for_all_namespaces()
511         for i in res.items:
512             if pod in i.metadata.name:
513                 ret = self.get_vm_public_ip_by_nova(i.spec.node_name)
514                 self.logger.debug("found node {0} public ip: {1}".format(i.spec.node_name, ret))
515                 break
516
517         if ret is None:
518             ret = raw_input("Enter " + self.sdnc_controller_pod + " pod cluster node public IP address(i.e. " + self.external_net_addr + "): ")
519         return ret
520
521     def get_vm_public_ip_by_nova(self, vm):
522         """
523         This method uses the OpenStack nova API to retrieve the vm public ip
524         :param vm: vm name
525         :return: vm public ip
526         """
527         subnet = IPNetwork('{0}/{1}'.format(self.external_net_addr, self.external_net_prefix_len))
528         nova = openstackclient.Client(2, self.cloud['--os-username'], self.cloud['--os-password'], self.cloud['--os-tenant-id'], self.cloud['--os-auth-url']) 
529         for i in nova.servers.list():
530             if i.name == vm:
531                 for k, v in i.networks.items():
532                     for ip in v:
533                         if IPAddress(ip) in subnet:
534                             return ip
535         return None
536
537     def get_vm_ip(self, keywords, net_addr=None, net_addr_len=None):
538         """
539         :param keywords: list of keywords to search for vm, e.g. ['bng', 'gmux', 'brg']
540         :param net_addr: e.g. 10.12.5.0
541         :param net_addr_len: e.g. 24
542         :return: dictionary {keyword: ip}
543         """
544         if not net_addr:
545             net_addr = self.external_net_addr
546
547         if not net_addr_len:
548             net_addr_len = self.external_net_prefix_len
549
550         param = ' '.join([k + ' ' + v for k, v in self.cloud.items() if 'identity' not in k])
551         openstackcmd = 'nova ' + param + ' list'
552         self.logger.debug(openstackcmd)
553
554         results = os.popen(openstackcmd).read()
555         all_vm_ip_dict = self.extract_vm_ip_as_dict(results, net_addr, net_addr_len)
556         latest_vm_list = self.remove_old_vms(all_vm_ip_dict.keys(), self.cpe_vm_prefix)
557         latest_vm_ip_dict = {vm: all_vm_ip_dict[vm] for vm in latest_vm_list}
558         ip_dict = self.select_subset_vm_ip(latest_vm_ip_dict, keywords)
559         if self.oom_mode:
560             ip_dict.update(self.get_oom_onap_vm_ip(keywords))
561
562         if len(ip_dict) != len(keywords):
563             self.logger.error('Cannot find all desired IP addresses for %s.', keywords)
564             self.logger.error(json.dumps(ip_dict, indent=4, sort_keys=True))
565             self.logger.error('Temporarily continuing... remember to check back vcpecommon.py line: 396')
566 #            sys.exit(1)
567         return ip_dict
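    # Illustrative return value for keywords ['brg', 'gmux'] (addresses are placeholders):
    #   {'brg': '10.12.5.101', 'gmux': '10.12.5.102'}
    # In oom_mode, entries for ONAP hosts such as 'sdnc' or 'so' are overwritten with
    # the k8s node public IP by get_oom_onap_vm_ip().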
568
569     def get_oom_onap_vm_ip(self, keywords):
570         vm_ip = {}
571         for vm in keywords:
572             if vm in self.host_names:
573                 vm_ip[vm] = self.oom_so_sdnc_aai_ip
574         return vm_ip
575
576     def get_k8s_service_cluster_ip(self, service):
577         """
578         Returns cluster IP for a given service
579         :param service: name of the service
580         :return: cluster ip
581         """
582         config.load_kube_config()
583         api = client.CoreV1Api()
584         kslogger = logging.getLogger('kubernetes')
585         kslogger.setLevel(logging.INFO)
586         try:
587             resp = api.read_namespaced_service(service, self.onap_namespace)
588         except client.rest.ApiException as e:
589             self.logger.error('Error while making k8s API request: ' + e.body)
590             sys.exit(1)
591
592         return resp.spec.cluster_ip
593
594     def get_k8s_service_endpoint_info(self, service, subset):
595         """
596         Returns endpoint data for a given service and subset. If there
597         is more than one endpoint, data for the first one in the list
598         returned by the API is used.
599         :param service: name of the service
600         :param subset: subset name, one of "ip", "port"
601         :return: endpoint ip or port, depending on the requested subset
602         """
603         config.load_kube_config()
604         api = client.CoreV1Api()
605         kslogger = logging.getLogger('kubernetes')
606         kslogger.setLevel(logging.INFO)
607         try:
608             resp = api.read_namespaced_endpoints(service, self.onap_namespace)
609         except client.rest.ApiException as e:
610             self.logger.error('Error while making k8s API request: ' + e.body)
611             sys.exit(1)
612
613         if subset == "ip":
614             return resp.subsets[0].addresses[0].ip
615         elif subset == "port":
616             return resp.subsets[0].ports[0].port
617         else:
618             self.logger.error("Unsupported subset type")
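    # Usage examples taken from this class:
    #   self.get_k8s_service_endpoint_info('mariadb-galera', 'ip')    -> endpoint IP
    #   self.get_k8s_service_endpoint_info('mariadb-galera', 'port')  -> endpoint port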
619
620     def extract_vm_ip_as_dict(self, novalist_results, net_addr, net_addr_len):
621         vm_ip_dict = {}
622         for line in novalist_results.split('\n'):
623             fields = line.split('|')
624             if len(fields) == 8:
625                 vm_name = fields[2]
626                 ip_info = fields[-2]
627                 ip = self.extract_ip_from_str(net_addr, net_addr_len, ip_info)
628                 vm_ip_dict[vm_name] = ip
629
630         return vm_ip_dict
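    # A 'nova list' row such as the following (illustrative) produces one entry mapping
    # the Name column to the address on the requested network:
    #   | <id> | zdcpe1cpe01brgemu01_201805222229 | ACTIVE | - | Running | net=10.3.0.4, 10.12.5.101 |
    # i.e. the BRG VM maps to '10.12.5.101' when the 10.12.0.0/16 network is selected.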
631
632     def remove_old_vms(self, vm_list, prefix):
633         """
634         For VMs named with the format name_timestamp, keep only the one with the latest timestamp.
635         E.g.,
636             zdcpe1cpe01brgemu01_201805222148        (drop this)
637             zdcpe1cpe01brgemu01_201805222229        (keep this)
638             zdcpe1cpe01gw01_201805162201
639         """
640         new_vm_list = []
641         same_type_vm_dict = {}
642         for vm in vm_list:
643             fields = vm.split('_')
644             if vm.startswith(prefix) and len(fields) == 2 and len(fields[-1]) == len('201805222148') and fields[-1].isdigit():
645                 if vm > same_type_vm_dict.get(fields[0], '0'):
646                     same_type_vm_dict[fields[0]] = vm
647             else:
648                 new_vm_list.append(vm)
649
650         new_vm_list.extend(same_type_vm_dict.values())
651         return new_vm_list
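    # Illustrative example using the names from the docstring: for the input
    #   ['zdcpe1cpe01brgemu01_201805222148', 'zdcpe1cpe01brgemu01_201805222229',
    #    'zdcpe1cpe01gw01_201805162201'] with prefix 'zdcpe',
    # only 'zdcpe1cpe01brgemu01_201805222229' and 'zdcpe1cpe01gw01_201805162201'
    # are kept (ordering of the returned list is not significant).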
652
653     def select_subset_vm_ip(self, all_vm_ip_dict, vm_name_keyword_list):
654         vm_ip_dict = {}
655         for keyword in vm_name_keyword_list:
656             for vm, ip in all_vm_ip_dict.items():
657                 if keyword in vm:
658                     vm_ip_dict[keyword] = ip
659                     break
660         return vm_ip_dict
661
662     def del_vgmux_ves_mode(self):
663         url = self.vpp_ves_url.format(self.hosts['mux']) + '/mode'
664         r = requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
665         self.logger.debug('%s', r)
666
667     def del_vgmux_ves_collector(self):
668         url = self.vpp_ves_url.format(self.hosts['mux']) + '/config'
669         r = requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
670         self.logger.debug('%s', r)
671
672     def set_vgmux_ves_collector(self):
673         url = self.vpp_ves_url.format(self.hosts['mux'])
674         data = {'config':
675                     {'server-addr': self.hosts[self.dcae_ves_collector_name],
676                      'server-port': '30235' if self.oom_mode else '8081',
677                      'read-interval': '10',
678                      'is-add':'1'
679                      }
680                 }
681         r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
682         self.logger.debug('%s', r)
683
684     def set_vgmux_packet_loss_rate(self, lossrate, vg_vnf_instance_name):
685         url = self.vpp_ves_url.format(self.hosts['mux'])
686         data = {"mode":
687                     {"working-mode": "demo",
688                      "base-packet-loss": str(lossrate),
689                      "source-name": vg_vnf_instance_name
690                      }
691                 }
692         r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
693         self.logger.debug('%s', r)
694
695     # Return all the VxLAN interface names of the BRG or vGMUX based on the IP address
696     def get_vxlan_interfaces(self, ip, print_info=False):
697         url = self.vpp_inf_url.format(ip)
698         self.logger.debug('url is this: %s', url)
699         r = requests.get(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
700         data = r.json()['interfaces']['interface']
701         if print_info:
702             for inf in data:
703                 if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel':
704                     print(json.dumps(inf, indent=4, sort_keys=True))
705
706         return [inf['name'] for inf in data if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel']
707
708     # Delete all VxLAN interfaces of each host
709     def delete_vxlan_interfaces(self, host_dic):
710         for host, ip in host_dic.items():
711             deleted = False
712             self.logger.info('{0}: Getting VxLAN interfaces'.format(host))
713             inf_list = self.get_vxlan_interfaces(ip)
714             for inf in inf_list:
715                 deleted = True
716                 time.sleep(2)
717                 self.logger.info("{0}: Deleting VxLAN crossconnect {1}".format(host, inf))
718                 url = self.vpp_inf_url.format(ip) + '/interface/' + inf + '/v3po:l2'
719                 requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
720
721             for inf in inf_list:
722                 deleted = True
723                 time.sleep(2)
724                 self.logger.info("{0}: Deleting VxLAN interface {1}".format(host, inf))
725                 url = self.vpp_inf_url.format(ip) + '/interface/' + inf
726                 requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
727
728             if len(self.get_vxlan_interfaces(ip)) > 0:
729                 self.logger.error("Error deleting VxLAN from {0}, try to restart the VM, IP is {1}.".format(host, ip))
730                 return False
731
732             if not deleted:
733                 self.logger.info("{0}: no VxLAN interface found, nothing to delete".format(host))
734         return True
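    # The two DELETE passes above target URLs of the form (IP and interface name
    # are illustrative):
    #   http://<vm ip>:8183/restconf/config/ietf-interfaces:interfaces/interface/<vxlan if>/v3po:l2
    #   http://<vm ip>:8183/restconf/config/ietf-interfaces:interfaces/interface/<vxlan if>
    # i.e. the L2 cross-connect is removed first, then the VxLAN interface itself.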
735
736     @staticmethod
737     def save_object(obj, filepathname):
738         with open(filepathname, 'wb') as fout:
739             pickle.dump(obj, fout)
740
741     @staticmethod
742     def load_object(filepathname):
743         with open(filepathname, 'rb') as fin:
744             return pickle.load(fin)
745
746     @staticmethod
747     def increase_ip_address_or_vni_in_template(vnf_template_file, vnf_parameter_name_list):
748         with open(vnf_template_file) as json_input:
749             json_data = json.load(json_input)
750             param_list = json_data['VNF-API:input']['VNF-API:vnf-topology-information']['VNF-API:vnf-parameters']
751             for param in param_list:
752                 if param['vnf-parameter-name'] in vnf_parameter_name_list:
753                     ipaddr_or_vni = param['vnf-parameter-value'].split('.')
754                     number = int(ipaddr_or_vni[-1])
755                     if 254 == number:
756                         number = 10
757                     else:
758                         number = number + 1
759                     ipaddr_or_vni[-1] = str(number)
760                     param['vnf-parameter-value'] = '.'.join(ipaddr_or_vni)
761
762         assert json_data is not None
763         with open(vnf_template_file, 'w') as json_output:
764             json.dump(json_data, json_output, indent=4, sort_keys=True)
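    # Illustrative effect (parameter values are hypothetical): a vnf-parameter-value of
    # '10.3.0.2' becomes '10.3.0.3', a value ending in .254 wraps back to .10, and a
    # plain VNI such as '100' simply increments to '101' since split('.') leaves it as
    # a single element.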
765
766     def save_preload_data(self, preload_data):
767         self.save_object(preload_data, self.preload_dict_file)
768
769     def load_preload_data(self):
770         return self.load_object(self.preload_dict_file)
771
772     def save_vgmux_vnf_name(self, vgmux_vnf_name):
773         self.save_object(vgmux_vnf_name, self.vgmux_vnf_name_file)
774
775     def load_vgmux_vnf_name(self):
776         return self.load_object(self.vgmux_vnf_name_file)
777
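# Minimal usage sketch (illustrative only; assumes a reachable k8s cluster via the
# default kubeconfig and OpenStack credentials matching the CHANGEME sections above):
#
#   if __name__ == '__main__':
#       logging.basicConfig(level=logging.DEBUG)
#       common = VcpeCommon()
#       print(common.get_brg_mac_from_sdnc())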