Set gra_api to 'False' by default
[integration.git] / test / vcpe / vcpecommon.py
#!/usr/bin/env python

import json
import logging
import os
import pickle
import re
import sys

import ipaddress
import mysql.connector
import requests
import commands
import time
from novaclient import client as openstackclient
from kubernetes import client, config
from netaddr import IPAddress, IPNetwork

######################################################################
# Parts which must be updated / cross-checked during each deployment #
# are marked as CHANGEME                                             #
######################################################################

class VcpeCommon:
    #############################################################################################
    # Set the network prefix of the k8s host external address; it is used for pod public IP
    # autodetection but can be overridden by the user in case autodetection fails
    external_net_addr = '10.12.0.0'
    external_net_prefix_len = 16

    #############################################################################################
    # set the deployment mode: True when ONAP is deployed via OOM on Kubernetes
    oom_mode = True

    #############################################################################################
    # set the gra_api flag
    # Must not be set to True until the Frankfurt DGs are updated for the GRA-API infrastructure
    gra_api_flag = False
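    # When True, preloading is expected to go through the GENERIC-RESOURCE-API endpoints defined
    # further below (sdnc_preload_*_gra_url) instead of the VNF-API ones (assumption based on the
    # URL definitions in this file; the flag itself is consumed by the vcpe scripts)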

    ###########################
    # set Openstack credentials
    # CHANGEME part
    cloud = {
        '--os-auth-url': 'http://10.12.25.2:5000',
        '--os-username': 'kxi',
        '--os-user-domain-id': 'default',
        '--os-project-domain-id': 'default',
        '--os-tenant-id': '712b6016580e410b9abfec9ca34953ce' if oom_mode else '1e097c6713e74fd7ac8e4295e605ee1e',
        '--os-region-name': 'RegionOne',
        '--os-password': 'n3JhGMGuDzD8',
        '--os-project-domain-name': 'Integration-Release-Daily' if oom_mode else 'Integration-SB-07',
        '--os-identity-api-version': '3'
    }
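    # These key/value pairs are joined verbatim into openstack/nova CLI arguments, e.g.
    # "openstack --os-auth-url http://10.12.25.2:5000 --os-username kxi ... network show <name>"
    # (see set_network_name() and get_vm_ip() below)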

    ############################################################################
    # set oam and public network which must exist in openstack before deployment
    # CHANGEME part
    common_preload_config = {
        'oam_onap_net': 'oam_network_exxC' if oom_mode else 'oam_onap_lAky',
        'oam_onap_subnet': 'oam_network_exxC' if oom_mode else 'oam_onap_lAky',
        'public_net': 'external',
        'public_net_id': '971040b2-7059-49dc-b220-4fab50cb2ad4'
    }

    #############################################################################
    # Set the name of ONAP's k8s namespace and the sdnc controller pod
    # CHANGEME part
    onap_namespace = 'onap'
    onap_environment = 'dev'
    sdnc_controller_pod = '-'.join([onap_environment, 'sdnc-sdnc-0'])
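    # e.g. 'dev-sdnc-sdnc-0' with the defaults above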

    template_variable_symbol = '${'
    cpe_vm_prefix = 'zdcpe'

    #############################################################################################
    # preloading network config
    #  key=network role
    #  value = [subnet_start_ip, subnet_gateway_ip]
    preload_network_config = {
        'cpe_public': ['10.2.0.2', '10.2.0.1'],
        'cpe_signal': ['10.4.0.2', '10.4.0.1'],
        'brg_bng': ['10.3.0.2', '10.3.0.1'],
        'bng_mux': ['10.1.0.10', '10.1.0.1'],
        'mux_gw': ['10.5.0.10', '10.5.0.1']
    }

    dcae_ves_collector_name = 'dcae-bootstrap'
    global_subscriber_id = 'SDN-ETHERNET-INTERNET'
    project_name = 'Project-Demonstration'
    owning_entity_id = '520cc603-a3c4-4ec2-9ef4-ca70facd79c0'
    owning_entity_name = 'OE-Demonstration1'

    def __init__(self, extra_host_names=None):
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
        self.logger.info('Initializing configuration')

        ##################################################################################################################################
        # the following param must be updated, e.g. from the csar file (grep for the VfModuleModelInvariantUuid string),
        # before the vcpe.py customer call !!
        # vgw_VfModuleModelInvariantUuid is in the rescust service csar;
        # look in service-VcpesvcRescust1118-template.yml for the groups vgw module metadata. TODO: read this value automatically
        # CHANGEME part
        self.vgw_VfModuleModelInvariantUuid = '26d6a718-17b2-4ba8-8691-c44343b2ecd2'

        # OOM: this is the address that the brg and bng will NAT for sdnc access - the 10.0.0.x address of the k8s host running the sdnc-0 container
        self.sdnc_oam_ip = self.get_pod_node_oam_ip(self.sdnc_controller_pod)
        # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
        self.oom_so_sdnc_aai_ip = self.get_pod_node_public_ip(self.sdnc_controller_pod)
        # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
        self.oom_dcae_ves_collector = self.oom_so_sdnc_aai_ip
        # OOM: this is a k8s host external IP, e.g. oom-k8s-01 IP
        self.mr_ip_addr = self.oom_so_sdnc_aai_ip
        self.mr_ip_port = '30227'
        self.so_nbi_port = '30277' if self.oom_mode else '8080'
        self.sdnc_preloading_port = '30267' if self.oom_mode else '8282'
        self.aai_query_port = '30233' if self.oom_mode else '8443'
        self.sniro_port = '30288' if self.oom_mode else '8080'

        self.host_names = ['sdc', 'so', 'sdnc', 'robot', 'aai-inst1', self.dcae_ves_collector_name]
        if extra_host_names:
            self.host_names.extend(extra_host_names)
        # get IP addresses
        self.hosts = self.get_vm_ip(self.host_names, self.external_net_addr, self.external_net_prefix_len)
        # this is the keyword used to name the vgw stack; it must not be used in other stacks
        self.vgw_name_keyword = 'base_vcpe_vgw'
        # this is the file that keeps the index of the last assigned SO name
        self.vgw_vfmod_name_index_file = '__var/vgw_vfmod_name_index'
        self.svc_instance_uuid_file = '__var/svc_instance_uuid'
        self.preload_dict_file = '__var/preload_dict'
        self.vgmux_vnf_name_file = '__var/vgmux_vnf_name'
        self.product_family_id = 'f9457e8c-4afd-45da-9389-46acd9bf5116'
        self.custom_product_family_id = 'a9a77d5a-123e-4ca2-9eb9-0b015d2ee0fb'
        self.instance_name_prefix = {
            'service': 'vcpe_svc',
            'network': 'vcpe_net',
            'vnf': 'vcpe_vnf',
            'vfmodule': 'vcpe_vfmodule'
        }
        self.aai_userpass = 'AAI', 'AAI'

        ############################################################################################################
        # the following key overrides the public key from the vCPE heat templates; it is important to use the correct one here
        # CHANGEME part
        self.pub_key = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh'

        self.os_tenant_id = self.cloud['--os-tenant-id']
        self.os_region_name = self.cloud['--os-region-name']
        self.common_preload_config['pub_key'] = self.pub_key
        self.sniro_url = 'http://' + self.hosts['robot'] + ':' + self.sniro_port + '/__admin/mappings'
        self.sniro_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        self.homing_solution = 'sniro'  # value is either 'sniro' or 'oof'
#        self.homing_solution = 'oof'
        self.customer_location_used_by_oof = {
            "customerLatitude": "32.897480",
            "customerLongitude": "-97.040443",
            "customerName": "some_company"
        }

        #############################################################################################
        # SDC urls
        self.sdc_be_port = '30204'
        self.sdc_be_request_userpass = 'vid', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
        self.sdc_be_request_headers = {'X-ECOMP-InstanceID': 'VID'}
        self.sdc_be_url_prefix = 'https://' + self.hosts['sdc'] + ':' + self.sdc_be_port
        self.sdc_service_list_url = self.sdc_be_url_prefix + '/sdc/v1/catalog/services'

        self.sdc_fe_port = '30207'
        self.sdc_fe_request_userpass = 'beep', 'boop'
        self.sdc_fe_request_headers = {'USER_ID': 'demo', 'Content-Type': 'application/json'}
        self.sdc_fe_url_prefix = 'https://' + self.hosts['sdc'] + ':' + self.sdc_fe_port
        self.sdc_get_category_list_url = self.sdc_fe_url_prefix + '/sdc1/feProxy/rest/v1/categories'
        self.sdc_create_allotted_resource_subcategory_url = self.sdc_fe_url_prefix + '/sdc1/feProxy/rest/v1/category/resources/resourceNewCategory.allotted%20resource/subCategory'

        #############################################################################################
        # SDNC urls
        self.sdnc_userpass = 'admin', 'Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U'
        self.sdnc_db_name = 'sdnctl'
        self.sdnc_db_user = 'sdnctl'
        self.sdnc_db_pass = 'gamma'
        self.sdnc_db_port = '32774'
        self.sdnc_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        self.sdnc_preload_network_url = 'https://' + self.hosts['sdnc'] + \
                                        ':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-network-topology-operation'
        self.sdnc_preload_network_gra_url = 'https://' + self.hosts['sdnc'] + \
                                        ':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-network-topology-operation'
        self.sdnc_preload_vnf_url = 'https://' + self.hosts['sdnc'] + \
                                    ':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-vnf-topology-operation'
        self.sdnc_preload_gra_url = 'https://' + self.hosts['sdnc'] + \
                                    ':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-vf-module-topology-operation'
        self.sdnc_ar_cleanup_url = 'https://' + self.hosts['sdnc'] + ':' + self.sdnc_preloading_port + \
                                   '/restconf/config/GENERIC-RESOURCE-API:'
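        # With the defaults above these expand to, e.g.:
        #   https://<sdnc-host>:30267/restconf/operations/VNF-API:preload-vnf-topology-operation
        #   https://<sdnc-host>:30267/restconf/operations/GENERIC-RESOURCE-API:preload-vf-module-topology-operation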

        #############################################################################################
        # SO urls, note: do NOT add a '/' at the end of the url
        self.so_req_api_url = {'v4': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances',
                               'v5': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances'}
        self.so_check_progress_api_url = 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/orchestrationRequests/v6'
        self.so_userpass = 'InfraPortalClient', 'password1$'
        self.so_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        self.so_db_name = 'catalogdb'
        self.so_db_user = 'root'
        self.so_db_pass = 'secretpassword'
        self.so_db_port = '30252' if self.oom_mode else '32769'

        self.vpp_inf_url = 'http://{0}:8183/restconf/config/ietf-interfaces:interfaces'
        self.vpp_api_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        self.vpp_api_userpass = ('admin', 'admin')
        self.vpp_ves_url = 'http://{0}:8183/restconf/config/vesagent:vesagent'

        #############################################################################################
        # POLICY urls
        self.policy_userpass = ('healthcheck', 'zb!XztG34')
        self.policy_headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
        self.policy_api_url = 'https://{0}:6969/policy/api/v1/policytypes/onap.policies.controlloop.Operational/versions/1.0.0/policies'
        self.policy_pap_get_url = 'https://{0}:6969/policy/pap/v1/pdps'
        self.policy_pap_json = {'policies': [{'policy-id': 'operational.vcpe'}]}
        self.policy_pap_post_url = self.policy_pap_get_url + '/policies'
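        # i.e. https://<policy-pap>:6969/policy/pap/v1/pdps/policies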
        self.policy_api_service_name = 'policy-api'
        self.policy_pap_service_name = 'policy-pap'

        #############################################################################################
        # MARIADB-GALERA settings
        self.mariadb_galera_endpoint_ip = self.get_k8s_service_endpoint_info('mariadb-galera', 'ip')
        self.mariadb_galera_endpoint_port = self.get_k8s_service_endpoint_info('mariadb-galera', 'port')

    def heatbridge(self, openstack_stack_name, svc_instance_uuid):
        """
        Add vserver information to AAI
        """
        self.logger.info('Adding vServer information to AAI for {0}'.format(openstack_stack_name))
        if not self.oom_mode:
            cmd = '/opt/demo.sh heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid)
            ret = commands.getstatusoutput("ssh -i onap_dev root@{0} '{1}'".format(self.hosts['robot'], cmd))
            self.logger.debug('%s', ret)
        else:
            print('To add vGMUX vserver info to AAI, do the following:')
            print('- ssh to rancher')
            print('- sudo su -')
            print('- cd /root/oom/kubernetes/robot')
            print('- ./demo-k8s.sh onap heatbridge {0} {1} vCPE'.format(openstack_stack_name, svc_instance_uuid))

    def get_brg_mac_from_sdnc(self):
        """
        Check the DHCP_MAP table in the SDNC DB and find the MAC address of the newly instantiated BRG.
        Note that there might be multiple BRGs; the most recently instantiated BRG always has the largest IP address.
        """
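        # Illustrative example only: rows are (mac, ip) pairs such as ('fa:16:3e:xx:xx:xx', '10.3.0.22'),
        # and the MAC whose IP has the highest last octet is returned.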
        cnx = mysql.connector.connect(user=self.sdnc_db_user, password=self.sdnc_db_pass, database=self.sdnc_db_name,
                                      host=self.hosts['sdnc'], port=self.sdnc_db_port)
        cursor = cnx.cursor()
        query = "SELECT * from DHCP_MAP"
        cursor.execute(query)

        self.logger.debug('DHCP_MAP table in SDNC')
        mac_recent = None
        host = -1
        for mac, ip in cursor:
            self.logger.debug(mac + ':' + ip)
            this_host = int(ip.split('.')[-1])
            if host < this_host:
                host = this_host
                mac_recent = mac

        cnx.close()

        assert mac_recent
        return mac_recent

    def execute_cmds_mariadb(self, cmds):
        self.execute_cmds_db(cmds, self.sdnc_db_user, self.sdnc_db_pass,
                             self.sdnc_db_name, self.mariadb_galera_endpoint_ip,
                             self.mariadb_galera_endpoint_port)

    def execute_cmds_sdnc_db(self, cmds):
        self.execute_cmds_db(cmds, self.sdnc_db_user, self.sdnc_db_pass, self.sdnc_db_name,
                             self.hosts['sdnc'], self.sdnc_db_port)

    def execute_cmds_so_db(self, cmds):
        self.execute_cmds_db(cmds, self.so_db_user, self.so_db_pass, self.so_db_name,
                             self.hosts['so'], self.so_db_port)

    def execute_cmds_db(self, cmds, dbuser, dbpass, dbname, host, port):
        cnx = mysql.connector.connect(user=dbuser, password=dbpass, database=dbname, host=host, port=port)
        cursor = cnx.cursor()
        for cmd in cmds:
            self.logger.debug(cmd)
            cursor.execute(cmd)
            self.logger.debug('%s', cursor)
        cnx.commit()
        cursor.close()
        cnx.close()

    def find_file(self, file_name_keyword, file_ext, search_dir):
        """
        :param file_name_keyword: keyword used to look for the csar file, case-insensitive matching, e.g., infra
        :param file_ext: e.g., csar, json
        :param search_dir: path to search
        :return: path name of the file
        """
        file_name_keyword = file_name_keyword.lower()
        file_ext = file_ext.lower()
        if not file_ext.startswith('.'):
            file_ext = '.' + file_ext

        filenamepath = None
        for file_name in os.listdir(search_dir):
            file_name_lower = file_name.lower()
            if file_name_keyword in file_name_lower and file_name_lower.endswith(file_ext):
                if filenamepath:
                    self.logger.error('Multiple files found for *{0}*{1} in '
                                      'directory {2}'.format(file_name_keyword, file_ext, search_dir))
                    sys.exit(1)
                filenamepath = os.path.abspath(os.path.join(search_dir, file_name))

        if filenamepath:
            return filenamepath
        else:
            self.logger.error("Cannot find *{0}*{1} in directory {2}".format(file_name_keyword, file_ext, search_dir))
            sys.exit(1)

    @staticmethod
    def network_name_to_subnet_name(network_name):
        """
        :param network_name: example: vcpe_net_cpe_signal_201711281221
        :return: vcpe_net_cpe_signal_subnet_201711281221
        """
        fields = network_name.split('_')
        fields.insert(-1, 'subnet')
        return '_'.join(fields)

    def set_network_name(self, network_name):
        param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
        openstackcmd = 'openstack ' + param
        cmd = ' '.join([openstackcmd, 'network set --name', network_name, 'ONAP-NW1'])
        os.popen(cmd)

    def set_subnet_name(self, network_name):
        """
        Example: network_name = vcpe_net_cpe_signal_201711281221
        set the subnet name to vcpe_net_cpe_signal_subnet_201711281221
        :return:
        """
        param = ' '.join([k + ' ' + v for k, v in self.cloud.items()])
        openstackcmd = 'openstack ' + param

        # expected results: | subnets | subnet_id |
        subnet_info = os.popen(openstackcmd + ' network show ' + network_name + ' |grep subnets').read().split('|')
        if len(subnet_info) > 2 and subnet_info[1].strip() == 'subnets':
            subnet_id = subnet_info[2].strip()
            subnet_name = self.network_name_to_subnet_name(network_name)
            cmd = ' '.join([openstackcmd, 'subnet set --name', subnet_name, subnet_id])
            os.popen(cmd)
            self.logger.info("Subnet name set to: " + subnet_name)
            return True
        else:
            self.logger.error("Can't get subnet info from network name: " + network_name)
            return False

    def set_closed_loop_policy(self, policy_template_file):
        # Gather policy services cluster ips
        p_api_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_api_service_name)
        p_pap_cluster_ip = self.get_k8s_service_cluster_ip(self.policy_pap_service_name)

        # Read policy json from file
        with open(policy_template_file) as f:
            try:
                policy_json = json.load(f)
            except ValueError:
                self.logger.error(policy_template_file + " doesn't seem to contain valid JSON data")
                sys.exit(1)

        # Check policy already applied
        policy_exists_req = requests.get(self.policy_pap_get_url.format(
                            p_pap_cluster_ip), auth=self.policy_userpass,
                            verify=False, headers=self.policy_headers)
        if policy_exists_req.status_code != 200:
            self.logger.error('Failure in checking CL policy existence. '
                              'Policy-pap responded with HTTP code {0}'.format(
                              policy_exists_req.status_code))
            sys.exit(1)

        try:
            policy_exists_json = policy_exists_req.json()
        except ValueError as e:
            self.logger.error('Policy-pap request failed: ' + e.message)
            sys.exit(1)

        try:
            assert policy_exists_json['groups'][0]['pdpSubgroups'] \
                               [1]['policies'][0]['name'] != 'operational.vcpe'
        except AssertionError:
            self.logger.info('vCPE closed loop policy already exists, not applying')
            return
        except IndexError:
            pass  # policy doesn't exist

        # Create policy
        policy_create_req = requests.post(self.policy_api_url.format(
                            p_api_cluster_ip), auth=self.policy_userpass,
                            json=policy_json, verify=False,
                            headers=self.policy_headers)
        # Get the policy version from the policy-api response
        if policy_create_req.status_code != 200:
            self.logger.error('Failed creating policy. Policy-api responded'
                              ' with HTTP code {0}'.format(policy_create_req.status_code))
            sys.exit(1)

        try:
            policy_version = json.loads(policy_create_req.text)['policy-version']
        except (KeyError, ValueError):
            self.logger.error('Policy API response not understood:')
            self.logger.debug('\n' + str(policy_create_req.text))
            sys.exit(1)

        # Inject the policy into Policy PAP
        self.policy_pap_json['policies'].append({'policy-version': policy_version})
        policy_insert_req = requests.post(self.policy_pap_post_url.format(
                            p_pap_cluster_ip), auth=self.policy_userpass,
                            json=self.policy_pap_json, verify=False,
                            headers=self.policy_headers)
        if policy_insert_req.status_code != 200:
            self.logger.error('Policy PAP request failed with HTTP code '
                              '{0}'.format(policy_insert_req.status_code))
            sys.exit(1)
        self.logger.info('Successfully pushed closed loop policy')

    def is_node_in_aai(self, node_type, node_uuid):
        key = None
        search_node_type = None
        if node_type == 'service':
            search_node_type = 'service-instance'
            key = 'service-instance-id'
        elif node_type == 'vnf':
            search_node_type = 'generic-vnf'
            key = 'vnf-id'
        else:
            self.logger.error('Invalid node_type: ' + node_type)
            sys.exit(1)

        url = 'https://{0}:{1}/aai/v11/search/nodes-query?search-node-type={2}&filter={3}:EQUALS:{4}'.format(
            self.hosts['aai-inst1'], self.aai_query_port, search_node_type, key, node_uuid)

        headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-FromAppID': 'vCPE-Robot', 'X-TransactionId': 'get_aai_subscr'}
        r = requests.get(url, headers=headers, auth=self.aai_userpass, verify=False)
        response = r.json()
        self.logger.debug('aai query: ' + url)
        self.logger.debug('aai response:\n' + json.dumps(response, indent=4, sort_keys=True))
        return 'result-data' in response

    @staticmethod
    def extract_ip_from_str(net_addr, net_addr_len, sz):
        """
        :param net_addr: e.g. 10.5.12.0
        :param net_addr_len: e.g. 24
        :param sz: a string
        :return: the first IP address matching the network, e.g. 10.5.12.3
        """
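        # Example: extract_ip_from_str('10.12.0.0', 16, 'oam-net=10.0.100.1; ext-net=10.12.5.2')
        # returns '10.12.5.2', the first address in the string that falls inside 10.12.0.0/16.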
        network = ipaddress.ip_network(unicode('{0}/{1}'.format(net_addr, net_addr_len)), strict=False)
        ip_list = re.findall(r'[0-9]+(?:\.[0-9]+){3}', sz)
        for ip in ip_list:
            this_net = ipaddress.ip_network(unicode('{0}/{1}'.format(ip, net_addr_len)), strict=False)
            if this_net == network:
                return str(ip)
        return None

    def get_pod_node_oam_ip(self, pod):
        """
        Assumes kubectl is available and configured with the default config (~/.kube/config)
        :param pod: pod name substring, e.g. 'sdnc-sdnc-0'
        :return: the pod's cluster node OAM IP (10.0.0.0/16)
        """
        ret = None
        config.load_kube_config()
        api = client.CoreV1Api()
        kslogger = logging.getLogger('kubernetes')
        kslogger.setLevel(logging.INFO)
        res = api.list_pod_for_all_namespaces()
        for i in res.items:
            if pod in i.metadata.name:
                self.logger.debug("found {0}\t{1}\t{2}".format(i.metadata.name, i.status.host_ip, i.spec.node_name))
                ret = i.status.host_ip
                break

        if ret is None:
            ret = raw_input("Enter " + self.sdnc_controller_pod + " pod cluster node OAM IP address (10.0.0.0/16): ")
        return ret

    def get_pod_node_public_ip(self, pod):
        """
        Assumes kubectl is available and configured with the default config (~/.kube/config)
        :param pod: pod name substring, e.g. 'sdnc-sdnc-0'
        :return: the pod's cluster node public IP (i.e. in 10.12.0.0/16)
        """
        ret = None
        config.load_kube_config()
        api = client.CoreV1Api()
        kslogger = logging.getLogger('kubernetes')
        kslogger.setLevel(logging.INFO)
        res = api.list_pod_for_all_namespaces()
        for i in res.items:
            if pod in i.metadata.name:
                ret = self.get_vm_public_ip_by_nova(i.spec.node_name)
                self.logger.debug("found node {0} public ip: {1}".format(i.spec.node_name, ret))
                break

        if ret is None:
            ret = raw_input("Enter " + self.sdnc_controller_pod + " pod cluster node public IP address (i.e. " + self.external_net_addr + "): ")
        return ret

    def get_vm_public_ip_by_nova(self, vm):
        """
        This method uses the openstack nova api to retrieve the vm public ip
        :param vm: vm name
        :return: vm public ip
        """
        subnet = IPNetwork('{0}/{1}'.format(self.external_net_addr, self.external_net_prefix_len))
        nova = openstackclient.Client(2, self.cloud['--os-username'], self.cloud['--os-password'], self.cloud['--os-tenant-id'], self.cloud['--os-auth-url'])
        for i in nova.servers.list():
            if i.name == vm:
                for k, v in i.networks.items():
                    for ip in v:
                        if IPAddress(ip) in subnet:
                            return ip
        return None

    def get_vm_ip(self, keywords, net_addr=None, net_addr_len=None):
        """
        :param keywords: list of keywords to search for vm, e.g. ['bng', 'gmux', 'brg']
        :param net_addr: e.g. 10.12.5.0
        :param net_addr_len: e.g. 24
        :return: dictionary {keyword: ip}
        """
        if not net_addr:
            net_addr = self.external_net_addr

        if not net_addr_len:
            net_addr_len = self.external_net_prefix_len

        param = ' '.join([k + ' ' + v for k, v in self.cloud.items() if 'identity' not in k])
        openstackcmd = 'nova ' + param + ' list'
        self.logger.debug(openstackcmd)

        results = os.popen(openstackcmd).read()
        all_vm_ip_dict = self.extract_vm_ip_as_dict(results, net_addr, net_addr_len)
        latest_vm_list = self.remove_old_vms(all_vm_ip_dict.keys(), self.cpe_vm_prefix)
        latest_vm_ip_dict = {vm: all_vm_ip_dict[vm] for vm in latest_vm_list}
        ip_dict = self.select_subset_vm_ip(latest_vm_ip_dict, keywords)
        if self.oom_mode:
            ip_dict.update(self.get_oom_onap_vm_ip(keywords))

        if len(ip_dict) != len(keywords):
            self.logger.error('Cannot find all desired IP addresses for %s.', keywords)
            self.logger.error(json.dumps(ip_dict, indent=4, sort_keys=True))
            self.logger.error('Temporarily continuing... remember to check back vcpecommon.py line: 396')
#            sys.exit(1)
        return ip_dict

    def get_oom_onap_vm_ip(self, keywords):
        vm_ip = {}
        for vm in keywords:
            if vm in self.host_names:
                vm_ip[vm] = self.oom_so_sdnc_aai_ip
        return vm_ip

    def get_k8s_service_cluster_ip(self, service):
        """
        Returns the cluster IP for a given service
        :param service: name of the service
        :return: cluster ip
        """
        config.load_kube_config()
        api = client.CoreV1Api()
        kslogger = logging.getLogger('kubernetes')
        kslogger.setLevel(logging.INFO)
        try:
            resp = api.read_namespaced_service(service, self.onap_namespace)
        except client.rest.ApiException as e:
            self.logger.error('Error while making k8s API request: ' + e.body)
            sys.exit(1)

        return resp.spec.cluster_ip

    def get_k8s_service_endpoint_info(self, service, subset):
        """
        Returns endpoint data for a given service and subset. If there
        is more than one endpoint, returns data for the first one from
        the list that the API returned.
        :param service: name of the service
        :param subset: subset name, one of "ip", "port"
        :return: endpoint ip or port
        """
        config.load_kube_config()
        api = client.CoreV1Api()
        kslogger = logging.getLogger('kubernetes')
        kslogger.setLevel(logging.INFO)
        try:
            resp = api.read_namespaced_endpoints(service, self.onap_namespace)
        except client.rest.ApiException as e:
            self.logger.error('Error while making k8s API request: ' + e.body)
            sys.exit(1)

        if subset == "ip":
            return resp.subsets[0].addresses[0].ip
        elif subset == "port":
            return resp.subsets[0].ports[0].port
        else:
            self.logger.error("Unsupported subset type")

    def extract_vm_ip_as_dict(self, novalist_results, net_addr, net_addr_len):
        vm_ip_dict = {}
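        # Each data row of the `nova list` table has the form
        # | ID | Name | Status | Task State | Power State | Networks |
        # so splitting on '|' yields 8 fields, with the VM name at index 2 and the networks at index -2.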
        for line in novalist_results.split('\n'):
            fields = line.split('|')
            if len(fields) == 8:
                vm_name = fields[2]
                ip_info = fields[-2]
                ip = self.extract_ip_from_str(net_addr, net_addr_len, ip_info)
                vm_ip_dict[vm_name] = ip

        return vm_ip_dict

    def remove_old_vms(self, vm_list, prefix):
        """
        For vms with format name_timestamp, only keep the one with the latest timestamp.
        E.g.,
            zdcpe1cpe01brgemu01_201805222148        (drop this)
            zdcpe1cpe01brgemu01_201805222229        (keep this)
            zdcpe1cpe01gw01_201805162201
        """
        new_vm_list = []
        same_type_vm_dict = {}
        for vm in vm_list:
            fields = vm.split('_')
            if vm.startswith(prefix) and len(fields) == 2 and len(fields[-1]) == len('201805222148') and fields[-1].isdigit():
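                # plain string comparison is enough here: the timestamp suffix is fixed-width
                # (YYYYMMDDHHMM), so the lexically larger name is also the newer VM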
                if vm > same_type_vm_dict.get(fields[0], '0'):
                    same_type_vm_dict[fields[0]] = vm
            else:
                new_vm_list.append(vm)

        new_vm_list.extend(same_type_vm_dict.values())
        return new_vm_list

    def select_subset_vm_ip(self, all_vm_ip_dict, vm_name_keyword_list):
        vm_ip_dict = {}
        for keyword in vm_name_keyword_list:
            for vm, ip in all_vm_ip_dict.items():
                if keyword in vm:
                    vm_ip_dict[keyword] = ip
                    break
        return vm_ip_dict

    def del_vgmux_ves_mode(self):
        url = self.vpp_ves_url.format(self.hosts['mux']) + '/mode'
        r = requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
        self.logger.debug('%s', r)

    def del_vgmux_ves_collector(self):
        url = self.vpp_ves_url.format(self.hosts['mux']) + '/config'
        r = requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
        self.logger.debug('%s', r)

    def set_vgmux_ves_collector(self):
        url = self.vpp_ves_url.format(self.hosts['mux'])
        data = {'config':
                    {'server-addr': self.hosts[self.dcae_ves_collector_name],
                     'server-port': '30235' if self.oom_mode else '8081',
                     'read-interval': '10',
                     'is-add': '1'
                     }
                }
        r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
        self.logger.debug('%s', r)

    def set_vgmux_packet_loss_rate(self, lossrate, vg_vnf_instance_name):
        url = self.vpp_ves_url.format(self.hosts['mux'])
        data = {"mode":
                    {"working-mode": "demo",
                     "base-packet-loss": str(lossrate),
                     "source-name": vg_vnf_instance_name
                     }
                }
        r = requests.post(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass, json=data)
        self.logger.debug('%s', r)

    # return all the VxLAN interface names of BRG or vGMUX based on the IP address
    def get_vxlan_interfaces(self, ip, print_info=False):
        url = self.vpp_inf_url.format(ip)
        self.logger.debug('url is this: %s', url)
        r = requests.get(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)
        data = r.json()['interfaces']['interface']
        if print_info:
            for inf in data:
                if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel':
                    print(json.dumps(inf, indent=4, sort_keys=True))

        return [inf['name'] for inf in data if 'name' in inf and 'type' in inf and inf['type'] == 'v3po:vxlan-tunnel']

    # delete all VxLAN interfaces on each host
    def delete_vxlan_interfaces(self, host_dic):
        for host, ip in host_dic.items():
            deleted = False
            self.logger.info('{0}: Getting VxLAN interfaces'.format(host))
            inf_list = self.get_vxlan_interfaces(ip)
            for inf in inf_list:
                deleted = True
                time.sleep(2)
                self.logger.info("{0}: Deleting VxLAN crossconnect {1}".format(host, inf))
                url = self.vpp_inf_url.format(ip) + '/interface/' + inf + '/v3po:l2'
                requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)

            for inf in inf_list:
                deleted = True
                time.sleep(2)
                self.logger.info("{0}: Deleting VxLAN interface {1}".format(host, inf))
                url = self.vpp_inf_url.format(ip) + '/interface/' + inf
                requests.delete(url, headers=self.vpp_api_headers, auth=self.vpp_api_userpass)

            if len(self.get_vxlan_interfaces(ip)) > 0:
                self.logger.error("Error deleting VxLAN interfaces from {0}; try restarting the VM, IP is {1}.".format(host, ip))
                return False

            if not deleted:
                self.logger.info("{0}: no VxLAN interface found, nothing to delete".format(host))
        return True

    @staticmethod
    def save_object(obj, filepathname):
        with open(filepathname, 'wb') as fout:
            pickle.dump(obj, fout)

    @staticmethod
    def load_object(filepathname):
        with open(filepathname, 'rb') as fin:
            return pickle.load(fin)

    @staticmethod
    def increase_ip_address_or_vni_in_template(vnf_template_file, vnf_parameter_name_list):
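        # Example: a parameter value of '10.3.0.21' becomes '10.3.0.22' and a bare VNI such as
        # '72' becomes '73'; the last octet (or the VNI) wraps from 254 back to 10.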
        with open(vnf_template_file) as json_input:
            json_data = json.load(json_input)
            param_list = json_data['VNF-API:input']['VNF-API:vnf-topology-information']['VNF-API:vnf-parameters']
            for param in param_list:
                if param['vnf-parameter-name'] in vnf_parameter_name_list:
                    ipaddr_or_vni = param['vnf-parameter-value'].split('.')
                    number = int(ipaddr_or_vni[-1])
                    if 254 == number:
                        number = 10
                    else:
                        number = number + 1
                    ipaddr_or_vni[-1] = str(number)
                    param['vnf-parameter-value'] = '.'.join(ipaddr_or_vni)

        assert json_data is not None
        with open(vnf_template_file, 'w') as json_output:
            json.dump(json_data, json_output, indent=4, sort_keys=True)

    def save_preload_data(self, preload_data):
        self.save_object(preload_data, self.preload_dict_file)

    def load_preload_data(self):
        return self.load_object(self.preload_dict_file)

    def save_vgmux_vnf_name(self, vgmux_vnf_name):
        self.save_object(vgmux_vnf_name, self.vgmux_vnf_name_file)

    def load_vgmux_vnf_name(self):
        return self.load_object(self.vgmux_vnf_name_file)