########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    * See the License for the specific language governing permissions and
#    * limitations under the License.

import copy
import logging
import os
import random
import time
from contextlib import contextmanager

from cinderclient import client as cinderclient
from keystoneauth1 import loading, session
import novaclient.client as nvclient
import neutronclient.v2_0.client as neclient
from retrying import retry

from cosmo_tester.framework.handlers import (
    BaseHandler,
    BaseCloudifyInputsConfigReader)
from cosmo_tester.framework.util import get_actual_keypath

logging.getLogger('neutronclient.client').setLevel(logging.INFO)
logging.getLogger('novaclient.client').setLevel(logging.INFO)


VOLUME_TERMINATION_TIMEOUT_SECS = 300


class OpenstackCleanupContext(BaseHandler.CleanupContext):

    def __init__(self, context_name, env):
        super(OpenstackCleanupContext, self).__init__(context_name, env)
        # snapshot the infrastructure state before the test runs, so that
        # cleanup can later remove only what the test created
        self.before_run = self.env.handler.openstack_infra_state()

    def cleanup(self):
        """
        Cleans resources created by the test.
        Resources that existed before the test will not be removed.
        """
        super(OpenstackCleanupContext, self).cleanup()
        resources_to_teardown = self.get_resources_to_teardown(
            self.env, resources_to_keep=self.before_run)
        if self.skip_cleanup:
            self.logger.warn('[{0}] SKIPPING cleanup of resources: {1}'
                             .format(self.context_name, resources_to_teardown))
        else:
            self._clean(self.env, resources_to_teardown)

    @classmethod
    def clean_all(cls, env):
        """
        Cleans *all* resources, including resources that were not
        created by the test.
        """
        super(OpenstackCleanupContext, cls).clean_all(env)
        resources_to_teardown = cls.get_resources_to_teardown(env)
        cls._clean(env, resources_to_teardown)

    @classmethod
    def _clean(cls, env, resources_to_teardown):
        cls.logger.info('Openstack handler will try to remove these resources:'
                        ' {0}'.format(resources_to_teardown))
        failed_to_remove = env.handler.remove_openstack_resources(
            resources_to_teardown)
        if failed_to_remove:
            # drop empty groups, keeping only resources that actually failed
            trimmed_failed_to_remove = {key: value for key, value in
                                        failed_to_remove.iteritems()
                                        if value}
            if len(trimmed_failed_to_remove) > 0:
                msg = 'Openstack handler failed to remove some resources:' \
                      ' {0}'.format(trimmed_failed_to_remove)
                cls.logger.error(msg)
                raise RuntimeError(msg)

    @classmethod
    def get_resources_to_teardown(cls, env, resources_to_keep=None):
        all_existing_resources = env.handler.openstack_infra_state()
        if resources_to_keep:
            return env.handler.openstack_infra_state_delta(
                before=resources_to_keep, after=all_existing_resources)
        else:
            return all_existing_resources

    def update_server_id(self, server_name):

        # retrieve the id of the new server
        nova, _, _ = self.env.handler.openstack_clients()
        servers = nova.servers.list(
            search_opts={'name': server_name})
        if len(servers) > 1:
            raise RuntimeError(
                'Expected 1 server with name {0}, but found {1}'
                .format(server_name, len(servers)))

        new_server_id = servers[0].id

        # retrieve the id of the old server
        old_server_id = None
        servers = self.before_run['servers']
        for server_id, name in servers.iteritems():
            if server_name == name:
                old_server_id = server_id
                break
        if old_server_id is None:
            raise RuntimeError(
                'Could not find a server with name {0} '
                'in the internal cleanup context state'
                .format(server_name))

        # replace the id in the internal state
        servers[new_server_id] = servers.pop(old_server_id)
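
    # Hypothetical usage sketch (names made up, not from the original file):
    # a test that replaces the manager VM but keeps its name would call
    #     cleanup_context.update_server_id('test-manager')
    # so the pre-run snapshot tracks the new server id instead of the stale
    # one, and cleanup does not try to delete a server that no longer exists.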


class CloudifyOpenstackInputsConfigReader(BaseCloudifyInputsConfigReader):

    def __init__(self, cloudify_config, manager_blueprint_path, **kwargs):
        super(CloudifyOpenstackInputsConfigReader, self).__init__(
            cloudify_config, manager_blueprint_path=manager_blueprint_path,
            **kwargs)

    @property
    def region(self):
        return self.config['region']

    @property
    def management_server_name(self):
        return self.config['manager_server_name']

    @property
    def agent_key_path(self):
        return self.config['agent_private_key_path']

    @property
    def management_user_name(self):
        return self.config['ssh_user']

    @property
    def management_key_path(self):
        return self.config['ssh_key_filename']

    @property
    def agent_keypair_name(self):
        return self.config['agent_public_key_name']

    @property
    def management_keypair_name(self):
        return self.config['manager_public_key_name']

    @property
    def use_existing_agent_keypair(self):
        return self.config['use_existing_agent_keypair']

    @property
    def use_existing_manager_keypair(self):
        return self.config['use_existing_manager_keypair']

    @property
    def external_network_name(self):
        return self.config['external_network_name']

    @property
    def keystone_username(self):
        return self.config['keystone_username']

    @property
    def keystone_password(self):
        return self.config['keystone_password']

    @property
    def keystone_tenant_name(self):
        return self.config['keystone_tenant_name']

    @property
    def keystone_url(self):
        return self.config['keystone_url']

    @property
    def neutron_url(self):
        return self.config.get('neutron_url', None)

    @property
    def management_network_name(self):
        return self.config['management_network_name']

    @property
    def management_subnet_name(self):
        return self.config['management_subnet_name']

    @property
    def management_router_name(self):
        return self.config['management_router']

    @property
    def agents_security_group(self):
        return self.config['agents_security_group_name']

    @property
    def management_security_group(self):
        return self.config['manager_security_group_name']
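
    # Illustrative sample of the inputs file these properties read (values
    # are made up):
    #     keystone_username: admin
    #     keystone_password: secret
    #     keystone_tenant_name: tests
    #     keystone_url: http://openstack.example:5000/v3
    #     region: RegionOne
    #     manager_server_name: test-manager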


class OpenstackHandler(BaseHandler):

    CleanupContext = OpenstackCleanupContext
    CloudifyConfigReader = CloudifyOpenstackInputsConfigReader

    def before_bootstrap(self):
        super(OpenstackHandler, self).before_bootstrap()
        with self.update_cloudify_config() as patch:
            # append a random suffix so concurrent test runs get unique
            # manager server names
            suffix = '-%06x' % random.randrange(16 ** 6)
            server_name_prop_path = 'manager_server_name'
            patch.append_value(server_name_prop_path, suffix)

    def after_bootstrap(self, provider_context):
        super(OpenstackHandler, self).after_bootstrap(provider_context)
        resources = provider_context['resources']
        agent_keypair = resources['agents_keypair']
        management_keypair = resources['management_keypair']
        self.remove_agent_keypair = agent_keypair['external_resource'] is False
        self.remove_management_keypair = \
            management_keypair['external_resource'] is False

    def after_teardown(self):
        super(OpenstackHandler, self).after_teardown()
        if self.remove_agent_keypair:
            agent_key_path = get_actual_keypath(self.env,
                                                self.env.agent_key_path,
                                                raise_on_missing=False)
            if agent_key_path:
                os.remove(agent_key_path)
        if self.remove_management_keypair:
            management_key_path = get_actual_keypath(
                self.env,
                self.env.management_key_path,
                raise_on_missing=False)
            if management_key_path:
                os.remove(management_key_path)

    def openstack_clients(self):
        creds = self._client_creds()
        params = {
            'region_name': creds.pop('region_name'),
        }

        loader = loading.get_plugin_loader("password")
        auth = loader.load_from_options(**creds)
        sess = session.Session(auth=auth, verify=True)

        params['session'] = sess

        nova = nvclient.Client('2', **params)
        neutron = neclient.Client(**params)
        cinder = cinderclient.Client('2', **params)

        return (nova, neutron, cinder)
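
    # Sketch of the credential flow (illustrative values): _client_creds()
    # returns something like
    #     {'username': 'admin', 'password': 'secret',
    #      'auth_url': 'http://openstack.example:5000/v3',
    #      'project_name': 'tests', 'region_name': 'RegionOne'}
    # region_name is passed to each client directly, while the remaining
    # keys feed the keystoneauth1 password plugin behind the shared session.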

    @retry(stop_max_attempt_number=5, wait_fixed=20000)
    def openstack_infra_state(self):
        """
        The @retry decorator is used because this error sometimes occurs:
        ConnectionFailed: Connection to neutron failed: Maximum
        attempts reached
        """
        nova, neutron, cinder = self.openstack_clients()
        try:
            prefix = self.env.resources_prefix
        except (AttributeError, KeyError):
            prefix = ''
        return {
            'networks': dict(self._networks(neutron, prefix)),
            'subnets': dict(self._subnets(neutron, prefix)),
            'routers': dict(self._routers(neutron, prefix)),
            'security_groups': dict(self._security_groups(neutron, prefix)),
            'servers': dict(self._servers(nova, prefix)),
            'key_pairs': dict(self._key_pairs(nova, prefix)),
            'floatingips': dict(self._floatingips(neutron, prefix)),
            'ports': dict(self._ports(neutron, prefix)),
            'volumes': dict(self._volumes(cinder, prefix))
        }

    def openstack_infra_state_delta(self, before, after):
        after = copy.deepcopy(after)
        return {
            prop: self._remove_keys(after[prop], before[prop].keys())
            for prop in before
        }
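
    # Illustration with hypothetical ids:
    #     before = {'servers': {'id-1': 'old-vm'}}
    #     after  = {'servers': {'id-1': 'old-vm', 'id-2': 'test-vm'}}
    #     delta  = {'servers': {'id-2': 'test-vm'}}
    # i.e. only resources that appeared after the snapshot are returned,
    # and those are the teardown candidates.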

    def _find_keypairs_to_delete(self, nodes, node_instances):
        """Filter the nodes, only returning the names of keypair nodes.

        Examine node_instances and nodes, and return the external_name of
        those node_instances which correspond to a node that has a
        type == KeyPair

        To filter by deployment_id, simply make sure that the nodes and
        node_instances this method receives are pre-filtered
        (i.e. filter the nodes while fetching them from the manager)
        """
        keypairs = set()  # a set of (deployment_id, node_id) tuples

        for node in nodes:
            if node.get('type') != 'cloudify.openstack.nodes.KeyPair':
                continue
            # deployment_id isn't always present in local_env runs
            key = (node.get('deployment_id'), node['id'])
            keypairs.add(key)

        for node_instance in node_instances:
            key = (node_instance.get('deployment_id'),
                   node_instance['node_id'])
            if key not in keypairs:
                continue

            runtime_properties = node_instance['runtime_properties']
            if not runtime_properties:
                continue
            name = runtime_properties.get('external_name')
            if name:
                yield name
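
    # Illustration with hypothetical data: a node
    #     {'id': 'key', 'type': 'cloudify.openstack.nodes.KeyPair'}
    # plus a node_instance
    #     {'node_id': 'key',
    #      'runtime_properties': {'external_name': 'test-key'}}
    # yields 'test-key' -- the name under which nova knows the keypair.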

    def _delete_keypairs_by_name(self, keypair_names):
        nova, neutron, cinder = self.openstack_clients()
        existing_keypairs = nova.keypairs.list()

        for name in keypair_names:
            for keypair in existing_keypairs:
                if keypair.name == name:
                    nova.keypairs.delete(keypair)

    def remove_keypairs_from_local_env(self, local_env):
        """Query the local_env for nodes which are keypairs, and remove them.

        Similar to querying the manager, we can look up nodes in the
        local_env which is used for tests.
        """
        nodes = local_env.storage.get_nodes()
        node_instances = local_env.storage.get_node_instances()
        names = self._find_keypairs_to_delete(nodes, node_instances)
        self._delete_keypairs_by_name(names)

    def remove_keypairs_from_manager(self, deployment_id=None,
                                     rest_client=None):
        """Query the manager for nodes by deployment_id, and delete keypairs.

        Fetch nodes and node_instances from the manager by deployment_id
        (or all, if no deployment_id is given), find which ones represent
        openstack keypairs, and delete them.
        """
        if rest_client is None:
            rest_client = self.env.rest_client

        nodes = rest_client.nodes.list(deployment_id=deployment_id)
        node_instances = rest_client.node_instances.list(
            deployment_id=deployment_id)
        keypairs = self._find_keypairs_to_delete(nodes, node_instances)
        self._delete_keypairs_by_name(keypairs)

    def remove_keypair(self, name):
        """Delete an openstack keypair by name. If it doesn't exist,
        do nothing.
        """
        self._delete_keypairs_by_name([name])
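
    # e.g. handler.remove_keypair('test-key') -- a no-op when nova has no
    # keypair by that name.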

    def remove_openstack_resources(self, resources_to_remove):
        # basically sort of a workaround, but if we get the order wrong
        # the first time, there is a chance things would go better the next
        # time; a 3rd pass can't really hurt, can it?
        for _ in range(3):
            resources_to_remove = self._remove_openstack_resources_impl(
                resources_to_remove)
            if all([len(g) == 0 for g in resources_to_remove.values()]):
                break
            # give openstack some time to update its data structures
            time.sleep(3)
        return resources_to_remove
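
    # The returned dict mirrors openstack_infra_state(): one sub-dict per
    # resource group, holding only the resources that still could not be
    # removed after all three passes.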

    def _remove_openstack_resources_impl(self, resources_to_remove):
        nova, neutron, cinder = self.openstack_clients()

        servers = nova.servers.list()
        ports = neutron.list_ports()['ports']
        routers = neutron.list_routers()['routers']
        subnets = neutron.list_subnets()['subnets']
        networks = neutron.list_networks()['networks']
        # keypairs = nova.keypairs.list()
        floatingips = neutron.list_floatingips()['floatingips']
        security_groups = neutron.list_security_groups()['security_groups']
        volumes = cinder.volumes.list()

        failed = {
            'networks': {},
            'subnets': {},
            'routers': {},
            'servers': {},
            'key_pairs': {},
            'floatingips': {},
            'ports': {},
            'security_groups': {},
            'volumes': {}
        }

        volumes_to_remove = []
        for volume in volumes:
            if volume.id in resources_to_remove['volumes']:
                volumes_to_remove.append(volume)

        left_volumes = self._delete_volumes(nova, cinder, volumes_to_remove)
        for volume_id, ex in left_volumes.iteritems():
            failed['volumes'][volume_id] = ex

        for server in servers:
            if server.id in resources_to_remove['servers']:
                with self._handled_exception(server.id, failed, 'servers'):
                    nova.servers.delete(server)

        for router in routers:
            if router['id'] in resources_to_remove['routers']:
                with self._handled_exception(router['id'], failed, 'routers'):
                    # detach the router's ports before deleting it
                    for p in neutron.list_ports(
                            device_id=router['id'])['ports']:
                        neutron.remove_interface_router(router['id'], {
                            'port_id': p['id']
                        })
                    neutron.delete_router(router['id'])

        for port in ports:
            if port['id'] in resources_to_remove['ports']:
                with self._handled_exception(port['id'], failed, 'ports'):
                    neutron.delete_port(port['id'])

        for subnet in subnets:
            if subnet['id'] in resources_to_remove['subnets']:
                with self._handled_exception(subnet['id'], failed, 'subnets'):
                    neutron.delete_subnet(subnet['id'])

        for network in networks:
            if network['name'] == self.env.external_network_name:
                # never delete the pre-existing external network
                continue
            if network['id'] in resources_to_remove['networks']:
                with self._handled_exception(network['id'], failed,
                                             'networks'):
                    neutron.delete_network(network['id'])

        # TODO: implement key-pair creation and cleanup per tenant
        #
        # IMPORTANT: Do not remove key-pairs, they might be used
        # by another tenant (of the same user)
        #
        # for key_pair in keypairs:
        #     if key_pair.name == self.env.agent_keypair_name and \
        #             self.env.use_existing_agent_keypair:
        #         # this is a pre-existing agent key-pair, do not remove
        #         continue
        #     elif key_pair.name == self.env.management_keypair_name and \
        #             self.env.use_existing_manager_keypair:
        #         # this is a pre-existing manager key-pair, do not remove
        #         continue
        #     elif key_pair.id in resources_to_remove['key_pairs']:
        #         with self._handled_exception(key_pair.id, failed,
        #                                      'key_pairs'):
        #             nova.keypairs.delete(key_pair)

        for floatingip in floatingips:
            if floatingip['id'] in resources_to_remove['floatingips']:
                with self._handled_exception(floatingip['id'], failed,
                                             'floatingips'):
                    neutron.delete_floatingip(floatingip['id'])

        for security_group in security_groups:
            if security_group['name'] == 'default':
                # the default security group cannot be deleted
                continue
            if security_group['id'] in resources_to_remove['security_groups']:
                with self._handled_exception(security_group['id'],
                                             failed, 'security_groups'):
                    neutron.delete_security_group(security_group['id'])

        return failed

    def _delete_volumes(self, nova, cinder, existing_volumes):
        unremovables = {}
        end_time = time.time() + VOLUME_TERMINATION_TIMEOUT_SECS

        # phase 1: detach every volume from the servers it is attached to;
        # iterate over a copy, since failed volumes are removed from the list
        for volume in existing_volumes[:]:
            if volume.status in ['available', 'error', 'in-use']:
                try:
                    self.logger.info('Detaching volume {0} ({1}), currently in'
                                     ' status {2} ...'.
                                     format(volume.name, volume.id,
                                            volume.status))
                    for attachment in volume.attachments:
                        nova.volumes.delete_server_volume(
                            server_id=attachment['server_id'],
                            attachment_id=attachment['id'])
                except Exception as e:
                    self.logger.warning('Attempt to detach volume {0} ({1})'
                                        ' yielded exception: "{2}"'.
                                        format(volume.name, volume.id, e))
                    unremovables[volume.id] = e
                    existing_volumes.remove(volume)

        # phase 2: request deletion of each (now detached) volume
        for volume in existing_volumes[:]:
            if volume.status in ['available', 'error', 'in-use']:
                try:
                    self.logger.info('Deleting volume {0} ({1}), currently in'
                                     ' status {2} ...'.
                                     format(volume.name, volume.id,
                                            volume.status))
                    cinder.volumes.delete(volume)
                except Exception as e:
                    self.logger.warning('Attempt to delete volume {0} ({1})'
                                        ' yielded exception: "{2}"'.
                                        format(volume.name, volume.id, e))
                    unremovables[volume.id] = e
                    existing_volumes.remove(volume)

        # phase 3: poll the remaining volumes until deletion completes or
        # the timeout is reached
        while existing_volumes and time.time() < end_time:
            time.sleep(3)
            for volume in existing_volumes[:]:
                volume_id = volume.id
                volume_name = volume.name
                try:
                    vol = cinder.volumes.get(volume_id)
                    if vol.status == 'deleting':
                        self.logger.debug('volume {0} ({1}) is being '
                                          'deleted...'.format(volume_name,
                                                              volume_id))
                    else:
                        self.logger.warning('volume {0} ({1}) is in '
                                            'unexpected status: {2}'.
                                            format(volume_name, volume_id,
                                                   vol.status))
                except Exception as e:
                    # a 404 means the volume wasn't found, i.e. it was deleted
                    if hasattr(e, 'code') and e.code == 404:
                        self.logger.info('deleted volume {0} ({1})'.
                                         format(volume_name, volume_id))
                        existing_volumes.remove(volume)
                    else:
                        self.logger.warning('failed to remove volume {0} '
                                            '({1}), exception: {2}'.
                                            format(volume_name, volume_id, e))
                        unremovables[volume_id] = e
                        existing_volumes.remove(volume)

        # anything still in the list at this point has timed out
        for volume in existing_volumes:
            # try to get the volume's status
            try:
                vol = cinder.volumes.get(volume.id)
                vol_status = vol.status
            except Exception:
                # failed to get the volume... status is unknown
                vol_status = 'unknown'

            unremovables[volume.id] = 'timed out while removing volume '\
                                      '{0} ({1}), current volume status '\
                                      'is {2}'.format(volume.name,
                                                      volume.id,
                                                      vol_status)

        if unremovables:
            self.logger.warning('failed to remove volumes: {0}'.format(
                unremovables))

        return unremovables

    def _client_creds(self):
        return {
            'username': self.env.keystone_username,
            'password': self.env.keystone_password,
            'auth_url': self.env.keystone_url,
            'project_name': self.env.keystone_tenant_name,
            'region_name': self.env.region
        }

    def _networks(self, neutron, prefix):
        return [(n['id'], n['name'])
                for n in neutron.list_networks()['networks']
                if self._check_prefix(n['name'], prefix)]

    def _subnets(self, neutron, prefix):
        return [(n['id'], n['name'])
                for n in neutron.list_subnets()['subnets']
                if self._check_prefix(n['name'], prefix)]

    def _routers(self, neutron, prefix):
        return [(n['id'], n['name'])
                for n in neutron.list_routers()['routers']
                if self._check_prefix(n['name'], prefix)]

    def _security_groups(self, neutron, prefix):
        return [(n['id'], n['name'])
                for n in neutron.list_security_groups()['security_groups']
                if self._check_prefix(n['name'], prefix)]

    def _servers(self, nova, prefix):
        return [(s.id, s.human_id)
                for s in nova.servers.list()
                if self._check_prefix(s.human_id, prefix)]

    def _key_pairs(self, nova, prefix):
        return [(kp.id, kp.name)
                for kp in nova.keypairs.list()
                if self._check_prefix(kp.name, prefix)]

    def _floatingips(self, neutron, prefix):
        # floating ips have no name, so all of them are listed
        return [(ip['id'], ip['floating_ip_address'])
                for ip in neutron.list_floatingips()['floatingips']]

    def _ports(self, neutron, prefix):
        return [(p['id'], p['name'])
                for p in neutron.list_ports()['ports']
                if self._check_prefix(p['name'], prefix)]

    def _volumes(self, cinder, prefix):
        return [(v.id, v.name) for v in cinder.volumes.list()
                if self._check_prefix(v.name, prefix)]

    def _check_prefix(self, name, prefix):
        # some openstack resources (e.g. volumes) can have no display_name,
        # in which case it's None
        return name is None or name.startswith(prefix)
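
    # e.g. _check_prefix('systest-net', 'systest') -> True
    #      _check_prefix('other-net', 'systest')   -> False
    #      _check_prefix(None, 'systest')          -> True, so that unnamed
    #      resources stay in scope and are not silently leaked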

    def _remove_keys(self, dct, keys):
        for key in keys:
            if key in dct:
                del dct[key]
        return dct

    @contextmanager
    def _handled_exception(self, resource_id, failed, resource_group):
        try:
            yield
        except BaseException as ex:
            failed[resource_group][resource_id] = ex


handler = OpenstackHandler
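
# Minimal usage sketch (hypothetical wiring; the test framework normally
# drives this): snapshot the infra state, run a test, then remove only what
# the test created.
#     ctx = OpenstackCleanupContext('my-test', env)
#     ...  # run the test
#     ctx.cleanup()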