Add support to process NSST Selection for HAS
[optf/has.git] / conductor / conductor / solver / service.py
#
# -------------------------------------------------------------------------
#   Copyright (c) 2015-2017 AT&T Intellectual Property
#   Copyright (C) 2020 Wipro Limited.
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
# -------------------------------------------------------------------------
#

import collections
import cotyledon
import json
import socket
import time
import traceback

from oslo_config import cfg
from oslo_log import log

from conductor.common import db_backend
from conductor.common.models import country_latency
from conductor.common.models import order_lock
from conductor.common.models.order_lock import OrderLock
from conductor.common.models import order_lock_history
from conductor.common.models import plan
from conductor.common.models import region_placeholders
from conductor.common.models import triage_tool
from conductor.common.music import messaging as music_messaging
from conductor.common.music.model import base
import conductor.common.prometheus_metrics as PC
from conductor.common.utils import conductor_logging_util as log_util
from conductor.i18n import _LE
from conductor.i18n import _LI
from conductor import messaging
from conductor import service
from conductor.solver.optimizer import optimizer
from conductor.solver.request import parser
from conductor.solver.utils import constraint_engine_interface as cei

# To use oslo.log in services:
#
# 0. Note that conductor.service.prepare_service() bootstraps this.
#    It's set up within conductor.cmd.SERVICENAME.
# 1. Add "from oslo_log import log"
# 2. Also add "LOG = log.getLogger(__name__)"
# 3. For i18n support, import appropriate shortcuts as well:
#    "from i18n import _, _LC, _LE, _LI, _LW  # noqa"
#    (that's for primary, critical, error, info, warning)
# 4. Use LOG.info, LOG.warning, LOG.error, LOG.critical, LOG.debug, e.g.:
#    "LOG.info(_LI("Something happened with {}").format(thingie))"
# 5. Do NOT put translation wrappers around any LOG.debug text.
# 6. Be liberal with logging, especially in the absence of unit tests!
# 7. Calls to print() are verboten within the service proper.
#    Logging can be redirected! (In a CLI-side script, print() is fine.)
#
# Usage: http://docs.openstack.org/developer/oslo.i18n/usage.html

LOG = log.getLogger(__name__)

# To use oslo.config in services:
#
# 0. Note that conductor.service.prepare_service() bootstraps this.
#    It's set up within conductor.cmd.SERVICENAME.
# 1. Add "from oslo_config import cfg"
# 2. Also add "CONF = cfg.CONF"
# 3. Set a list of locally used options (SOLVER_OPTS is fine).
#    Choose key names thoughtfully. Be technology-agnostic, avoid TLAs, etc.
# 4. Register, e.g. "CONF.register_opts(SOLVER_OPTS, group='solver')"
# 5. Add file reference to opts.py (may need to use itertools.chain())
# 6. Run tox -e genconfig to build a new config template.
# 7. If you want to load an entire config from a CLI you can do this:
#    "conf = service.prepare_service([], config_files=[CONFIG_FILE])"
# 8. You can even use oslo_config from a CLI and override values on the fly,
#    e.g., "CONF.set_override('hostnames', ['music2'], 'music_api')"
#    (leave the third arg out to use the DEFAULT group).
# 9. Loading a config from a CLI is optional. So long as all the options
#    have defaults (or you override them as needed), it should all work.
#
# Docs: http://docs.openstack.org/developer/oslo.config/

CONF = cfg.CONF

SOLVER_OPTS = [
    cfg.IntOpt('workers',
               default=1,
               min=1,
               help='Number of workers for solver service. '
                    'Default value is 1.'),
    cfg.IntOpt('solver_timeout',
               default=480,
               min=1,
               help='The timeout value for solver service. '
                    'Default value is 480 seconds.'),
    cfg.BoolOpt('concurrent',
                default=False,
                help='Set to True when solver will run in active-active '
                     'mode. When set to False, solver will restart any '
                     'orphaned solving requests at startup.'),
    cfg.IntOpt('timeout',
               default=600,
               min=1,
               help='Timeout for detecting that a VM is down so that other '
                    'VMs can pick the plan up. This value should be larger '
                    'than solver_timeout. '
                    'Default value is 600 seconds (10 minutes).'),
    cfg.IntOpt('max_solver_counter',
               default=1,
               min=1,
               help='Maximum number of times a plan may be picked up for '
                    'solving before it is marked as an error. '
                    'Default value is 1.')
]

CONF.register_opts(SOLVER_OPTS, group='solver')
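
# An illustrative [solver] section of a conductor configuration file using
# the defaults registered above (example values, not recommendations):
#
#     [solver]
#     workers = 1
#     solver_timeout = 480
#     concurrent = False
#     timeout = 600
#     max_solver_counter = 1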

# Pull in service opts. We use them here.
OPTS = service.OPTS
CONF.register_opts(OPTS)


class SolverServiceLauncher(object):
    """Launcher for the solver service."""
    def __init__(self, conf):

        self.conf = conf

        # Set up Music access.
        self.music = db_backend.get_client()
        self.music.keyspace_create(keyspace=conf.keyspace)

        # Dynamically create a plan class for the specified keyspace
        self.Plan = base.create_dynamic_model(
            keyspace=conf.keyspace, baseclass=plan.Plan, classname="Plan")
        self.OrderLock = base.create_dynamic_model(
            keyspace=conf.keyspace, baseclass=order_lock.OrderLock, classname="OrderLock")
        self.OrderLockHistory = base.create_dynamic_model(
            keyspace=conf.keyspace, baseclass=order_lock_history.OrderLockHistory, classname="OrderLockHistory")
        self.RegionPlaceholders = base.create_dynamic_model(
            keyspace=conf.keyspace, baseclass=region_placeholders.RegionPlaceholders, classname="RegionPlaceholders")
        self.CountryLatency = base.create_dynamic_model(
            keyspace=conf.keyspace, baseclass=country_latency.CountryLatency, classname="CountryLatency")
        self.TriageTool = base.create_dynamic_model(
            keyspace=conf.keyspace, baseclass=triage_tool.TriageTool, classname="TriageTool")
        # self.Groups = base.create_dynamic_model(
        #    keyspace=conf.keyspace, baseclass=groups.Groups, classname="Groups")
        # self.GroupRules = base.create_dynamic_model(
        #    keyspace=conf.keyspace, baseclass=group_rules.GroupRules, classname="GroupRules")

        # Initialize Prometheus metrics Endpoint
        # Solver service uses index 1
        PC._init_metrics(1)

        # Fail fast if any of the dynamic models could not be created.
        # (A bare "raise" outside an except block would only produce a
        # confusing RuntimeError, so raise explicit errors instead.)
        if not self.Plan:
            raise RuntimeError("Unable to create the Plan model")
        if not self.OrderLock:
            raise RuntimeError("Unable to create the OrderLock model")
        if not self.OrderLockHistory:
            raise RuntimeError("Unable to create the OrderLockHistory model")
        if not self.RegionPlaceholders:
            raise RuntimeError("Unable to create the RegionPlaceholders model")
        if not self.CountryLatency:
            raise RuntimeError("Unable to create the CountryLatency model")
        if not self.TriageTool:
            raise RuntimeError("Unable to create the TriageTool model")

    def run(self):
        kwargs = {'plan_class': self.Plan,
                  'order_locks': self.OrderLock,
                  'order_locks_history': self.OrderLockHistory,
                  'region_placeholders': self.RegionPlaceholders,
                  'country_latency': self.CountryLatency,
                  'triage_tool': self.TriageTool
                  }
        # kwargs = {'plan_class': self.Plan}
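        # cotyledon's ServiceManager spawns the configured number of worker
        # processes; each worker constructs SolverService(worker_id, conf,
        # **kwargs) and runs it until the manager is terminated.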
        svcmgr = cotyledon.ServiceManager()
        svcmgr.add(SolverService,
                   workers=self.conf.solver.workers,
                   args=(self.conf,), kwargs=kwargs)
        svcmgr.run()


class SolverService(cotyledon.Service):
    """Solver service."""

    # This will appear in 'ps xaf'
    name = "Conductor Solver"

    regions = collections.OrderedDict()
    countries = list()

    def __init__(self, worker_id, conf, **kwargs):
        """Initializer"""

        LOG.debug("{}".format(self.__class__.__name__))
        super(SolverService, self).__init__(worker_id)
        self._init(conf, **kwargs)
        self.running = True

    def _init(self, conf, **kwargs):
        """Set up the necessary ingredients."""
        self.conf = conf
        self.kwargs = kwargs

        self.Plan = kwargs.get('plan_class')
        self.OrderLock = kwargs.get('order_locks')
        self.OrderLockHistory = kwargs.get('order_locks_history')
        self.RegionPlaceholders = kwargs.get('region_placeholders')
        self.CountryLatency = kwargs.get('country_latency')
        self.TriageTool = kwargs.get('triage_tool')

        # Set up the RPC service(s) we want to talk to.
        self.data_service = self.setup_rpc(conf, "data")

        # Set up the cei and optimizer
        self.cei = cei.ConstraintEngineInterface(self.data_service)
        # self.optimizer = optimizer.Optimizer(conf)

        # Set up Music access.
        self.music = db_backend.get_client()
        self.solver_owner_condition = {
            "solver_owner": socket.gethostname()
        }
        self.translated_status_condition = {
            "status": self.Plan.TRANSLATED
        }
        self.solving_status_condition = {
            "status": self.Plan.SOLVING
        }
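        # These dicts serve as compare-and-set predicates for conditional
        # updates in MUSIC: an update succeeds only while the row still
        # matches the given column value, which is what lets several solver
        # instances race for the same plan without claiming it twice.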

        if not self.conf.solver.concurrent:
            self._reset_solving_status()

    def _gracefully_stop(self):
        """Gracefully stop working on things"""
        pass

    def current_time_seconds(self):
        """Current time in seconds."""
        return int(round(time.time()))

    def _reset_solving_status(self):
        """Reset plans being solved so they are solved again.

        Use this only when the solver service is not running concurrently.
        """

        plans = self.Plan.query.get_plan_by_col("status", self.Plan.SOLVING)
        for the_plan in plans:
            the_plan.status = self.Plan.TRANSLATED
            # Used only in active-passive mode, so the update doesn't have to be atomic.
            the_plan.update()

    def _restart(self):
        """Prepare to restart the service"""
        pass

    def millisec_to_sec(self, millisec):
        """Convert milliseconds to seconds"""
        return millisec / 1000

    def setup_rpc(self, conf, topic):
        """Set up the RPC Client"""
        # TODO(jdandrea): Put this pattern inside music_messaging?
        transport = messaging.get_transport(conf=conf)
        target = music_messaging.Target(topic=topic)
        client = music_messaging.RPCClient(conf=conf,
                                           transport=transport,
                                           target=target)
        return client

    def run(self):
        """Run"""
        LOG.debug("{}".format(self.__class__.__name__))
        # TODO(snarayanan): This is really meant to be a control loop
        # As long as self.running is true, we process another request.

        while self.running:

            # Delay time (seconds) between MUSIC requests.
            time.sleep(self.conf.delay_time)

            # plans = Plan.query().all()
            # Find the first plan with a status of TRANSLATED.
            # Change its status to SOLVING.
            # Then, read the "translated" field as "template".
            json_template = None
            p = None

            requests_to_solve = dict()
            regions_maps = dict()
            country_groups = list()

            # Instead of using the query.all() method, an index was created
            # for the 'status' field in the conductor.plans table, so plans
            # can be queried by their status column.
            translated_plans = self.Plan.query.get_plan_by_col("status", self.Plan.TRANSLATED)
            solving_plans = self.Plan.query.get_plan_by_col("status", self.Plan.SOLVING)

            # Combine the plans with status 'translated' and 'solving'.
            plans = translated_plans + solving_plans

            found_translated_template = False

            for p in plans:
                if p.status == self.Plan.TRANSLATED:
                    json_template = p.translation
                    found_translated_template = True
                    break
                elif p.status == self.Plan.SOLVING and (self.current_time_seconds()
                                                        - self.millisec_to_sec(p.updated)) > self.conf.solver.timeout:
                    # Reclaim a plan whose solver appears to have died.
                    p.status = self.Plan.TRANSLATED
                    p.update(condition=self.solving_status_condition)
                    break

            if not json_template:
                if found_translated_template:
                    message = _LE("Plan {} status is translated, yet "
                                  "the translation wasn't found").format(p.id)
                    LOG.error(message)
                    p.status = self.Plan.ERROR
                    p.message = message
                    p.update(condition=self.translated_status_condition)
                continue

            if found_translated_template and p and p.solver_counter >= self.conf.solver.max_solver_counter:
                message = _LE("Tried {} times; plan {} could not be solved").format(self.conf.solver.max_solver_counter,
                                                                                    p.id)
                LOG.error(message)
                p.status = self.Plan.ERROR
                p.message = message
                p.update(condition=self.translated_status_condition)
                continue

            log_util.setLoggerFilter(LOG, self.conf.keyspace, p.id)

            p.status = self.Plan.SOLVING
            p.solver_counter += 1
            p.solver_owner = socket.gethostname()

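            # Atomically claim the plan: the conditional update succeeds only
            # if the row is still in the TRANSLATED state. The MUSIC client is
            # assumed to include a 'FAILURE' marker in its response when the
            # condition does not hold (see the checks below).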
            _is_updated = p.update(condition=self.translated_status_condition)
            if not _is_updated:
                continue

            # Another VM has already updated the status and started solving the plan.
            if 'FAILURE' in _is_updated:
                continue

            LOG.info(_LI("Solving starts, changing the template status from translated to solving, "
                         "atomic update response from MUSIC {}").format(_is_updated))

            LOG.info(_LI("Plan {} with request id {} is being solved by machine {}. Solve attempt number: {}.").
                     format(p.id, p.name, p.solver_owner, p.solver_counter))

            _is_success = "FAILURE"
            request = parser.Parser()
            request.cei = self.cei
            request.request_id = p.name
            request.plan_id = p.id
            # Get the number of solutions that need to be provided.
            # recommend_max may arrive as an int or a string, so normalize it.
            num_solution = str(getattr(p, 'recommend_max', '1'))
            if num_solution.isdigit():
                num_solution = int(num_solution)

            # TODO(inam/larry): move this part of logic inside of parser and don't apply it to distance_between
            try:
                # Get region placeholders from the database and put them into the regions_maps dictionary.
                region_placeholders = self.RegionPlaceholders.query.all()
                for region in region_placeholders:
                    regions_maps.update(region.countries)

                # Get country groups from the database and insert them into the country_groups list.
                customer_loc = ''
                location_list = json_template["conductor_solver"]["locations"]
                for location_id, location_info in location_list.items():
                    customer_loc = location_info['country']

                countries = self.CountryLatency.query.get_plan_by_col("country_name", customer_loc)
                LOG.info("Customer Location for Latency Reduction " + customer_loc)

                if len(countries) == 0:
                    LOG.info("Country is not present in the country latency table, looking for the '*' wildcard entry")
                    countries = self.CountryLatency.query.get_plan_by_col("country_name", "*")
                    if len(countries) != 0:
                        LOG.info("Found the '*' wildcard entry in the country latency table")
                    else:
                        msg = "No '*' wildcard entry found in the country latency table. No solution will be provided"
                        LOG.info(msg)
                        p.message = msg

                # Note: if several rows match, only the last row's groups are kept.
                for country in countries:
                    country_groups = country.groups

                LOG.info("Done getting Latency Country DB Groups")
            except Exception as error_msg:
                LOG.error("Exception thrown while reading region_placeholders and country groups information "
                          "from database. Exception message: {}".format(error_msg))

            try:
                request.parse_template(json_template, country_groups, regions_maps)
                # Note: the spelling below matches the parser module's actual method name.
                request.assgin_constraints_to_demands()
                requests_to_solve[p.id] = request
                opt = optimizer.Optimizer(self.conf, _requests=requests_to_solve)
                solution_list = opt.get_solution(num_solution)
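                # solution_list is expected to be a list of candidate
                # solutions, each one a dict mapping a demand name to the
                # attributes of the resource selected for it (consumed by
                # the assembly loop below).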

            except Exception as err:
                message = _LE("Plan {} status encountered a "
                              "parsing error: {}").format(p.id, err)
                LOG.error(traceback.format_exc())
                p.status = self.Plan.ERROR
                p.message = message
                while 'FAILURE' in _is_success:
                    _is_success = p.update(condition=self.solver_owner_condition)
                    LOG.info(_LI("Encountered a parsing error, changing the template status from solving to error, "
                                 "atomic update response from MUSIC {}").format(_is_success))

                continue

            LOG.info("Preparing the recommendations ")
            # Check whether the order is an 'initial' or a 'speed changed' one.
            is_speed_change = False
            if request and request.request_type == 'speed changed':
                is_speed_change = True

            recommendations = []
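            # "recommendations" is assembled below as a list with one dict per
            # solution, each mapping a demand name to its selected candidate
            # record (inventory_provider, service_resource_id, candidate,
            # attributes).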
            if not solution_list:
                # when the order takes too much time to solve
                if (self.current_time_seconds() - self.millisec_to_sec(p.updated)) > self.conf.solver.solver_timeout:
                    message = _LI("Plan {} timed out, exceeding the expected "
                                  "time of {} seconds").format(p.id, self.conf.solver.solver_timeout)

                # when no solution is found
                else:
                    message = _LI("Plan {} search failed, no "
                                  "recommendations found by machine {}").format(p.id, p.solver_owner)
                LOG.info(message)
                # Update the plan status
                p.status = self.Plan.NOT_FOUND
                p.message = message

                # Metrics to Prometheus
                m_svc_name = p.template.get('parameters', {}).get('service_name', 'N/A')
                PC.VNF_FAILURE.labels('ONAP', m_svc_name).inc()

                while 'FAILURE' in _is_success:
                    _is_success = p.update(condition=self.solver_owner_condition)
                    LOG.info(_LI("Plan search failed, changing the template status from solving to not found, "
                                 "atomic update response from MUSIC {}").format(_is_success))
            else:
                # Assemble recommendation result JSON
                for solution in solution_list:
                    current_rec = dict()
                    for demand_name in solution:
                        resource = solution[demand_name]

                        if not is_speed_change:
                            is_rehome = "false"
                        else:
                            is_rehome = "false" if resource.get("existing_placement") == 'true' else "true"

                        location_id = "" if resource.get("cloud_region_version") == '2.5' \
                                      else resource.get("location_id")

                        rec = {
                            # FIXME(shankar) A&AI must not be hardcoded here.
                            # Also, account for more than one Inventory Provider.
                            "inventory_provider": "aai",
                            "service_resource_id":
                                resource.get("service_resource_id"),
                            "candidate": {
                                "candidate_id": resource.get("candidate_id"),
                                "inventory_type": resource.get("inventory_type"),
                                "cloud_owner": resource.get("cloud_owner"),
                                "location_type": resource.get("location_type"),
                                "location_id": location_id,
                                "is_rehome": is_rehome},
                            "attributes": {
                                "physical-location-id":
                                    resource.get("physical_location_id"),
                                "cloud_owner": resource.get("cloud_owner"),
                                'aic_version': resource.get("cloud_region_version")},
                        }

                        # Slice-related inventory types (including "nsst" for
                        # NSST selection) pass the entire resource through as
                        # the candidate.
                        if rec["candidate"]["inventory_type"] in ["nssi", "nsi", "slice_profiles", "nst", "nsst"]:
                            rec["candidate"] = resource

                        if resource.get('vim-id'):
                            rec["candidate"]['vim-id'] = resource.get('vim-id')

                        if rec["candidate"]["inventory_type"] == "service":
                            rec["attributes"]["host_id"] = resource.get("host_id")
                            rec["attributes"]["service_instance_id"] = resource.get("candidate_id")
                            rec["candidate"]["host_id"] = resource.get("host_id")

                            if resource.get('vlan_key'):
                                rec["attributes"]['vlan_key'] = resource.get('vlan_key')
                            if resource.get('port_key'):
                                rec["attributes"]['port_key'] = resource.get('port_key')

                        if rec["candidate"]["inventory_type"] == "vfmodule":
                            rec["attributes"]["host_id"] = resource.get("host_id")
                            rec["attributes"]["service_instance_id"] = resource.get("service_instance_id")
                            rec["candidate"]["host_id"] = resource.get("host_id")

                            if resource.get('vlan_key'):
                                rec["attributes"]['vlan_key'] = resource.get('vlan_key')
                            if resource.get('port_key'):
                                rec["attributes"]['port_key'] = resource.get('port_key')

                            vf_module_data = rec["attributes"]
                            vf_module_data['nf-name'] = resource.get("nf-name")
                            vf_module_data['nf-id'] = resource.get("nf-id")
                            vf_module_data['nf-type'] = resource.get("nf-type")
                            vf_module_data['vnf-type'] = resource.get("vnf-type")
                            vf_module_data['vf-module-id'] = resource.get("vf-module-id")
                            vf_module_data['vf-module-name'] = resource.get("vf-module-name")
                            vf_module_data['ipv4-oam-address'] = resource.get("ipv4-oam-address")
                            vf_module_data['ipv6-oam-address'] = resource.get("ipv6-oam-address")
                            vf_module_data['vservers'] = resource.get("vservers")

                        elif rec["candidate"]["inventory_type"] == "cloud":
                            if resource.get("all_directives") and resource.get("flavor_map"):
                                rec["attributes"]["directives"] = \
                                    self.set_flavor_in_flavor_directives(
                                        resource.get("flavor_map"), resource.get("all_directives"))

                                # Metrics to Prometheus
                                m_vim_id = resource.get("vim-id")
                                m_hpa_score = resource.get("hpa_score", 0)
                                m_svc_name = p.template['parameters'].get(
                                    'service_name', 'N/A')
                                for vnfc, flavor in resource.get("flavor_map").items():
                                    PC.VNF_COMPUTE_PROFILES.labels('ONAP',
                                                                   m_svc_name,
                                                                   demand_name,
                                                                   vnfc,
                                                                   flavor,
                                                                   m_vim_id).inc()

                                PC.VNF_SCORE.labels('ONAP', m_svc_name,
                                                    demand_name,
                                                    m_hpa_score).inc()

                            if resource.get('conflict_id'):
                                rec["candidate"]["conflict_id"] = resource.get("conflict_id")

                        if resource.get('passthrough_attributes'):
                            for key, value in resource.get('passthrough_attributes').items():
                                if key in rec["attributes"]:
                                    LOG.error('Passthrough attribute {} in demand {} already exists for candidate {}'.
                                              format(key, demand_name, rec["candidate"]["candidate_id"]))
                                else:
                                    rec["attributes"][key] = value
                        # TODO(snarayanan): Add total value to recommendations?
                        # msg = "--- total value of decision = {}"
                        # LOG.debug(msg.format(_best_path.total_value))
                        # msg = "--- total cost of decision = {}"
                        # LOG.debug(msg.format(_best_path.total_cost))
                        current_rec[demand_name] = rec

                    recommendations.append(current_rec)

                # Update the plan with the solution
                p.solution = {
                    "recommendations": recommendations
                }
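                # Illustrative shape of the stored solution (the demand name
                # "vG" and the values are hypothetical):
                #   {"recommendations": [
                #       {"vG": {"inventory_provider": "aai",
                #               "service_resource_id": "...",
                #               "candidate": {...},
                #               "attributes": {...}}}]}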

                # multiple spin-ups logic
                '''
                Go through the list of recommendations in the solution.
                For cloud candidates, check whether (cloud-region-id + e2evnfkey) is in the order_locks table.
                If so, insert a row with status 'parked' in order_locks and change the plan status to 'pending'
                in the plans table (or another status value).
                Otherwise, insert a row with status 'locked' in order_locks, change the status to 'solved' in the
                plans table, and continue with the reservation.
                '''
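                # Sketch of an order_locks row as read and written below:
                # id = conflict_id, plans = {plan_id: JSON string carrying
                # "status", "created", "updated", "service_resource_id" and,
                # optionally, "dependencies"}, plus the is_spinup_completed
                # flag and spinup_completed_timestamp.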

                # Clean up the records in the order_locks table, deleting all records that failed from MSO.
                order_locks = self.OrderLock.query.all()
                for order_lock_record in order_locks:

                    plans = getattr(order_lock_record, 'plans')
                    for plan_id, plan_attributes in plans.items():
                        plan_dict = json.loads(plan_attributes)

                        if plan_dict.get('status', None) == OrderLock.FAILED:
                            order_lock_record.delete()
                            LOG.info(_LI("The order lock record {} with status {} is deleted (due to a failed"
                                         " spinup from MSO) from the order_locks table").
                                     format(order_lock_record, plan_dict.get('status')))
                            break

                inserted_order_records_dict = dict()
                available_dependencies_set = set()

                is_inserted_to_order_locks = True
                is_conflict_id_missing = False
                is_order_translated_before_spinup = False
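                # Flag semantics (as used below):
                # - is_inserted_to_order_locks: flipped to False when a MUSIC
                #   insert fails, which marks the plan as ERROR.
                # - is_conflict_id_missing: a cloud candidate arrived without
                #   a conflict_id; the plan is marked SOLVED directly.
                # - is_order_translated_before_spinup: a new order was
                #   translated before the older order's spinup completed, so
                #   the plan is rehomed.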

                for solution in solution_list:

                    for demand_name, candidate in solution.items():
                        if candidate.get('inventory_type') == 'cloud':
                            conflict_id = candidate.get('conflict_id')
                            service_resource_id = candidate.get('service_resource_id')
                            # TODO(larry): add more logic for missing conflict_id in template
                            if not conflict_id:
                                is_conflict_id_missing = True
                                break

                            available_dependencies_set.add(conflict_id)
                            # check if conflict_id exists in order_locks table
                            order_lock_record = self.OrderLock.query.get_plan_by_col("id", conflict_id)
                            if order_lock_record:
                                is_spinup_completed = getattr(order_lock_record[0], 'is_spinup_completed')
                                spinup_completed_timestamp = getattr(order_lock_record[0],
                                                                     'spinup_completed_timestamp')
                                if is_spinup_completed and spinup_completed_timestamp > p.translation_begin_timestamp:
                                    is_order_translated_before_spinup = True
                                    break
                                elif not is_spinup_completed:
                                    inserted_order_records_dict[conflict_id] = service_resource_id

                if is_conflict_id_missing:
                    message = _LE("Missing conflict identifier field for cloud candidates in the template, "
                                  "could not insert into the order_locks table")
                    LOG.debug(message)
                    p.status = self.Plan.SOLVED

                elif is_order_translated_before_spinup:
                    message = _LE("Retriggering Plan {} because a new order arrived before the "
                                  "spinup of the old order completed").format(p.id)
                    LOG.debug(message)
                    p.rehome_plan()

                elif len(inserted_order_records_dict) > 0:

                    new_dependencies_set = available_dependencies_set - set(inserted_order_records_dict.keys())
                    dependencies = ','.join(str(s) for s in new_dependencies_set)

                    for conflict_id, service_resource_id in inserted_order_records_dict.items():
                        plan = {
                            p.id: {
                                "status": OrderLock.UNDER_SPIN_UP,
                                "created": self.current_time_millis(),
                                "updated": self.current_time_millis(),
                                "service_resource_id": service_resource_id
                            }
                        }

                        if dependencies:
                            plan[p.id]['dependencies'] = dependencies

                        order_lock_row = self.OrderLock(id=conflict_id, plans=plan)
                        response = order_lock_row.insert()

                        # TODO(larry): add more logs for inserting order lock record (insert/update)
                        LOG.info(_LI("Inserting the order lock record into the order_locks table in MUSIC, "
                                     "conditional insert operation response from MUSIC {}").format(response))
                        if response and response.status_code == 200:
                            body = response.json()
                            LOG.info("Successfully inserted the record in the order_locks table with "
                                     "the following response message {}".format(body))
                        else:
                            is_inserted_to_order_locks = False
                else:
                    for solution in solution_list:
                        for demand_name, candidate in solution.items():
                            if candidate.get('inventory_type') == 'cloud':
                                conflict_id = candidate.get('conflict_id')
                                service_resource_id = candidate.get('service_resource_id')

                                order_lock_record = self.OrderLock.query.get_plan_by_col("id", conflict_id)
                                if order_lock_record:
                                    deleting_record = order_lock_record[0]
                                    plans = getattr(deleting_record, 'plans')
                                    is_spinup_completed = getattr(deleting_record, 'is_spinup_completed')
                                    spinup_completed_timestamp = getattr(deleting_record, 'spinup_completed_timestamp')

                                    if is_spinup_completed:
                                        # persist the record in the order_locks_history table
                                        order_lock_history_record = \
                                            self.OrderLockHistory(conflict_id=conflict_id, plans=plans,
                                                                  is_spinup_completed=is_spinup_completed,
                                                                  spinup_completed_timestamp=spinup_completed_timestamp
                                                                  )
                                        LOG.debug("Inserting the history record with conflict id {}"
                                                  " into the order_locks_history table".format(conflict_id))
                                        order_lock_history_record.insert()
                                        # remove the older record
                                        LOG.debug("Deleting the order lock record {} from the order_locks table"
                                                  .format(deleting_record))
                                        deleting_record.delete()

                                plan = {
                                    p.id: {
                                        "status": OrderLock.UNDER_SPIN_UP,
                                        "created": self.current_time_millis(),
                                        "updated": self.current_time_millis(),
                                        "service_resource_id": service_resource_id
                                    }
                                }
                                order_lock_row = self.OrderLock(id=conflict_id, plans=plan)
                                response = order_lock_row.insert()
                                # TODO(larry): add more logs for inserting order lock record (insert/update)
                                LOG.info(_LI("Inserting the order lock record into the order_locks table in MUSIC, "
                                             "conditional insert operation response from MUSIC {}").format(response))
                                if response and response.status_code == 200:
                                    body = response.json()
                                    LOG.info("Successfully inserted the record in the order_locks table "
                                             "with the following response message {}".format(body))
                                else:
                                    is_inserted_to_order_locks = False

                if not is_inserted_to_order_locks:
                    message = _LE("Plan {} encountered an error while "
                                  "inserting the order lock message into MUSIC.").format(p.id)
                    LOG.error(message)
                    p.status = self.Plan.ERROR
                    p.message = message

                elif p.status == self.Plan.SOLVING:
                    if len(inserted_order_records_dict) > 0:
                        LOG.info(_LI("The plan with id {} is parked in the order_locks table, "
                                     "waiting for MSO release calls").format(p.id))
                        p.status = self.Plan.WAITING_SPINUP
                    else:
                        LOG.info(_LI("The plan with id {} is inserted in the order_locks table.").
                                 format(p.id))
                        p.status = self.Plan.SOLVED

            while 'FAILURE' in _is_success \
                  and (self.current_time_seconds() - self.millisec_to_sec(p.updated)) <= self.conf.solver.timeout:
                _is_success = p.update(condition=self.solver_owner_condition)
                LOG.info(_LI("Plan search complete, changing the template status from solving to {}, "
                             "atomic update response from MUSIC {}").format(p.status, _is_success))

            LOG.info(_LI("Plan {} search complete, {} solution(s) found by machine {}").
                     format(p.id, len(solution_list), p.solver_owner))
            LOG.debug("Plan {} detailed solution: {}".
                      format(p.id, p.solution))
            LOG.info("Plan name: {}".format(p.name))

    def terminate(self):
        """Terminate"""
        LOG.debug("{}".format(self.__class__.__name__))
        self.running = False
        self._gracefully_stop()
        super(SolverService, self).terminate()

    def reload(self):
        """Reload"""
        LOG.debug("{}".format(self.__class__.__name__))
        self._restart()

    def current_time_millis(self):
        """Current time in milliseconds."""
        return int(round(time.time() * 1000))

    def set_flavor_in_flavor_directives(self, flavor_map, directives):
        '''Insert the flavor names from flavor_map into the flavor_directives.

        :param flavor_map: the flavor map from the resource
        :param directives: all the directives from the request
        '''
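        # Assumed input shape, inferred from the traversal below:
        #   directives = {"directives": [
        #       {"directives": [
        #           {"type": "flavor_directives",
        #            "attributes": [
        #                {"attribute_name": "<vnfc name>",
        #                 "attribute_value": ""}, ...]}, ...]}, ...]}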
        keys = list(flavor_map.keys())     # Python 3 conversion -- dict object to list object
        for ele in directives.get("directives"):
            for item in ele.get("directives"):
                if "flavor_directives" in item.get("type"):
                    for attr in item.get("attributes"):
                        attr["attribute_value"] = flavor_map.get(attr["attribute_name"]) \
                            if attr.get("attribute_name") in keys else ""
        return directives