Make components policy reconfigurable and more 37/11737/2
author Michael Hwang <mhwang@research.att.com>
Thu, 7 Sep 2017 15:29:16 +0000 (11:29 -0400)
committer Michael Hwang <mhwang@research.att.com>
Mon, 11 Sep 2017 19:47:38 +0000 (15:47 -0400)
* Add the dcaepolicy dependency and policy decorators to tasks
* Fetch Docker logins dynamically from Consul
* Add a policy example blueprint
* Add a policy update operation for components

Issue-Id: DCAEGEN2-97
Change-Id: Ib58adfbd7070999c7b8e59ab008f5ff90d4984a7
Signed-off-by: Michael Hwang <mhwang@research.att.com>
docker/ChangeLog.md
docker/docker-node-type.yaml
docker/dockerplugin/__init__.py
docker/dockerplugin/discovery.py
docker/dockerplugin/tasks.py
docker/examples/blueprint-laika-policy.yaml [new file with mode: 0644]
docker/examples/blueprint-laika.yaml
docker/requirements.txt
docker/setup.py

diff --git a/docker/ChangeLog.md b/docker/ChangeLog.md
index 5094816..0d0eafc 100644
@@ -5,6 +5,13 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](http://keepachangelog.com/) 
 and this project adheres to [Semantic Versioning](http://semver.org/).
 
+## [2.4.0]
+
+* Change *components* to be policy reconfigurable:
+    - Add a policy update operation
+    - Add policy decorators to tasks so that the application configuration is merged with policy updates
+* Fetch Docker logins from Consul
+
 ## [2.3.0+t.0.3]
 
 * Enhance `SelectedDockerHost` node type with `name_search` and add default to `docker_host_override`
diff --git a/docker/docker-node-type.yaml b/docker/docker-node-type.yaml
index b1bf64c..7efc84d 100644
@@ -7,7 +7,7 @@ plugins:
   docker:
     executor: 'central_deployment_agent'
     package_name: dockerplugin
-    package_version: 2.3.0+t.0.3
+    package_version: 2.4.0
 
 node_types:
     # The DockerContainerForComponents node type is to be used for DCAE service components that 
@@ -83,6 +83,10 @@ node_types:
                 delete:
                     # Delete configuration from Consul
                     implementation: docker.dockerplugin.cleanup_discovery
+            dcae.interfaces.policy:
+                # This is to be invoked by the policy handler upon policy updates
+                policy_update:
+                    implementation: docker.dockerplugin.policy_update
 
 
     # This node type is intended for DCAE service components that use DMaaP and must use the 
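
Note on invoking the new interface: the policy handler is expected to drive
dcae.interfaces.policy.policy_update through Cloudify's built-in
execute_operation workflow. A minimal sketch using cloudify-rest-client
(manager host, deployment id, target node, and payload are all hypothetical):

    from cloudify_rest_client import CloudifyClient

    client = CloudifyClient(host="cloudify-manager")  # hypothetical manager host
    client.executions.start(
        deployment_id="laika-policy",                 # hypothetical deployment
        workflow_id="execute_operation",
        parameters={
            "operation": "dcae.interfaces.policy.policy_update",
            "node_ids": ["laika-zero"],               # hypothetical target node
            "operation_kwargs": {
                "updated_policies": [{"some-param": "new value"}]},
        })
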
diff --git a/docker/dockerplugin/__init__.py b/docker/dockerplugin/__init__.py
index ef1bfec..669e196 100644
@@ -27,4 +27,5 @@ from .tasks import create_for_components, create_for_components_with_streams, \
         create_and_start_container_for_components_with_streams, \
         create_for_platforms, create_and_start_container, \
         create_and_start_container_for_components, create_and_start_container_for_platforms, \
-        stop_and_remove_container, cleanup_discovery, select_docker_host, unselect_docker_host
+        stop_and_remove_container, cleanup_discovery, select_docker_host, unselect_docker_host, \
+        policy_update
diff --git a/docker/dockerplugin/discovery.py b/docker/dockerplugin/discovery.py
index 03a51f6..8361c13 100644
@@ -38,6 +38,9 @@ class DiscoveryConnectionError(RuntimeError):
 class DiscoveryServiceNotFoundError(RuntimeError):
     pass
 
+class DiscoveryKVEntryNotFoundError(RuntimeError):
+    pass
+
 
 def _wrap_consul_call(consul_func, *args, **kwargs):
     """Wrap Consul call to map errors"""
@@ -84,6 +87,20 @@ def remove_service_component_config(kv_conn, service_component_name):
     kv_delete_func(service_component_name)
 
 
+def get_kv_value(kv_conn, key):
+    """Get a key-value entry's value from Consul
+
+    Raises DiscoveryKVEntryNotFoundError if entry not found
+    """
+    kv_get_func = partial(_wrap_consul_call, kv_conn.kv.get)
+    (index, val) = kv_get_func(key)
+
+    if val:
+        return json.loads(val['Value']) # will raise ValueError if not JSON, let it propagate
+    else:
+        raise DiscoveryKVEntryNotFoundError("{0} kv entry not found".format(key))
+
+
 def _create_rel_key(service_component_name):
     return "{0}:rel".format(service_component_name)
 
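
A usage sketch for the new helper (the Consul host is a placeholder); callers
are expected to treat a missing entry as a hard failure rather than default it:

    from dockerplugin import discovery as dis

    kv_conn = dis.create_kv_conn("localhost")  # placeholder Consul host
    try:
        logins = dis.get_kv_value(kv_conn, "docker_plugin/docker_logins")
    except dis.DiscoveryKVEntryNotFoundError:
        # fail fast: the entry must exist, even if it is only an empty list
        raise
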
diff --git a/docker/dockerplugin/tasks.py b/docker/dockerplugin/tasks.py
index 837c1e9..e42e47d 100644
@@ -25,6 +25,7 @@ from cloudify import ctx
 from cloudify.decorators import operation
 from cloudify.exceptions import NonRecoverableError, RecoverableError
 import dockering as doc
+from dcaepolicy import Policies, POLICIES, POLICY_MESSAGE_TYPE
 from dockerplugin import discovery as dis
 from dockerplugin.decorators import monkeypatch_loggers, wrap_error_handling_start, \
     merge_inputs_for_start
@@ -46,6 +47,29 @@ DEFAULT_SCHEME = "http"
 SERVICE_COMPONENT_NAME = "service_component_name"
 SELECTED_CONTAINER_DESTINATION = "selected_container_destination"
 CONTAINER_ID = "container_id"
+APPLICATION_CONFIG = "application_config"
+
+
+# Utility methods
+
+def _get_docker_logins(consul_host=CONSUL_HOST):
+    """Get Docker logins
+
+    The assumption is that all Docker logins to be used will be available in
+    Consul's key-value store under "docker_plugin/docker_logins" as a list of
+    JSON objects, where each object is a single login:
+
+        [{ "username": "dcae_dev_ro", "password": "att123ro",
+            "registry": "nexus01.research.att.com:18443" }]
+    """
+    # REVIEW: The error handling may have to be re-examined. The current thought
+    # is that the logins *must* be set up, even as an empty list; otherwise the
+    # task will fail (fail fast). One alternative is to pass back an empty list
+    # upon any issue, but that would push potential problems to a later point in
+    # the deployment.
+    kv_conn = dis.create_kv_conn(consul_host)
+    return dis.get_kv_value(kv_conn, "docker_plugin/docker_logins")
+
 
 # Lifecycle interface calls for dcae.nodes.DockerContainer
 
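
The "docker_plugin/docker_logins" entry referenced above has to be seeded
before any deployment runs. A sketch using python-consul, which is already a
dependency of this plugin (host and login values are placeholders):

    import json
    import consul

    c = consul.Consul(host="localhost")  # placeholder Consul agent
    c.kv.put("docker_plugin/docker_logins", json.dumps([
        {"username": "user", "password": "secret",
         "registry": "registry.example.com:18443"},
    ]))
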
@@ -53,7 +77,7 @@ def _setup_for_discovery(**kwargs):
     """Setup for config discovery"""
     try:
         name = kwargs['name']
-        application_config = kwargs['application_config']
+        application_config = kwargs[APPLICATION_CONFIG]
 
         # NOTE: application_config is no longer a JSON string and is input as a
         # YAML map which translates to a dict. We don't have to do any
@@ -89,8 +113,14 @@ def _done_for_create(**kwargs):
     ctx.logger.info("Done setting up: {0}".format(name))
     return kwargs
 
+def _merge_policy_updates(**kwargs):
+    app_config = kwargs[APPLICATION_CONFIG]
+    kwargs[APPLICATION_CONFIG] = Policies.shallow_merge_policies_into(app_config)
+    return kwargs
+
 
 @monkeypatch_loggers
+@Policies.gather_policies_to_node
 @operation
 def create_for_components(**kwargs):
     """Create step for Docker containers that are components
@@ -102,8 +132,9 @@ def create_for_components(**kwargs):
     """
     _done_for_create(
             **_setup_for_discovery(
-                **_generate_component_name(
-                    **ctx.node.properties)))
+                **_merge_policy_updates(
+                    **_generate_component_name(
+                        **ctx.node.properties))))
 
 
 def _parse_streams(**kwargs):
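
The merge step above is delegated to dcaepolicy. Conceptually, the shallow
merge behaves like the following illustration (not the dcaepolicy
implementation): top-level keys coming from policy win over the
blueprint-supplied application config, and nested values are replaced
wholesale rather than merged recursively.

    def illustrate_shallow_merge(app_config, policy_configs):
        """Top-level policy keys win; nested values are replaced, not merged."""
        merged = dict(app_config)
        for policy_config in policy_configs:
            merged.update(policy_config)
        return merged
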
@@ -171,6 +202,7 @@ def _setup_for_discovery_streams(**kwargs):
 
 
 @monkeypatch_loggers
+@Policies.gather_policies_to_node
 @operation
 def create_for_components_with_streams(**kwargs):
     """Create step for Docker containers that are components that use DMaaP
@@ -185,9 +217,10 @@ def create_for_components_with_streams(**kwargs):
     _done_for_create(
             **_setup_for_discovery(
                 **_setup_for_discovery_streams(
-                    **_parse_streams(
-                        **_generate_component_name(
-                            **ctx.node.properties)))))
+                    **_merge_policy_updates(
+                        **_parse_streams(
+                            **_generate_component_name(
+                                **ctx.node.properties))))))
 
 
 @monkeypatch_loggers
@@ -261,7 +294,8 @@ def _create_and_start_container(container_name, image, docker_host,
 
         docker_host_ip = _lookup_service(docker_host, consul_host=consul_host)
 
-        client = doc.create_client(docker_host_ip, DOCKER_PORT)
+        logins = _get_docker_logins(consul_host=consul_host)
+        client = doc.create_client(docker_host_ip, DOCKER_PORT, logins=logins)
 
         hcp = doc.add_host_config_params_volumes(volumes=kwargs.get("volumes",
             None))
@@ -523,7 +557,8 @@ def stop_and_remove_container(**kwargs):
 
         docker_host_ip = _lookup_service(docker_host)
 
-        client = doc.create_client(docker_host_ip, DOCKER_PORT)
+        logins = _get_docker_logins()
+        client = doc.create_client(docker_host_ip, DOCKER_PORT, logins=logins)
 
         container_id = ctx.instance.runtime_properties[CONTAINER_ID]
         doc.stop_then_remove_container(client, container_id)
@@ -558,6 +593,61 @@ def cleanup_discovery(**kwargs):
         raise RecoverableError(e)
 
 
+def _notify_container(**kwargs):
+    """Notify container using the policy section in the docker_config"""
+    dc = kwargs["docker_config"]
+
+    if "policy" in dc:
+        if dc["policy"]["trigger_type"] == "docker":
+            # REVIEW: Need to finalize on the docker config policy data structure
+            script_path = dc["policy"]["script_path"]
+            app_config = kwargs["application_config"]
+            updated_policies = kwargs["updated_policies"]
+            cmd = doc.build_policy_update_cmd(script_path, use_sh=False,
+                    updated_policies=updated_policies,
+                    application_config=app_config)
+
+            docker_host = kwargs[SELECTED_CONTAINER_DESTINATION]
+            docker_host_ip = _lookup_service(docker_host)
+            logins = _get_docker_logins()
+            client = doc.create_client(docker_host_ip, DOCKER_PORT, logins=logins)
+
+            container_id = kwargs["container_id"]
+
+            doc.notify_for_policy_update(client, container_id, cmd)
+    # else the default is no trigger
+
+    return kwargs
+
+def _done_for_policy_update(**kwargs):
+    name = kwargs['name']
+    ctx.instance.runtime_properties.update(kwargs)
+    ctx.logger.info("Done updating for policy: {0}".format(name))
+    return kwargs
+
+@monkeypatch_loggers
+@Policies.update_policies_on_node(configs_only=True)
+@operation
+def policy_update(updated_policies, **kwargs):
+    """Policy update task
+
+    This method is responsible for updating the application configuration and
+    notifying the applications that the change has occurred. This is to be used
+    for the dcae.interfaces.policy.policy_update operation.
+
+    :updated_policies: contains the list of changed policy-configs when configs_only=True
+        (the default). Use configs_only=False to receive the full policy objects in :updated_policies:.
+    """
+    update_inputs = copy.deepcopy(ctx.instance.runtime_properties)
+    update_inputs["updated_policies"] = updated_policies
+
+    # Merge the policy updates into the application config, push it to Consul,
+    # and notify the container
+    _done_for_policy_update(
+            **_notify_container(
+                **_setup_for_discovery(
+                    **_merge_policy_updates(**update_inputs))))
+
+
 # Lifecycle interface calls for dcae.nodes.DockerHost
 
 
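On the container side, the script named by docker_config's policy.script_path
is executed inside the running container to announce the change. The exact
argument layout is produced by python-dockering's build_policy_update_cmd and
is not shown in this diff; assuming JSON-serialized positional arguments
(updated policies followed by the merged application config), a handler could
start from this sketch:

    #!/usr/bin/env python
    # Hypothetical policy-update hook baked into the component image.
    import json
    import sys

    def main(argv):
        # Assumption: each argument is a JSON document.
        payloads = [json.loads(arg) for arg in argv]
        # ... react to the updated policies / new application config here ...

    if __name__ == "__main__":
        main(sys.argv[1:])
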
diff --git a/docker/examples/blueprint-laika-policy.yaml b/docker/examples/blueprint-laika-policy.yaml
new file mode 100644
index 0000000..1531d21
--- /dev/null
@@ -0,0 +1,117 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+description: >
+  This blueprint installs a chain of two laika instances on a Docker cluster
+
+imports:
+  - http://www.getcloudify.org/spec/cloudify/3.4/types.yaml
+  - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/docker/2.3.0/node-type.yaml
+  - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/relationship/1.0.0/node-type.yaml
+  - {{ ONAPTEMPLATE_RAWREPOURL_org_onap_dcaegen2 }}/type_files/dcaepolicy/1.0.0/node-type.yaml
+
+inputs:
+  laika_image:
+    type: string
+
+  host_capacity_policy_id:
+    type: string
+    default: DCAE_alex.Config_host_capacity_policy_id_value
+
+  host_location_policy_id:
+    type: string
+    default: DCAE_alex.Config_host_location_policy_id_value
+
+  db_server_policy_id:
+    type: string
+    default: DCAE_alex.Config_db_server_policy_id_value
+
+node_templates:
+
+  host_capacity_policy:
+    type: dcae.nodes.policy
+    properties:
+        policy_id: { get_input: host_capacity_policy_id }
+
+  host_location_policy:
+    type: dcae.nodes.policy
+    properties:
+        policy_id: { get_input: host_location_policy_id }
+
+  db_server_policy:
+    type: dcae.nodes.policy
+    properties:
+        policy_id: { get_input: db_server_policy_id }
+
+  laika-zero:
+    type: dcae.nodes.DockerContainerForComponents
+    properties:
+        service_component_type:
+            'laika'
+        location_id:
+            'rework-central'
+        service_id:
+            'foo-service'
+        application_config:
+            some-param: "Lorem ipsum dolor sit amet"
+            downstream-laika: "{{ laika }}"
+        image: { get_input : laika_image }
+        docker_config:
+            healthcheck:
+                type: "http"
+                endpoint: "/health"
+            policy:
+                trigger_type: "docker"
+                script_path: "/bin/echo"
+    relationships:
+      # Link to downstream laika
+      - type: dcae.relationships.component_connected_to
+        target: laika-one
+      - type: dcae.relationships.component_contained_in
+        target: docker_host
+      - type: cloudify.relationships.depends_on
+        target: host_capacity_policy
+      - type: cloudify.relationships.depends_on
+        target: host_location_policy
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        start:
+          inputs:
+            ports:
+              - "8080:5432"
+            envs:
+              SOME-ENV: "BAM"
+            max_wait:
+              120
+        stop:
+          inputs:
+            cleanup_image:
+              False
+
+  laika-one:
+    type: dcae.nodes.DockerContainerForComponents
+    properties:
+        service_component_type:
+            'laika'
+        application_config:
+            some-param: "Lorem ipsum dolor sit amet"
+        image: { get_input : laika_image }
+        # Trying without health check
+    relationships:
+      - type: dcae.relationships.component_contained_in
+        target: docker_host
+      - type: cloudify.relationships.depends_on
+        target: db_server_policy
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        stop:
+          inputs:
+            cleanup_image:
+              False
+
+  docker_host:
+    type: dcae.nodes.SelectedDockerHost
+    properties:
+        location_id:
+            'rework-central'
+        docker_host_override:
+            'component_dockerhost'
diff --git a/docker/examples/blueprint-laika.yaml b/docker/examples/blueprint-laika.yaml
index 98d27af..0db03b8 100644
@@ -21,8 +21,8 @@ node_templates:
             'laika'
         location_id:
             'rework-central'
-       service_id:
-           'foo-service'
+        service_id:
+            'foo-service'
         application_config:
             some-param: "Lorem ipsum dolor sit amet"
             downstream-laika: "{{ laika }}"
@@ -76,5 +76,5 @@ node_templates:
     properties:
         location_id:
             'rework-central'
-        name_search:
-            'platform_dockerhost'
+        docker_host_override:
+            'component_dockerhost'
diff --git a/docker/requirements.txt b/docker/requirements.txt
index c76c229..bae15ce 100644
@@ -1,3 +1,4 @@
 # TODO: May need to add the following line
 # --extra-index-url <onap pypi url>
-python-dockering==1.2.0
+python-dockering==1.3.0
+dcaepolicy==0.0.4
diff --git a/docker/setup.py b/docker/setup.py
index 65ac0e9..128e90b 100644
@@ -24,13 +24,14 @@ from setuptools import setup
 setup(
     name='dockerplugin',
     description='Cloudify plugin for applications run in Docker containers',
-    version="2.3.0+t.0.3",
+    version="2.4.0",
     author='Michael Hwang, Tommy Carpenter',
     packages=['dockerplugin'],
     zip_safe=False,
     install_requires=[
         "python-consul>=0.6.0,<1.0.0",
         "python-dockering>=1.0.0,<2.0.0",
-        "uuid==1.30"
+        "uuid==1.30",
+        "dcaepolicy>=0.0.4"
     ]
 )