Merge "adding missing volumeMounts for aai hbase"
author    Jerome Doucerain <jerome.doucerain@bell.ca>
Mon, 14 Aug 2017 16:28:43 +0000 (16:28 +0000)
committer Gerrit Code Review <gerrit@onap.org>
Mon, 14 Aug 2017 16:28:43 +0000 (16:28 +0000)
42 files changed:
cloudify/inputs/message-router-blueprint.yaml.example [new file with mode: 0644]
cloudify/scripts/configure_node.py [new file with mode: 0644]
cloudify/scripts/create.py [new file with mode: 0644]
cloudify/scripts/tasks.py [new file with mode: 0644]
cloudify/types/kubernetes.yaml [new file with mode: 0644]
kubernetes/aai/aai-deployment.yaml
kubernetes/aai/hbase-deployment.yaml
kubernetes/aai/modelloader-deployment.yaml
kubernetes/appc/appc-deployment.yaml
kubernetes/appc/db-deployment.yaml
kubernetes/appc/dgbuilder-deployment.yaml
kubernetes/message-router/message-router-dmaap.yaml
kubernetes/message-router/message-router-kafka.yaml
kubernetes/message-router/message-router-zookeeper.yaml
kubernetes/mso/db-deployment.yaml
kubernetes/mso/mso-deployment.yaml
kubernetes/oneclick/createAll.bash
kubernetes/oneclick/deleteAll.bash
kubernetes/oneclick/setenv.bash [new file with mode: 0644]
kubernetes/policy/dep-brmsgw.yaml
kubernetes/policy/dep-drools.yaml
kubernetes/policy/dep-maria.yaml
kubernetes/policy/dep-nexus.yaml
kubernetes/policy/dep-pap.yaml
kubernetes/policy/dep-pdp.yaml
kubernetes/policy/dep-pypdp.yaml
kubernetes/portal/portal-apps-deployment.yaml
kubernetes/portal/portal-mariadb-deployment.yaml
kubernetes/portal/portal-vnc-dep.yaml
kubernetes/robot/robot-deployment.yaml
kubernetes/sdc/sdc-be.yaml
kubernetes/sdc/sdc-cs.yaml
kubernetes/sdc/sdc-es.yaml
kubernetes/sdc/sdc-fe.yaml
kubernetes/sdc/sdc-kb.yaml
kubernetes/sdnc/db-deployment.yaml
kubernetes/sdnc/dgbuilder-deployment.yaml
kubernetes/sdnc/sdnc-deployment.yaml
kubernetes/sdnc/web-deployment.yaml
kubernetes/vid/vid-mariadb-deployment.yaml
kubernetes/vid/vid-server-deployment.yaml
message-router-blueprint.yaml [new file with mode: 0644]

diff --git a/cloudify/inputs/message-router-blueprint.yaml.example b/cloudify/inputs/message-router-blueprint.yaml.example
new file mode 100644 (file)
index 0000000..ecab0ec
--- /dev/null
@@ -0,0 +1,25 @@
+join_command: kubeadm join --token f66aad.cb001cc90bd69b38 192.168.120.6:6443
+kubernetes_master_ip: 192.168.120.6
+flavor: 3
+configuration_file_content:
+    apiVersion: v1
+    clusters:
+    - cluster:
+        certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFM01EZ3dNekEzTXpJek4xb1hEVEkzTURnd01UQTNNekl6TjFvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTUF4Ckxzdmkyek1ZU0pjaG5QWjVDUkJQTnBLbklHTDlHY1FYRFZnQjNEc0FuaTVpc2VadDlmeENtOURxSS94NkkrRGoKSlA5ZkNNbEo5a3Z1OGgvZFF4dWJFbHhaSmZkdkFqY3p0RlVWdGpaVGREcTFDTk81UENOcnNRSkdQVS9HWDNzagpRWmlHYVNPYmJJOGQ0d2Z0bkI5dE51ZDNXMnZDZmZJUzNCNU9YMVRVMzBjVE1xVnJjZ0FLT2EvR2FUK01WV3c2CkVHZDErWmVoYWZBUWJDeG1jbHRpMlJHSUNVakpLc2xqUFRUS3JTNXJVMkwxUjdYSFd3SUhyWWtuZ05SQllwTkQKaXk3UjlCZy93S1dkMVNYVVpUODU3eE8xdjB0aU9ucFJML0tGS2IrcHBKUnVITDVORE9TbTJZSzR1OFI3MjFudgpyYVNOSTk2K0VoVGhWL2U4VWU4Q0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFFOUFhbWQzL0JmRVAyRitSeXJRdXp2TGQzSWEKbGZoR3Fab1JqZWFId1pnanVwQVh0VXdzd0JiYkFhZm5XMXJDd3VDVldRYXVYVWhyZ1VNelcvbEQ2blBYYWtUcgpwWTJ6NG83ZG90dlZSekVtN0dmWllMUUs2cW9Wczk4TTRjS3RhdjVlL3VVcXFGckY2NVYzUE1QV3M1NGp2Q1U5CklFTDJ0ZmQ1TzFrMGlEcXFtdWdBVjgxblNOdHlnK0FZN3o5SVdXRFhKcTNUQ1RHQnZLQmxCdzNWSDVBbnQxblEKSFNrSmJ0ZGhpaFA0KzU0emlKZEhPNFcxekFGam4zUVpIZVZDNU8rSkdSOWNZWW5aTHc4ZC92YmxZeXRpTWZPVwoyN3VzcW1RbmtPZDliNXozaTlvRDBvUUYyY1RObk85NzJkeTBuTmhiK0VMclpGNEpKUS9XVjB0Z083ST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
+        server: https://192.168.120.6:6443
+      name: kubernetes
+    contexts:
+    - context:
+        cluster: kubernetes
+        user: kubernetes-admin
+      name: kubernetes-admin@kubernetes
+    current-context: kubernetes-admin@kubernetes
+    kind: Config
+    preferences: {}
+    users:
+    - name: kubernetes-admin
+      user:
+        client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJSm9EQWNpYWVkSVF3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB4TnpBNE1ETXdOek15TXpkYUZ3MHhPREE0TURNd056TXlNemxhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQW1xd3duNlU0TFVFQkphMWUKQzIrUjM0K0oyZ3BBYTJ0aDVOZXdnS2NhUWQwaE5BODNjNE1IMDFVUjU3b3ByNUNFczFQVmVwMkZtczlpaFRITwo0SUpINjkxUVQvTUVJZE5iWTl0RXdDV21ia1lMbFBjc09yclErYTl5VGdxYm5IWjBONnJOdUZ4dDB2alRPSUR1CmRDMnBQR3dFMW5kaHd1VVB3UUFxeS9SVjN6MTgzRnoyOWZuVHg3UXdWR0J4Rk84Z0JxRTFRYTVYenhIZ0lTQ2sKSkJka2FtRUFhSjl6NHgwZjFmbHQ4MG4wZ3RHRitkbUZuMThkbGwzZmoreGpNOGxqS21QZnRNdlc4MXF0bkVnZApoU1I3bWdMODlUckx3SmFtNkxmVmZhN29CWWJvWUMyT2gvKytZMkpwOXRpRkMyZ1ExeVBXSHJBMVZJTVBQUWdkCk8yTGNuUUlEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFIZ2ZjRVd6R08yQ1p0cEJFbUxzbllXWTJmdGlSOU1BNHY5OQpXVFhBUzNzZ3VJTm43WktUUElSeTVyTmVmSTVhS1ltMWMyU0w5ZzJlM0JpeFZUUHRsYmRWczVBanMxWnVWRGRkClhmYk93blozcnBQbDZoenpxSVh2VmxsNzI4VC9hZDRJbmZ6SFVtT1o3YSs4enBIUS9EREZKLzR1aDYrSVlnSFkKVzBBQmFXMXpOc3lQSzNhK3paV0ROSVFvNS8yTVFJYkNwN1ZQOHhobUUxZ1diY1BxVmJ1YVZJY09IZjkvUVhqeQpKZTdoK2tzSEJPNUFZczRZOFZBYXlVb0t4bTJZbmNkZHJGTWl4STRKNEkrSUp5aGRPdE5TNG1lTmcyMXIwN3U2ClZkL2E2SGt6ekxFcmdqWkxzVktIK0RUMTVhTWNSZGg3OVE1YXo1ckh1Qm5vZ0RYejFtVT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
+        client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBbXF3d242VTRMVUVCSmExZUMyK1IzNCtKMmdwQWEydGg1TmV3Z0tjYVFkMGhOQTgzCmM0TUgwMVVSNTdvcHI1Q0VzMVBWZXAyRm1zOWloVEhPNElKSDY5MVFUL01FSWROYlk5dEV3Q1dtYmtZTGxQY3MKT3JyUSthOXlUZ3FibkhaME42ck51Rnh0MHZqVE9JRHVkQzJwUEd3RTFuZGh3dVVQd1FBcXkvUlYzejE4M0Z6Mgo5Zm5UeDdRd1ZHQnhGTzhnQnFFMVFhNVh6eEhnSVNDa0pCZGthbUVBYUo5ejR4MGYxZmx0ODBuMGd0R0YrZG1GCm4xOGRsbDNmait4ak04bGpLbVBmdE12VzgxcXRuRWdkaFNSN21nTDg5VHJMd0phbTZMZlZmYTdvQllib1lDMk8KaC8rK1kySnA5dGlGQzJnUTF5UFdIckExVklNUFBRZ2RPMkxjblFJREFRQUJBb0lCQUhxbjMrdEo5ekdUNGhnQgowcGxaQWFINnp3TzBxMzlENlo2ekdNbjlPY3BQVkp4WEVNOHVjbzg1WC9pV1hhWlhBWlMvLzRPNzFRNStOUStRCi94QjA0Qm9BS0VjdVhQR0NEWEF6bXVLUk9Oa3IvTlZGNmJJdElibFBVMkxsOEo3MEpKZGNnTVVacnhIbHRvS1IKWkFlSGlqUmJLTDcyYnZWQjl1dERlYXpCZHpPTzhHbG5VaU5WTWRoaVowazRNbEFobmV0ZjNNazFHbXFjbHJyNApISjIwbElSR2NWTWRqZm1OaThFVG5LckRwWWNvRUZ5QnozMVN2RHVTaU1GVm9sUWpZMkU1N2kyd1RVdDlSU1NjCk5oRlpEM2s1dkxwMFNIcjZtSXRURW1jY0w2VDdzTDh0UXNGLzhaZG9aUXpoRzRXUU5IZ00yUldsdEN4eklCNy8KT3czUk5OVUNnWUVBelcvNVdkWk5QV2hsRXR2VGQ4a1FjbUF3VkVYaGgrU2NvajhpVGdHbW5GNXhsSGhWVjZUdwpVYzRtRmhGU0JBSGpRWlN5Vm1NTDkwMWU1UE1aOXVRQ05Xb0pWVzU4cUI0VDJsRXNKRjJkdXdRSVZDL2g4QkhiClJ4TVZLaDJhdHZKR2dHbWsxME5tblZTYmxQVVpDVVBRWFN4R1B5VXh0UStSSmRUNHVPSm43QXNDZ1lFQXdMMnIKNUlQeFRvTHplZ254b0I5Z0RnbnFBazB3b3NicHg3V2pJY2RpdnlWNGpib2U3TmlYbEpQZXJ3MmExd2M2Ky96VgpSeVpkUjN2U1lrUnczNnp4Q1N0UHZhRFVMT053eDhtSjVRVVIwYXdReEQ4R1ZneHZmVTBhYzdqeW04L2laZWpjCkk5V1UxOXo0eEk3akIvMXNYOFpFTWFtb1RXOGVUM0I4aWNPUEd2Y0NnWUVBcWtOZmVlRnU2cklXOHVaV0FUVVcKK0hSWUdjQkJCd3VsOWFJMW9Fa2wrUHNkVDF2Yi8yT24rV1RObEFTTzROdGZxZjYvUDNHZmZUc1dwdElFZHViSwpIZExnSVhvTXZwa1BBeVc3Vy9ocXZaQytCbWdZN1lzZkhXem5ZWnhmbWJoNlRmdEFyMWdoTjh2amxqVDhwdjBaCk45OTE2T2UrcHIxY0l1cTdxUitiMmJrQ2dZQUxMYlQvZnV1SzZ5dGw0NWZBK3JEZWY1S3o2WGd0cUsyOGFIdDYKcFE3RUdVOUJvUTdVRzhmRzFVQ3dGSERya2I3SkNLUHlDWGFWZzhmeTdSZEMwY3YxQlM4Tngzc20wMVlpTUQwdwpMRGdaV2dwcTUyRGRzc0R3bW4welF3SEhLYXB1MEsrYjRISk9oc0ZpM1FxcjF2WG5KQittWmJtZUxCaXM4TkE0ClNVQk9od0tCZ0drTkJhaTFWVU9RaGVYU3Mrb3JPVWxDNDNsenlBanJZa2dod0lRd25LTWpqK2lrak9oNmtqc3IKL1lDTHVRcWNBYWNKVEF2VmZOVGcyZldyUUJTODlwVjlFRVBnV0JIQmt4a1JsNnY0WTFQZVRqOVVzeVdzaHljYQpXRkFHSkpDMXg1NWg2OWdFWSsyR1NpUEQ0MzNrQUZUd3NBUEhPbmRwdmlOTVBLek9KTldnCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
+
+
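For context, a minimal sketch of how the values in this inputs example are typically gathered from an existing kubeadm master (an assumption about the operator's workflow, not part of this change): the "kubeadm join ..." line used for join_command is printed at the end of kubeadm init, and configuration_file_content is the master's admin kubeconfig.

  # on the Kubernetes master (hypothetical host)
  sudo cat /etc/kubernetes/admin.conf    # contents become configuration_file_content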
diff --git a/cloudify/scripts/configure_node.py b/cloudify/scripts/configure_node.py
new file mode 100644 (file)
index 0000000..9cfa206
--- /dev/null
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+import subprocess
+from cloudify import ctx
+from cloudify.state import ctx_parameters as inputs
+
+
+def execute_command(_command):
+
+    ctx.logger.debug('_command {0}.'.format(_command))
+
+    subprocess_args = {
+        'args': _command.split(),
+        'stdout': subprocess.PIPE,
+        'stderr': subprocess.PIPE
+    }
+
+    ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+    process = subprocess.Popen(**subprocess_args)
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(_command))
+    ctx.logger.debug('output: {0} '.format(output))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returned an error.'.format(_command))
+        return False
+
+    return output
+
+
+if __name__ == '__main__':
+
+    join_command = inputs['join_command']
+    join_command = 'sudo {0} --skip-preflight-checks'.format(join_command)
+    execute_command(join_command)
+
+    # Install weave-related utils
+    execute_command('sudo curl -L git.io/weave -o /usr/local/bin/weave')
+    execute_command('sudo chmod a+x /usr/local/bin/weave')
+    execute_command('sudo curl -L git.io/scope -o /usr/local/bin/scope')
+    execute_command('sudo chmod a+x /usr/local/bin/scope')
+    execute_command('/usr/local/bin/scope launch')
+
+    hostname = execute_command('hostname')
+    ctx.instance.runtime_properties['hostname'] = hostname.rstrip('\n')
diff --git a/cloudify/scripts/create.py b/cloudify/scripts/create.py
new file mode 100644 (file)
index 0000000..eb362a4
--- /dev/null
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+import subprocess
+from cloudify import ctx
+from cloudify.exceptions import OperationRetry
+
+
+def check_command(command):
+
+    try:
+        process = subprocess.Popen(
+            command.split()
+        )
+    except OSError:
+        return False
+
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(command))
+    ctx.logger.debug('output: {0} '.format(output))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returned an error.'.format(command))
+        return False
+
+    return True
+
+
+def execute_command(_command):
+
+    ctx.logger.debug('_command {0}.'.format(_command))
+
+    subprocess_args = {
+        'args': _command.split(),
+        'stdout': subprocess.PIPE,
+        'stderr': subprocess.PIPE
+    }
+
+    ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+    process = subprocess.Popen(**subprocess_args)
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(_command))
+    ctx.logger.debug('output: {0} '.format(output))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returned an error.'.format(_command))
+        return False
+
+    return output
+
+
+if __name__ == '__main__':
+
+    docker_command = 'docker ps'
+
+    if not check_command(docker_command):
+        raise OperationRetry('Waiting for docker to be installed.')
+
+    finished = False
+    ps = execute_command('ps -ef')
+    for line in ps.split('\n'):
+        if '/usr/bin/python /usr/bin/cloud-init modules' in line:
+            ctx.logger.error('Cloud Init is still running: {0}'.format(line))
+            raise OperationRetry('Waiting for Cloud Init to finish.')
+
+    ctx.logger.info('Docker is ready and Cloud Init finished.')
diff --git a/cloudify/scripts/tasks.py b/cloudify/scripts/tasks.py
new file mode 100644 (file)
index 0000000..035a780
--- /dev/null
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+from fabric.api import run
+
+
+def label_node(labels, hostname):
+    if labels:
+        label_list = []
+        for key, value in labels.items():
+            label_pair_string = '%s=%s' % (key, value)
+            label_list.append(label_pair_string)
+        label_string = ' '.join(label_list)
+        command = 'kubectl label nodes %s %s' % (hostname, label_string)
+        run(command)
+
+
+def stop_node(hostname):
+    command = 'kubectl drain %s' % (hostname)
+    run(command)
+
+
+def delete_node(hostname):
+    command = 'kubectl delete no %s' % (hostname)
+    run(command)
diff --git a/cloudify/types/kubernetes.yaml b/cloudify/types/kubernetes.yaml
new file mode 100644 (file)
index 0000000..1698aa2
--- /dev/null
@@ -0,0 +1,91 @@
+inputs:
+
+  join_command:
+    type: string
+
+  labels:
+    default:
+      app: { get_input: app_name }
+
+  kubernetes_master_ip:
+    type: string
+
+  kubernetes_master_agent_user:
+    default: { get_input: agent_user }
+
+node_types:
+
+  cloudify.nodes.Kubernetes.Node:
+    derived_from: cloudify.nodes.Root
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        create:
+          implementation: cloudify/scripts/create.py
+        configure:
+          implementation: cloudify/scripts/configure_node.py
+          inputs:
+            join_command:
+              default: { get_input: join_command }
+        start:
+          implementation: fabric.fabric_plugin.tasks.run_task
+          inputs:
+            tasks_file:
+              default: cloudify/scripts/tasks.py
+            task_name:
+              default: label_node
+            task_properties:
+              default:
+                hostname: { get_attribute: [ SELF, hostname ] }
+                labels: { get_input: labels }
+            fabric_env:
+              default:
+                host_string: { get_input: kubernetes_master_ip }
+                user: { get_input: kubernetes_master_agent_user }
+                key: { get_secret: agent_key_private }
+#        stop:
+#          implementation: fabric.fabric_plugin.tasks.run_task
+#          inputs:
+#            tasks_file:
+#              default: cloudify/scripts/tasks.py
+#            task_name:
+#              default: stop_node
+#            task_properties:
+#              default:
+#                hostname: { get_attribute: [ SELF, hostname ] }
+#            fabric_env:
+#              default:
+#                host_string: { get_input: kubernetes_master_ip }
+#                user: { get_input: kubernetes_master_agent_user }
+#                key: { get_secret: agent_key_private }
+        delete:
+          implementation: fabric.fabric_plugin.tasks.run_task
+          inputs:
+            tasks_file:
+              default: cloudify/scripts/tasks.py
+            task_name:
+              default: delete_node
+            task_properties:
+              default:
+                hostname: { get_attribute: [ SELF, hostname ] }
+            fabric_env:
+              default:
+                host_string: { get_input: kubernetes_master_ip }
+                user: { get_input: kubernetes_master_agent_user }
+                key: { get_secret: agent_key_private }
+
+  cloudify.kubernetes.resources.Namespace:
+    derived_from: cloudify.kubernetes.resources.Main
+    properties:
+      _api_mapping:
+        default:
+          create:
+            api: CoreV1Api
+            method: create_namespace
+            payload: V1Namespace
+          read:
+            api: CoreV1Api
+            method: read_namespace
+          delete:
+            api: CoreV1Api
+            method: delete_namespace
+            payload: V1DeleteOptions
index ccf5007..857044d 100644 (file)
@@ -85,3 +85,5 @@ spec:
           hostPath:
             path: /dockerdata-nfs/onapdemo/aai/aai-data/
       restartPolicy: Always
+      imagePullSecrets:
+      - name: onap-docker-registry-key
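The imagePullSecrets entries added in this and the following deployment hunks all reference the same per-namespace secret. As a rough manual equivalent of the create_registry_key helper introduced in createAll.bash below (the onap-aai namespace and e-mail address are hypothetical; registry, user and password are the setenv.bash defaults):

  kubectl --namespace onap-aai create secret docker-registry onap-docker-registry-key \
    --docker-server=nexus3.onap.org:10001 \
    --docker-username=docker \
    --docker-password=docker \
    --docker-email=user@example.com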
index c2a96c9..d39d0c7 100644 (file)
@@ -30,3 +30,5 @@ spec:
             port: 8020
           initialDelaySeconds: 5
           periodSeconds: 10
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 7eacf7c..e649fc0 100644 (file)
@@ -93,3 +93,5 @@ spec:
           initialDelaySeconds: 5
           periodSeconds: 10
       restartPolicy: Always
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index b0224d5..e2f2b4e 100644 (file)
@@ -72,3 +72,5 @@ spec:
         - name: sdnc-conf
           hostPath:
             path: /dockerdata-nfs/onapdemo/sdnc/conf
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 0af66cc..ee796fa 100644 (file)
@@ -34,3 +34,5 @@ spec:
       - name: appc-data
         hostPath:
           path: /dockerdata-nfs/onapdemo/appc/data
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 9122095..de81493 100644 (file)
@@ -62,3 +62,5 @@ spec:
           initialDelaySeconds: 5
           periodSeconds: 10
       restartPolicy: Always
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 5730a13..43c39cd 100644 (file)
@@ -70,3 +70,5 @@ spec:
       - name: mykey
         hostPath:
           path: /dockerdata-nfs/onapdemo/message-router/dmaap/mykey
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index d2f0bb0..3a2b4f0 100644 (file)
@@ -78,3 +78,5 @@ spec:
       - name: start-kafka
         hostPath:
           path: /dockerdata-nfs/onapdemo/message-router/dcae-startup-vm-message-router/docker_files/start-kafka.sh
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index d5db00e..aca4806 100644 (file)
@@ -31,3 +31,5 @@ spec:
       - name: zookeeper-data
         hostPath:
           path: /dockerdata-nfs/onapdemo/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index e7d81d9..f57f4ba 100644 (file)
@@ -45,3 +45,5 @@ spec:
         - name: mso-mariadb-docker-entrypoint-initdb
           hostPath:
             path: /dockerdata-nfs/onapdemo/mso/mariadb/docker-entrypoint-initdb.d
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 53f1e01..fb052df 100644 (file)
@@ -70,3 +70,5 @@ spec:
         - name: mso-docker-files
           hostPath:
             path: /dockerdata-nfs/onapdemo/mso/docker-files
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 6e62c6b..bc211ee 100755 (executable)
@@ -1,11 +1,14 @@
 #!/bin/bash
 
+. $(dirname "$0")/setenv.bash
+
 usage() {
   cat <<EOF
 Usage: $0 [PARAMs]
 -u                  : Display usage
 -n [NAMESPACE]      : Kubernetes namespace (required)
 -s false            : Exclude services (default: true)
+-i [INSTANCE]       : ONAP deployment instance # (default: 1)
 -a [APP]            : Specify a specific ONAP component (default: all)
                       from the following choices:
                       sdc, aai ,mso, message-router, robot,
@@ -17,16 +20,27 @@ create_namespace() {
   kubectl create namespace $1-$2
 }
 
+create_registry_key() {
+  kubectl --namespace $1-$2 create secret docker-registry $3 --docker-server=$4 --docker-username=$5 --docker-password=$6 --docker-email=$7
+}
+
 create_service() {
+  sed -i -- 's/nodePort: [0-9]\{2\}[02468]\{1\}/nodePort: '"$3"'/g' ../$2/all-services.yaml
+  sed -i -- 's/nodePort: [0-9]\{2\}[13579]\{1\}/nodePort: '"$4"'/g' ../$2/all-services.yaml
   kubectl --namespace $1-$2 create -f ../$2/all-services.yaml
+  mv ../$2/all-services.yaml-- ../$2/all-services.yaml
 }
 
 #MAINs
 NS=
 INCL_SVC=true
 APP=
+INSTANCE=1
+MAX_INSTANCE=5
+DU=$ONAP_DOCKER_USER
+DP=$ONAP_DOCKER_PASS
 
-while getopts ":n:u:s:a:" PARAM; do
+while getopts ":n:u:s:i:a:U:P:" PARAM; do
   case $PARAM in
     u)
       usage
@@ -38,6 +52,9 @@ while getopts ":n:u:s:a:" PARAM; do
     s)
       INCL_SVC=${OPTARG}
       ;;
+    i)
+      INSTANCE=${OPTARG}
+      ;;
     a)
       APP=${OPTARG}
       if [[ -z $APP ]]; then
@@ -45,6 +62,12 @@ while getopts ":n:u:s:a:" PARAM; do
         exit 1
       fi
       ;;
+    U)
+      DU=${OPTARG}
+      ;;
+    P)
+      DP=${OPTARG}
+      ;;
     ?)
       usage
       exit
@@ -59,8 +82,20 @@ fi
 
 if [[ ! -z "$APP" ]]; then
   ONAP_APPS=($APP)
-else
-  ONAP_APPS=('sdc' 'aai' 'mso' 'message-router' 'robot' 'vid' 'sdnc' 'portal' 'policy' 'appc')
+fi
+
+if [[ "$INCL_SVC" == true ]]; then
+
+  if [ "$INSTANCE" -gt "$MAX_INSTANCE" ];then
+    printf "\n********** You choose to create ${INSTANCE}th instance of ONAP \n"
+    printf "\n********** Due to port allocation only ${MAX_INSTANCE} instances of ONAP is allowed per kubernetes deployment\n"
+    exit 1
+  fi
+
+  start=$((300+2*INSTANCE))
+  end=$((start+1))
+  printf "\n********** Creating instance ${INSTANCE} of ONAP with port range ${start}00 and ${end}99\n"
+
 fi
 
 printf "\n********** Creating up ONAP: ${ONAP_APPS[*]}\n"
@@ -71,7 +106,7 @@ for i in ${ONAP_APPS[@]}; do
 
   if [[ "$INCL_SVC" == true ]]; then
     printf "\nCreating services **********\n"
-    create_service $NS $i
+    create_service $NS $i $start $end
   fi
 
   printf "\n"
@@ -79,7 +114,8 @@ done
 
 printf "\n\n********** Creating deployments for  ${ONAP_APPS[*]} ********** \n"
 for i in ${ONAP_APPS[@]}; do
+  create_registry_key $NS $i $ONAP_DOCKER_REGISTRY_KEY $ONAP_DOCKER_REGISTRY $DU $DP $ONAP_DOCKER_MAIL
   /bin/bash $i.sh $NS $i 'create'
 done
 
-printf "**** Done ****"
+printf "\n**** Done ****\n"
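A hedged usage sketch of the updated one-click scripts (namespace and credentials are hypothetical; exporting the ONAP_DOCKER_* variables from setenv.bash before running is the most reliable way to feed the new create_registry_key step):

  export ONAP_DOCKER_USER=mydockeruser
  export ONAP_DOCKER_PASS=mydockerpassword
  ./createAll.bash -n onap -i 2    # second instance; node ports land in the 30400-30599 range per the arithmetic above
  ./deleteAll.bash -n onap         # tears the same instance back down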
index 8d145f9..3d54aa8 100755 (executable)
@@ -1,6 +1,19 @@
 #!/bin/bash
+
+. $(dirname "$0")/setenv.bash
+
 delete_namespace() {
-  kubectl delete namespace $1-$2
+  _NS=$1-$2
+  kubectl delete namespace $_NS
+  printf "Waiting for namespace $_NS termination...\n"
+  while kubectl get namespaces $_NS > /dev/null 2>&1; do
+    sleep 2
+  done
+  printf "Namespace $_NS deleted.\n\n"
+}
+
+delete_registry_key() {
+  kubectl --namespace $1-$2 delete secret onap-docker-registry-key
 }
 
 delete_service() {
@@ -58,23 +71,21 @@ fi
 
 if [[ ! -z "$APP" ]]; then
   ONAP_APPS=($APP)
-else
-  ONAP_APPS=('sdc' 'aai' 'mso' 'message-router' 'robot' 'vid' 'sdnc' 'portal' 'policy' 'appc')
 fi
 
 printf "\n********** Cleaning up ONAP: ${ONAP_APPS[*]}\n"
 
 for i in ${ONAP_APPS[@]}; do
 
+  # delete the deployments
+  /bin/bash $i.sh $NS $i 'delete'
+
   if [[ "$INCL_SVC" == true ]]; then
     printf "\nDeleting services **********\n"
     delete_service $NS $i
     delete_namespace $NS $i
   fi
 
-  # delete the deployments
-  /bin/bash $i.sh $NS $i 'delete'
-
 done
 
 
diff --git a/kubernetes/oneclick/setenv.bash b/kubernetes/oneclick/setenv.bash
new file mode 100644 (file)
index 0000000..05a0d47
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+ONAP_APPS=('sdc' 'aai' 'mso' 'message-router' 'robot' 'vid' 'sdnc' 'portal' 'policy' 'appc')
+ONAP_DOCKER_REGISTRY_KEY=${ONAP_DOCKER_REGISTRY_KEY:-onap-docker-registry-key}
+ONAP_DOCKER_REGISTRY=${ONAP_DOCKER_REGISTRY:-nexus3.onap.org:10001}
+ONAP_DOCKER_USER=${ONAP_DOCKER_USER:-docker}
+ONAP_DOCKER_PASS=${ONAP_DOCKER_PASS:-docker}
+ONAP_DOCKER_MAIL=${ONAP_DOCKER_MAIL:-$USERNAME@$USERDOMAIN}
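One caveat worth flagging: the ONAP_DOCKER_MAIL default is built from $USERNAME and $USERDOMAIN, which are often unset in Linux shells, so the computed address can degenerate to a bare "@". Exporting it explicitly is a safe workaround (address is hypothetical):

  export ONAP_DOCKER_MAIL=jdoe@example.com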
index fc39fc3..6758abf 100644 (file)
@@ -61,3 +61,5 @@ spec:
         - name: pe
           hostPath:
             path: /dockerdata-nfs/onapdemo/policy/opt/policy/config/pe/
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index e32be05..fbc8e17 100644 (file)
@@ -75,3 +75,5 @@ spec:
         - name: drools
           hostPath:
             path:  /dockerdata-nfs/onapdemo/policy/opt/policy/config/drools/
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index e9f4fa2..21914c8 100644 (file)
@@ -27,3 +27,5 @@ spec:
             port: 3306
           initialDelaySeconds: 5
           periodSeconds: 10
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 78eefe3..48c289e 100644 (file)
@@ -45,3 +45,5 @@ spec:
         - bash -c "/opt/nexus/nexus-2.14.2-01/bin/nexus start && sleep 1000d"
         image: nexus3.onap.org:10001/openecomp/policy/policy-nexus:1.0-STAGING-latest
         name: nexus
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 0a9d0c2..11420da 100644 (file)
@@ -85,3 +85,5 @@ spec:
         - name: pe
           hostPath:
             path: /dockerdata-nfs/onapdemo/policy/opt/policy/config/pe/
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index e3d65d4..0efe5c3 100644 (file)
@@ -64,3 +64,5 @@ spec:
         - name: pe
           hostPath:
             path: /dockerdata-nfs/onapdemo/policy/opt/policy/config/pe/
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 860dfb1..91d71d5 100644 (file)
@@ -66,3 +66,5 @@ spec:
         - name: pe
           hostPath:
             path: /dockerdata-nfs/onapdemo/policy/opt/policy/config/pe/
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 0d39e23..39f32ca 100755 (executable)
@@ -130,3 +130,5 @@ spec:
         - name: portal-root
           hostPath:
             path: /dockerdata-nfs/onapdemo/portal
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index f25e65b..f1dae14 100755 (executable)
@@ -35,3 +35,5 @@ spec:
       - name: portal-mariadb-data
         hostPath:
           path: /dockerdata-nfs/onapdemo/portal/mariadb/data
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 3ad51cc..7751eee 100644 (file)
@@ -127,3 +127,5 @@ spec:
       volumes:
         - name: ubuntu-init
           emptyDir: {}
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index a29e611..ade7147 100644 (file)
@@ -49,3 +49,5 @@ spec:
         - name: lighttpd-authorization
           hostPath:
             path: /dockerdata-nfs/onapdemo/robot/authorization
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 964a247..f23db77 100644 (file)
@@ -107,3 +107,5 @@ spec:
         - name:  sdc-logs
           hostPath:
             path:  /dockerdata-nfs/onapdemo/sdc/logs
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index d985763..74874f9 100644 (file)
@@ -82,3 +82,5 @@ spec:
         - name:  sdc-logs
           hostPath:
             path:  /dockerdata-nfs/onapdemo/sdc/logs
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 78577ee..5176b93 100644 (file)
@@ -56,3 +56,5 @@ spec:
         - name:  sdc-logs
           hostPath:
             path:  /dockerdata-nfs/onapdemo/sdc/logs
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 9ac8350..d984bbe 100644 (file)
@@ -96,3 +96,5 @@ spec:
         - name:  sdc-fe-config
           hostPath:
             path:  /dockerdata-nfs/onapdemo/sdc/sdc-fe/FE_2_setup_configuration.rb
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index d39386b..b79f4ca 100644 (file)
@@ -74,3 +74,5 @@ spec:
         - name:  sdc-logs
           hostPath:
             path:  /dockerdata-nfs/onapdemo/sdc/logs
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index bb8fda5..685edec 100644 (file)
@@ -34,3 +34,5 @@ spec:
       - name: sdnc-data
         hostPath:
           path: /dockerdata-nfs/onapdemo/sdnc/data
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 2e6c9a5..13781ff 100644 (file)
@@ -60,3 +60,5 @@ spec:
           initialDelaySeconds: 5
           periodSeconds: 10
       restartPolicy: Always
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index 50121be..0dc4839 100644 (file)
@@ -62,3 +62,5 @@ spec:
         - name: sdnc-conf
           hostPath:
             path: /dockerdata-nfs/onapdemo/sdnc/conf
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index d1edb6f..36df93b 100644 (file)
@@ -60,3 +60,5 @@ spec:
           initialDelaySeconds: 5
           periodSeconds: 10
       restartPolicy: Always
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index f2e7927..93a798a 100644 (file)
@@ -50,3 +50,5 @@ spec:
         - name: my-cnf
           hostPath:
             path: /dockerdata-nfs/onapdemo/vid/vid/lf_config/vid-my.cnf
+      imagePullSecrets:
+      - name: onap-docker-registry-key
index b6321d8..1e6e9de 100644 (file)
@@ -91,3 +91,5 @@ spec:
             port: 8080
           initialDelaySeconds: 5
           periodSeconds: 10
+      imagePullSecrets:
+      - name: onap-docker-registry-key
diff --git a/message-router-blueprint.yaml b/message-router-blueprint.yaml
new file mode 100644 (file)
index 0000000..98a2830
--- /dev/null
@@ -0,0 +1,532 @@
+tosca_definitions_version: cloudify_dsl_1_3
+
+description: >
+  This example deploys the OOM Message Router application. Each service/deployment pair is associated with a single Kubernetes node.
+  Node template naming convention: PROVISIONINGAPI_RESOURCETYPE_APPLICATIONCOMPONENT
+  The following resources are created:
+  - Security Group
+  - openstack_port_zookeeper - NIC that connects to the Openstack Server
+  - openstack_port_kafka - NIC that connects to the Openstack Server
+  - openstack_port_dmaap - NIC that connects to the Openstack Server
+  - openstack_server_zookeeper - a VM that a Kubernetes Node is installed on.
+  - openstack_server_kafka - a VM that a Kubernetes Node is installed on.
+  - openstack_server_dmaap - a VM that a Kubernetes Node is installed on.
+  - kubernetes_node_zookeeper - a Kubernetes node that will join the Kubernetes cluster.
+  - kubernetes_node_kafka - a Kubernetes node that will join the Kubernetes cluster.
+  - kubernetes_node_dmaap - a Kubernetes node that will join the Kubernetes cluster.
+  - kubernetes_deployment_zookeeper - a Kubernetes deployment.
+  - kubernetes_deployment_kafka - a Kubernetes deployment.
+  - kubernetes_deployment_dmaap - a Kubernetes deployment.
+  - kubernetes_service_zookeeper - a Kubernetes service.
+  - kubernetes_service_kafka - a Kubernetes service.
+  - kubernetes_service_dmaap - a Kubernetes service.
+  The following pre-setup steps are assumed, but not required:
+  - Create Cloudify Example Environment: https://github.com/cloudify-examples/cloudify-environment-setup.
+  - Create Kubernetes Cluster: https://github.com/cloudify-examples/simple-kubernetes-blueprint.
+
+imports:
+  - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml
+  # Plugin required: https://github.com/cloudify-cosmo/cloudify-openstack-plugin/releases/download/2.2.0/cloudify_openstack_plugin-2.2.0-py27-none-linux_x86_64-centos-Core.wgn
+  - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-openstack-plugin/2.2.0/plugin.yaml
+  # Plugin required: https://github.com/cloudify-incubator/cloudify-utilities-plugin/releases/download/1.2.5/cloudify_utilities_plugin-1.2.5-py27-none-linux_x86_64-centos-Core.wgn
+  - https://raw.githubusercontent.com/cloudify-incubator/cloudify-utilities-plugin/1.2.5/plugin.yaml
+  # Plugin required: https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/releases/download/1.2.0/cloudify_kubernetes_plugin-1.2.0-py27-none-linux_x86_64-centos-Core.wgn
+  - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.0/plugin.yaml
+  # Plugin required: http://repository.cloudifysource.org/cloudify/wagons/cloudify-diamond-plugin/1.3.5/cloudify_diamond_plugin-1.3.5-py27-none-linux_x86_64-centos-Core.wgn
+  - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-diamond-plugin/1.3.5/plugin.yaml
+  # Plugin required: http://repository.cloudifysource.org/cloudify/wagons/cloudify-fabric-plugin/1.5/cloudify_fabric_plugin-1.5-py27-none-linux_x86_64-centos-Core.wgn
+  - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-fabric-plugin/1.5/plugin.yaml
+  - cloudify/types/kubernetes.yaml
+
+inputs:
+
+  configuration_file_content:
+    type: string
+
+  NS:
+    default: oom
+
+  image:
+    description: Image to be used when launching agent VMs
+    default: { get_secret: centos_core_image }
+
+  flavor:
+    description: Flavor of the agent VMs
+    default: { get_secret: large_image_flavor }
+
+  agent_user:
+    description: >
+      User for connecting to agent VMs
+    default: centos
+
+  app_name:
+    default: message-router
+
+  security_group:
+    default: { concat: [ 'secgrp_', { get_input: app_name } ] }
+
+dsl_definitions:
+
+  openstack_config: &openstack_config
+    username: { get_secret: keystone_username }
+    password: { get_secret: keystone_password }
+    tenant_name: { get_secret: keystone_tenant_name }
+    auth_url: { get_secret: keystone_url }
+    region: { get_secret: region }
+
+groups:
+
+  openstack_server_port_group_zookeeper:
+    members:
+      - openstack_server_zookeeper
+      - openstack_port_zookeeper
+
+  openstack_server_port_group_kafka:
+    members:
+      - openstack_server_kafka
+      - openstack_port_kafka
+
+  openstack_server_port_group_dmaap:
+    members:
+      - openstack_server_dmaap
+      - openstack_port_dmaap
+
+policies:
+
+  openstack_server_port_policies_scaling:
+    type: cloudify.policies.scaling
+    properties:
+      default_instances:  1
+    targets:
+    - openstack_server_port_group_zookeeper
+    - openstack_server_port_group_kafka
+    - openstack_server_port_group_dmaap
+
+node_templates:
+
+  kubernetes_service_zookeeper:
+    type: cloudify.kubernetes.resources.Service
+    properties:
+      definition:
+        apiVersion: v1
+        kind: Service
+        metadata:
+          name: zookeeper
+          labels:
+            app: zookeeper
+        spec:
+          ports:
+          - name: zookeeper1
+            port: 2181
+          selector:
+            app: zookeeper
+          clusterIP: None
+    relationships:
+      - type: cloudify.kubernetes.relationships.managed_by_master
+        target: k8s
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_deployment_zookeeper
+
+  kubernetes_deployment_zookeeper:
+    type: cloudify.kubernetes.resources.Deployment
+    properties:
+      definition:
+        file:
+          resource_path: kubernetes/message-router/message-router-zookeeper.yaml
+    relationships:
+      - type: cloudify.kubernetes.relationships.managed_by_master
+        target: k8s
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_node_zookeeper
+
+  kubernetes_node_zookeeper:
+    type: cloudify.nodes.Kubernetes.Node
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: openstack_server_zookeeper
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        start:
+          implementation: fabric.fabric_plugin.tasks.run_task
+          inputs:
+            task_properties:
+              hostname: { get_attribute: [ SELF, hostname ] }
+              labels:
+                app: zookeeper
+
+  openstack_server_zookeeper:
+    type: cloudify.openstack.nodes.Server
+    properties: &openstack_server_properties
+      openstack_config: *openstack_config
+      agent_config:
+          user: { get_input: agent_user }
+          install_method: remote
+          port: 22
+          key: { get_secret: agent_key_private }
+      server:
+        key_name: ''
+        image: ''
+        flavor: ''
+      management_network_name: { get_property: [ private_network, resource_id ] }
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: k8s_node_scaling_tier
+      - target: openstack_port_zookeeper
+        type: cloudify.openstack.server_connected_to_port
+      - type: cloudify.relationships.depends_on
+        target: cloud_init_openstack_server
+    interfaces: &openstack_server_interfaces
+      cloudify.interfaces.lifecycle:
+        create:
+          inputs:
+            args:
+              image: { get_input: image }
+              flavor: { get_input: flavor }
+              userdata: { get_attribute: [ cloud_init_openstack_server, cloud_config ] }
+      cloudify.interfaces.monitoring_agent:
+          install:
+            implementation: diamond.diamond_agent.tasks.install
+            inputs:
+              diamond_config:
+                interval: 1
+          start: diamond.diamond_agent.tasks.start
+          stop: diamond.diamond_agent.tasks.stop
+          uninstall: diamond.diamond_agent.tasks.uninstall
+      cloudify.interfaces.monitoring:
+          start:
+            implementation: diamond.diamond_agent.tasks.add_collectors
+            inputs:
+              collectors_config:
+                CPUCollector: {}
+                MemoryCollector: {}
+                LoadAverageCollector: {}
+                DiskUsageCollector:
+                  config:
+                    devices: x?vd[a-z]+[0-9]*$
+                NetworkCollector: {}
+                ProcessResourcesCollector:
+                  config:
+                    enabled: true
+                    unit: B
+                    measure_collector_time: true
+                    cpu_interval: 0.5
+                    process:
+                      hyperkube:
+                        name: hyperkube
+
+  openstack_port_zookeeper:
+    type: cloudify.openstack.nodes.Port
+    properties:
+      openstack_config: *openstack_config
+    relationships: &openstack_port_relationships
+      - type: cloudify.relationships.contained_in
+        target: k8s_node_scaling_tier
+      - type: cloudify.relationships.connected_to
+        target: private_network
+      - type: cloudify.relationships.depends_on
+        target: private_subnet
+      - type: cloudify.openstack.port_connected_to_security_group
+        target: security_group
+
+  kubernetes_service_kafka:
+    type: cloudify.kubernetes.resources.Service
+    properties:
+      definition:
+        apiVersion: v1
+        kind: Service
+        metadata:
+          name: global-kafka
+          labels:
+            app: global-kafka
+        spec:
+          ports:
+          - name: kafka1
+            port: 9092
+          selector:
+            app: global-kafka
+          clusterIP: None
+    relationships:
+      - type: cloudify.kubernetes.relationships.managed_by_master
+        target: k8s
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_deployment_kafka
+
+  kubernetes_deployment_kafka:
+    type: cloudify.kubernetes.resources.Deployment
+    properties:
+      definition:
+        file:
+          resource_path: kubernetes/message-router/message-router-kafka.yaml
+    relationships:
+      - type: cloudify.kubernetes.relationships.managed_by_master
+        target: k8s
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_node_kafka
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_deployment_zookeeper
+
+  kubernetes_node_kafka:
+    type: cloudify.nodes.Kubernetes.Node
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: openstack_server_kafka
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        start:
+          implementation: fabric.fabric_plugin.tasks.run_task
+          inputs:
+            task_properties:
+              hostname: { get_attribute: [ SELF, hostname ] }
+              labels:
+                app: global-kafka
+
+  openstack_server_kafka:
+    type: cloudify.openstack.nodes.Server
+    properties: *openstack_server_properties
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: k8s_node_scaling_tier
+      - target: openstack_port_kafka
+        type: cloudify.openstack.server_connected_to_port
+      - type: cloudify.relationships.depends_on
+        target: cloud_init_openstack_server
+    interfaces: *openstack_server_interfaces
+
+  openstack_port_kafka:
+    type: cloudify.openstack.nodes.Port
+    properties:
+      openstack_config: *openstack_config
+    relationships: *openstack_port_relationships
+
+  kubernetes_service_dmaap:
+    type: cloudify.kubernetes.resources.Service
+    properties:
+      definition:
+        apiVersion: v1
+        kind: Service
+        metadata:
+          name: dmaap
+          labels:
+            app: dmaap
+            version: 1.0.0
+        spec:
+          ports:
+          - name: mr1
+            port: 3904
+            nodePort: 30227
+          - name: mr2
+            port: 3905
+            nodePort: 30226
+          selector:
+            app: dmaap
+          type: NodePort
+    relationships:
+      - type: cloudify.kubernetes.relationships.managed_by_master
+        target: k8s
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_deployment_dmaap
+
+  kubernetes_deployment_dmaap:
+    type: cloudify.kubernetes.resources.Deployment
+    properties:
+      definition:
+        file:
+          resource_path: kubernetes/message-router/message-router-dmaap.yaml
+    relationships:
+      - type: cloudify.kubernetes.relationships.managed_by_master
+        target: k8s
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_node_dmaap
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_deployment_zookeeper
+
+  kubernetes_node_dmaap:
+    type: cloudify.nodes.Kubernetes.Node
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: openstack_server_dmaap
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        start:
+          implementation: fabric.fabric_plugin.tasks.run_task
+          inputs:
+            task_properties:
+              hostname: { get_attribute: [ SELF, hostname ] }
+              labels:
+                app: global-dmaap
+
+  openstack_server_dmaap:
+    type: cloudify.openstack.nodes.Server
+    properties: *openstack_server_properties
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: k8s_node_scaling_tier
+      - target: openstack_port_dmaap
+        type: cloudify.openstack.server_connected_to_port
+      - type: cloudify.relationships.depends_on
+        target: cloud_init_openstack_server
+    interfaces: *openstack_server_interfaces
+
+  openstack_port_dmaap:
+    type: cloudify.openstack.nodes.Port
+    properties:
+      openstack_config: *openstack_config
+    relationships: *openstack_port_relationships
+
+  security_group:
+    type: cloudify.openstack.nodes.SecurityGroup
+    properties:
+      openstack_config: *openstack_config
+      security_group:
+        name: { get_input: security_group }
+        description: security group for the message-router Kubernetes nodes
+      rules:
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 22
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 53
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 53
+        protocol: udp
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 80
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 443
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 2379
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 4001
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 4789
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 6443
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 6783
+        protocol: tcp
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 6783
+        protocol: udp
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 6784
+        protocol: tcp
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 6784
+        protocol: udp
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 8000
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 8080
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 9090
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 10250
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 2181
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 9092
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 3904
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 30227
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 3905
+      - remote_ip_prefix: 0.0.0.0/0
+        port: 30226
+
+  private_subnet:
+    type: cloudify.openstack.nodes.Subnet
+    properties:
+      openstack_config: *openstack_config
+      use_external_resource: true
+      resource_id: { get_secret: private_subnet_name }
+    relationships:
+      - target: private_network
+        type: cloudify.relationships.contained_in
+
+  private_network:
+    type: cloudify.openstack.nodes.Network
+    properties:
+      openstack_config: *openstack_config
+      use_external_resource: true
+      resource_id: { get_secret: private_network_name }
+
+  external_network:
+    type: cloudify.openstack.nodes.Network
+    properties:
+      openstack_config: *openstack_config
+      use_external_resource: true
+      resource_id: { get_secret: external_network_name }
+
+  cloud_init_openstack_server:
+    type: cloudify.nodes.CloudInit.CloudConfig
+    properties:
+      resource_config:
+        groups:
+        - docker
+        users:
+        - name: { get_input: agent_user }
+          primary-group: wheel
+          groups: docker
+          shell: /bin/bash
+          sudo: ['ALL=(ALL) NOPASSWD:ALL']
+          ssh-authorized-keys:
+            - { get_secret: agent_key_public }
+        write_files:
+        - path: /etc/yum.repos.d/kubernetes.repo
+          owner: root:root
+          permissions: '0444'
+          content: |
+            # installed by cloud-init
+            [kubernetes]
+            name=Kubernetes
+            baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+            enabled=1
+            gpgcheck=1
+            repo_gpgcheck=1
+            gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+                   https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+        packages:
+        - [epel-release]
+        - [gcc]
+        - [python-dev]
+        - [python-wheel]
+        - [python-setuptools]
+        - [libffi-devel]
+        - [python-devel]
+        - [openssl-devel]
+        - [docker, 1.12.6-28.git1398f24.el7.centos]
+        - [kubelet, 1.6.4-0]
+        - [kubeadm, 1.6.4-0]
+        - [kubectl, 1.6.4-0]
+        - [kubernetes-cni, 0.5.1-0]
+        - [git]
+        - [wget]
+        runcmd:
+        - systemctl enable docker
+        - systemctl start docker
+        - systemctl enable kubelet
+        - systemctl start kubelet
+        - yum install -y python-pip
+        - pip install --upgrade pip
+        - pip install docker-compose
+        - pip install backports.ssl_match_hostname --upgrade
+        - mkdir -p /tmp/oom/
+        - git clone https://gerrit.onap.org/r/oom.git /tmp/oom
+        - sleep 15
+        - chmod 755 /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/deploy.sh
+        - sed -i -e "s/\.\/docker_files/\/tmp\/oom\/kubernetes\/config\/docker\/init\/src\/config\/message-router\/dcae-startup-vm-message-router\/docker_files/g" /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/deploy.sh
+        - sed -i -e "s/\/opt\/docker\/docker-compose/\/bin\/docker-compose/g" /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/deploy.sh
+        - mv /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/__docker-compose.yml /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/docker-compose.yml
+        - mv /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/__MsgRtrApi.properties /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/docker_files/MsgRtrApi.properties
+        - sh -c /tmp/oom/kubernetes/config/docker/init/src/config/message-router/dcae-startup-vm-message-router/deploy.sh
+
+  k8s_node_scaling_tier:
+    type: cloudify.nodes.Root
+
+  k8s:
+    type: cloudify.kubernetes.nodes.Master
+    properties:
+      configuration:
+        file_content: { get_input: configuration_file_content }
+