--- /dev/null
+from fabric.api import run
+
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError
+
+
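+# The namespace may be set directly as a node property or nested under
+# 'options' (kept for kubernetes plugin compatibility); the direct
+# property takes precedence.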
+def _retrieve_namespace():
+ namespace = ctx.node.properties.get(
+ 'namespace',
+ ctx.node.properties
+ .get('options', {})
+ .get('namespace', None)
+ )
+
+ if not namespace:
+ raise NonRecoverableError(
+ 'Namespace is not defined (node={})'.format(ctx.node.name)
+ )
+
+ return namespace
+
+
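+# Runs on the Kubernetes master via the fabric plugin ('run' executes the
+# command over SSH) to create the nexus3 docker-registry pull secret.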
+def configure_secret():
+ namespace = _retrieve_namespace()
+ ctx.logger.info(
+ 'Configuring docker secrets for namespace: {0}'.format(namespace)
+ )
+
+ command = 'kubectl create secret ' \
+ 'docker-registry onap-docker-registry-key ' \
+ '--docker-server=nexus3.onap.org:10001 ' \
+ '--docker-username=docker ' \
+ '--docker-password=docker ' \
+ '--docker-email=email@email.com ' \
+ '--namespace={0}'.format(namespace)
+
+ ctx.logger.info('Command "{0}" will be executed'.format(command))
+ run(command)
+
+ ctx.logger.info('Docker secrets configured successfully')
--- /dev/null
+import pip
+
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError
+
+
+SERVICES_FILE_PARTS_SEPARATOR = '---'
+
+
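+# Install missing dependencies at runtime so this script can run on a
+# bare central_deployment_agent: 'pyaml' pulls in PyYAML, and the
+# kubernetes plugin is fetched from its GitHub archive.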
+def _import_or_install():
+ try:
+ import yaml
+ except ImportError:
+ pip.main(["install", "pyaml"])
+
+ try:
+ import cloudify_kubernetes.tasks as kubernetes_plugin
+ except ImportError:
+ pip.main([
+ "install",
+ "https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1rc1.zip"
+ ])
+
+ import yaml
+ import cloudify_kubernetes.tasks as kubernetes_plugin
+
+ return yaml, kubernetes_plugin
+
+
+def _retrieve_path():
+ return ctx.node.properties.get('init_pod', None)
+
+
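+# The kubernetes plugin stores each operation result under the
+# 'kubernetes' runtime property; stash it under a dedicated key so
+# results for several resources do not overwrite each other.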
+def _save_deployment_result(key):
+ result = ctx.instance.runtime_properties['kubernetes']
+ ctx.instance.runtime_properties[key] = result
+ ctx.instance.runtime_properties['kubernetes'] = {}
+
+
+def _do_create_init_pod(kubernetes_plugin, yaml):
+ ctx.logger.info('Creating init pod')
+ init_pod_file_path = _retrieve_path()
+
+ if not init_pod_file_path:
+ raise NonRecoverableError('Init pod file is not defined.')
+
+ temp_file_path = ctx.download_resource_and_render(
+ init_pod_file_path
+ )
+
+ with open(temp_file_path) as temp_file:
+ init_pod_file_content = temp_file.read()
+ init_pod_yaml_content = yaml.load(init_pod_file_content)
+
+ kubernetes_plugin.resource_create(definition=init_pod_yaml_content)
+ _save_deployment_result('init_pod')
+
+ ctx.logger.info('Init pod created successfully')
+
+
+if __name__ == '__main__':
+ yaml, kubernetes_plugin = _import_or_install()
+
+ _do_create_init_pod(kubernetes_plugin, yaml)
+
--- /dev/null
+import pip
+
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError
+
+
+def _import_or_install():
+ try:
+ import yaml
+ except ImportError:
+ pip.main(["install", "pyaml"])
+
+ try:
+ import cloudify_kubernetes.tasks as kubernetes_plugin
+ except ImportError:
+ pip.main([
+ "install",
+ "https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1rc1.zip"
+ ])
+
+ import yaml
+ import cloudify_kubernetes.tasks as kubernetes_plugin
+
+ return yaml, kubernetes_plugin
+
+
+def _retrieve_namespace():
+ namespace = ctx.node.properties.get(
+ 'namespace',
+ ctx.node.properties
+ .get('options', {})
+ .get('namespace', None)
+ )
+
+ if not namespace:
+ raise NonRecoverableError(
+ 'Namespace is not defined (node={})'.format(ctx.node.name)
+ )
+
+ return namespace
+
+
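+# Arguments for custom_resource_create/_delete: a v1 Namespace definition
+# plus an api_mapping that points the plugin at the CoreV1Api namespace
+# methods to call.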
+def _prepare_namespace_resource_template(name):
+ return {
+ 'definition': {
+ 'apiVersion': 'v1',
+ 'kind': 'Namespace',
+ 'metadata': {
+ 'name': name,
+ 'labels': {
+ 'name': name
+ },
+ },
+ },
+ 'api_mapping': {
+ 'create': {
+ 'api': 'CoreV1Api',
+ 'method': 'create_namespace',
+ 'payload': 'V1Namespace'
+ },
+ 'read': {
+ 'api': 'CoreV1Api',
+ 'method': 'read_namespace',
+ },
+ 'delete': {
+ 'api': 'CoreV1Api',
+ 'method': 'delete_namespace',
+ 'payload': 'V1DeleteOptions'
+ }
+ }
+ }
+
+
+def _save_deployment_result(key):
+ result = ctx.instance.runtime_properties['kubernetes']
+ ctx.instance.runtime_properties[key] = result
+ ctx.instance.runtime_properties['kubernetes'] = {}
+
+
+def _do_create_namespace(kubernetes_plugin):
+ namespace = _retrieve_namespace()
+ ctx.logger.info('Creating namespace: {0}'.format(namespace))
+
+ namespace_resource_template = _prepare_namespace_resource_template(
+ namespace
+ )
+
+ ctx.logger.debug(
+ 'Kubernetes object which will be deployed: {0}'
+ .format(namespace_resource_template)
+ )
+
+ kubernetes_plugin.custom_resource_create(**namespace_resource_template)
+ _save_deployment_result('namespace')
+ ctx.logger.info('Namespace created successfully')
+
+
+if __name__ == '__main__':
+ _, kubernetes_plugin = _import_or_install()
+
+ _do_create_namespace(kubernetes_plugin)
--- /dev/null
+import pip
+
+from cloudify import ctx
+
+
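+# A services file may bundle several YAML documents separated by '---'.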
+SERVICES_FILE_PARTS_SEPARATOR = '---'
+
+
+def _import_or_install():
+ try:
+ import yaml
+ except ImportError:
+ pip.main(["install", "pyaml"])
+
+ try:
+ import cloudify_kubernetes.tasks as kubernetes_plugin
+ except ImportError:
+ pip.main([
+ "install",
+ "https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1rc1.zip"
+ ])
+
+ try:
+ import jinja2
+ except ImportError:
+ pip.main(["install", "jinja2"])
+
+ import yaml
+ import jinja2
+ import cloudify_kubernetes.tasks as kubernetes_plugin
+
+ return yaml, kubernetes_plugin, jinja2
+
+
+def _init_jinja(jinja2):
+ return jinja2.Environment(
+ loader=jinja2.BaseLoader()
+ )
+
+
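+# Resource templates use Helm-style '.Values.x' references; rewriting them
+# to 'Values.x' turns them into valid Jinja2 expressions rendered against
+# the parsed values.yaml content.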
+def _render_template(jinja_env, template_content, values):
+ template_content = template_content.replace('.Values', 'Values')
+
+ template = jinja_env.from_string(template_content)
+ rendered_template = template.render(Values=values)
+ return rendered_template
+
+
+def _retrieve_resources_paths():
+ return ctx.node.properties.get('resources', [])
+
+
+def _retrieve_services_paths():
+ return ctx.node.properties.get('services', None)
+
+
+def _retrieve_values(yaml):
+ values_file_path = ctx.node.properties.get('values', None)
+
+ if values_file_path:
+ return yaml.load(ctx.get_resource(values_file_path))
+
+ ctx.logger.warn('Values file is not defined')
+
+
+def _save_deployment_result(key):
+ result = ctx.instance.runtime_properties['kubernetes']
+ ctx.instance.runtime_properties[key] = result
+ ctx.instance.runtime_properties['kubernetes'] = {}
+
+
+def _do_create_resources(kubernetes_plugin, yaml, jinja_env, values):
+ for path in _retrieve_resources_paths():
+ ctx.logger.info('Creating resource defined in: {0}'.format(path))
+
+ template_content = ctx.get_resource(path)
+ yaml_content = _render_template(
+ jinja_env,
+ template_content,
+ values
+ )
+ content = yaml.load(yaml_content)
+
+ kubernetes_plugin.resource_create(definition=content)
+ _save_deployment_result(
+ 'resource_{0}'.format(content['metadata']['name'])
+ )
+
+ ctx.logger.info('Resources created successfully')
+
+
+def _do_create_services(kubernetes_plugin, yaml, jinja_env, values):
+ ctx.logger.info('Creating services')
+ services_file_path = _retrieve_services_paths()
+
+ if not services_file_path:
+ ctx.logger.warn(
+ 'Services file is not defined. Skipping services provisioning!'
+ )
+
+ return
+
+ template_content = ctx.get_resource(services_file_path)
+ yaml_content = _render_template(
+ jinja_env,
+ template_content,
+ values
+ )
+
+ yaml_content_parts = \
+ yaml_content.split(SERVICES_FILE_PARTS_SEPARATOR)
+
+ for yaml_content_part in yaml_content_parts:
+ content = yaml.load(yaml_content_part)
+
+ kubernetes_plugin.resource_create(definition=content)
+ _save_deployment_result(
+ 'service_{0}'.format(content['metadata']['name'])
+ )
+
+ ctx.logger.info('Services created successfully')
+
+
+if __name__ == '__main__':
+ yaml, kubernetes_plugin, jinja2 = _import_or_install()
+ jinja_env = _init_jinja(jinja2)
+ values = _retrieve_values(yaml)
+
+ _do_create_resources(kubernetes_plugin, yaml, jinja_env, values)
+ _do_create_services(kubernetes_plugin, yaml, jinja_env, values)
+
--- /dev/null
+import pip
+
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError
+
+
+SERVICES_FILE_PARTS_SEPARATOR = '---'
+
+
+def _import_or_install():
+ try:
+ import yaml
+ except ImportError:
+ pip.main(["install", "pyaml"])
+
+ try:
+ import cloudify_kubernetes.tasks as kubernetes_plugin
+ except ImportError:
+ pip.main([
+ "install",
+ "https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1rc1.zip"
+ ])
+
+ import yaml
+ import cloudify_kubernetes.tasks as kubernetes_plugin
+
+ return yaml, kubernetes_plugin
+
+
+def _retrieve_path():
+ return ctx.node.properties.get('init_pod', None)
+
+
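+# Inverse of the create script's bookkeeping: move the result saved at
+# create time back under 'kubernetes' so the plugin's delete operation
+# can find the resource to remove.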
+def _set_deployment_result(key):
+ result = ctx.instance.runtime_properties.pop(key)
+ ctx.instance.runtime_properties['kubernetes'] = result
+
+
+def _do_delete_init_pod(kubernetes_plugin, yaml):
+ ctx.logger.info('Deleting init pod')
+ init_pod_file_path = _retrieve_path()
+
+ if not init_pod_file_path:
+ raise NonRecoverableError('Init pod file is not defined.')
+
+ temp_file_path = ctx.download_resource_and_render(
+ init_pod_file_path
+ )
+
+ with open(temp_file_path) as temp_file:
+ init_pod_file_content = temp_file.read()
+ init_pod_yaml_content = yaml.load(init_pod_file_content)
+
+ _set_deployment_result('init_pod')
+ kubernetes_plugin.resource_delete(definition=init_pod_yaml_content)
+
+ ctx.logger.info('Init pod deleted successfully')
+
+
+if __name__ == '__main__':
+ yaml, kubernetes_plugin = _import_or_install()
+
+ _do_delete_init_pod(kubernetes_plugin, yaml)
+
--- /dev/null
+import pip
+
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError
+
+
+def _import_or_install():
+ try:
+ import yaml
+ except ImportError:
+ pip.main(["install", "pyaml"])
+
+ try:
+ import cloudify_kubernetes.tasks as kubernetes_plugin
+ except ImportError:
+ pip.main([
+ "install",
+ "https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1rc1.zip"
+ ])
+
+ import yaml
+ import cloudify_kubernetes.tasks as kubernetes_plugin
+
+ return yaml, kubernetes_plugin
+
+
+def _retrieve_namespace():
+ namespace = ctx.node.properties.get(
+ 'namespace',
+ ctx.node.properties
+ .get('options', {})
+ .get('namespace', None)
+ )
+
+ if not namespace:
+ raise NonRecoverableError(
+ 'Namespace is not defined (node={})'.format(ctx.node.name)
+ )
+
+ return namespace
+
+
+def _prepare_namespace_resource_template(name):
+ return {
+ 'definition': {
+ 'apiVersion': 'v1',
+ 'kind': 'Namespace',
+ 'metadata': {
+ 'name': name,
+ 'labels': {
+ 'name': name
+ },
+ },
+ },
+ 'api_mapping': {
+ 'create': {
+ 'api': 'CoreV1Api',
+ 'method': 'create_namespace',
+ 'payload': 'V1Namespace'
+ },
+ 'read': {
+ 'api': 'CoreV1Api',
+ 'method': 'read_namespace',
+ },
+ 'delete': {
+ 'api': 'CoreV1Api',
+ 'method': 'delete_namespace',
+ 'payload': 'V1DeleteOptions'
+ }
+ }
+ }
+
+
+def _set_deployment_result(key):
+ result = ctx.instance.runtime_properties.pop(key)
+ ctx.instance.runtime_properties['kubernetes'] = result
+
+
+def _do_delete_namespace(kubernetes_plugin):
+ namespace = _retrieve_namespace()
+ ctx.logger.info('Deleting namespace: {0}'.format(namespace))
+
+ namespace_resource_template = _prepare_namespace_resource_template(
+ namespace
+ )
+
+ ctx.logger.debug(
+ 'Kubernetes object which will be deleted: {0}'
+ .format(namespace_resource_template)
+ )
+
+ _set_deployment_result('namespace')
+ kubernetes_plugin.custom_resource_delete(**namespace_resource_template)
+ ctx.logger.info('Namespace deleted successfully')
+
+
+if __name__ == '__main__':
+ _, kubernetes_plugin = _import_or_install()
+
+ _do_delete_namespace(kubernetes_plugin)
+
--- /dev/null
+import pip
+
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError
+
+
+SERVICES_FILE_PARTS_SEPARATOR = '---'
+
+
+def _import_or_install():
+ try:
+ import yaml
+ except ImportError:
+ pip.main(["install", "pyaml"])
+
+ try:
+ import cloudify_kubernetes.tasks as kubernetes_plugin
+ except ImportError:
+ pip.main([
+ "install",
+ "https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/archive/1.2.1rc1.zip"
+ ])
+
+ try:
+ import jinja2
+ except ImportError:
+ pip.main(["install", "jinja2"])
+
+ import yaml
+ import jinja2
+ import cloudify_kubernetes.tasks as kubernetes_plugin
+
+ return yaml, kubernetes_plugin, jinja2
+
+
+def _init_jinja(jinja2):
+ return jinja2.Environment(
+ loader=jinja2.BaseLoader()
+ )
+
+
+def _render_template(jinja_env, template_content, values):
+ template_content = template_content.replace('.Values', 'Values')
+
+ template = jinja_env.from_string(template_content)
+ rendered_template = template.render(Values=values)
+ return rendered_template
+
+
+def _retrieve_resources_paths():
+ return ctx.node.properties.get('resources', [])
+
+
+def _retrieve_services_paths():
+ return ctx.node.properties.get('services', None)
+
+
+def _retrieve_values(yaml):
+ values_file_path = ctx.node.properties.get('values', None)
+
+ if values_file_path:
+ return yaml.load(ctx.get_resource(values_file_path))
+
+ ctx.logger.warn('Values file is not defined')
+
+
+def _set_deployment_result(key):
+ result = ctx.instance.runtime_properties.pop(key)
+ ctx.instance.runtime_properties['kubernetes'] = result
+
+
+def _do_delete_resources(kubernetes_plugin, yaml, jinja_env, values):
+ for path in _retrieve_resources_paths():
+ ctx.logger.info('Deleting resource defined in: {0}'.format(path))
+
+ template_content = ctx.get_resource(path)
+ yaml_content = _render_template(
+ jinja_env,
+ template_content,
+ values
+ )
+ content = yaml.load(yaml_content)
+
+ _set_deployment_result(
+ 'resource_{0}'.format(content['metadata']['name'])
+ )
+ kubernetes_plugin.resource_delete(definition=content)
+
+ ctx.logger.info('Resources deleted successfully')
+
+
+def _do_delete_services(kubernetes_plugin, yaml, jinja_env, values):
+ ctx.logger.info('Deleting services')
+ services_file_path = _retrieve_services_paths()
+
+ if not services_file_path:
+ ctx.logger.warn(
+ 'Services file is not defined. Skipping services deletion!'
+ )
+
+ return
+
+ template_content = ctx.get_resource(services_file_path)
+ yaml_content = _render_template(
+ jinja_env,
+ template_content,
+ values
+ )
+
+ yaml_content_parts = \
+ yaml_content.split(SERVICES_FILE_PARTS_SEPARATOR)
+
+ for yaml_content_part in yaml_content_parts:
+ content = yaml.load(yaml_content_part)
+
+ _set_deployment_result(
+ 'service_{0}'.format(content['metadata']['name'])
+ )
+ kubernetes_plugin.resource_delete(definition=content)
+
+ ctx.logger.info('Services deleted successfully')
+
+
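+# Tear down in reverse order of creation: services first, then resources.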
+if __name__ == '__main__':
+ yaml, kubernetes_plugin, jinja2 = _import_or_install()
+ jinja_env = _init_jinja(jinja2)
+ values = _retrieve_values(yaml)
+
+ _do_delete_services(kubernetes_plugin, yaml, jinja_env, values)
+ _do_delete_resources(kubernetes_plugin, yaml, jinja_env, values)
+
+
+++ /dev/null
-from cloudify import ctx
+++ /dev/null
-from cloudify import ctx
+++ /dev/null
-from cloudify import ctx
node_types:
+ cloudify.onap.kubernetes.Environment:
+ derived_from: cloudify.nodes.Root
+ properties:
+ namespace:
+ type: string
+ init_pod:
+ type: string
+ description: >
+ Path to init pod YAML file
+ options:
+ description: >
+ For compatibility with kubernetes plugin.
+ To be removed in the future.
+ default: {}
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ implementation: cloudify/scripts/onap/create_namespace.py
+ executor: central_deployment_agent
+ start:
+ implementation: cloudify/scripts/onap/create_init_pod.py
+ executor: central_deployment_agent
+ stop:
+ implementation: cloudify/scripts/onap/delete_init_pod.py
+ executor: central_deployment_agent
+ delete:
+ implementation: cloudify/scripts/onap/delete_namespace.py
+ executor: central_deployment_agent
+
cloudify.onap.kubernetes.App:
derived_from: cloudify.nodes.Root
properties:
type: string
description: >
Name of ONAP app
+ values:
+ type: string
+ description: >
+ Path (relative, blueprint perspective) to the values.yaml file
+ required: false
resources:
description: >
List of paths (relative, blueprint perspective)
description: >
Parameters required to create kubernetes resources for each app
default: {}
+ options:
+ description: >
+ For compatibility with kubernetes plugin.
+ To be removed in the future.
+ default: {}
interfaces:
cloudify.interfaces.lifecycle:
create:
- implementation: cloudify/scripts/onap/read_definitions.py
+ implementation: cloudify/scripts/onap/create_namespace.py
executor: central_deployment_agent
configure:
- implementation: cloudify/scripts/onap/patch_definitions.py
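+ # Workaround: create the docker-registry pull secret by running
+ # kubectl on the Kubernetes master over SSH via the fabric plugin.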
+ implementation: fabric.fabric_plugin.tasks.run_task
executor: central_deployment_agent
+ inputs:
+ tasks_file:
+ default: cloudify/scripts/onap/configure_docker_secret_workaround.py
+ task_name:
+ default: configure_secret
+ fabric_env:
+ default:
+ host_string: { get_secret: kubernetes_master_ip }
+ user: { get_secret: agent_user }
+ key: { get_secret: agent_key_private }
start:
- implementation: cloudify/scripts/onap/provision_definitions.py
+ implementation: cloudify/scripts/onap/create_resources_services.py
+ executor: central_deployment_agent
+ stop:
+ implementation: cloudify/scripts/onap/delete_resources_services.py
+ executor: central_deployment_agent
+ delete:
+ implementation: cloudify/scripts/onap/delete_namespace.py
executor: central_deployment_agent
+++ /dev/null
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: aai-dmaap
- namespace: "{{ .Values.nsPrefix }}-aai"
-spec:
- selector:
- matchLabels:
- app: aai-dmaap
- template:
- metadata:
- labels:
- app: aai-dmaap
- name: aai-dmaap
- annotations:
- pod.beta.kubernetes.io/init-containers: '[
- {
- "args": [
- "--container-name",
- "aai-kafka",
- "--container-name",
- "aai-zookeeper"
- ],
- "command": [
- "/root/ready.py"
- ],
- "env": [
- {
- "name": "NAMESPACE",
- "valueFrom": {
- "fieldRef": {
- "apiVersion": "v1",
- "fieldPath": "metadata.namespace"
- }
- }
- }
- ],
- "image": "{{ .Values.image.readiness }}",
- "imagePullPolicy": "{{ .Values.pullPolicy }}",
- "name": "aai-dmaap-readiness"
- }
- ]'
- spec:
- containers:
- - image: "{{ .Values.image.dmaapImage }}:{{ .Values.image.dmaapVersion}}"
- imagePullPolicy: {{ .Values.pullPolicy }}
- name: aai-dmaap
- ports:
- - containerPort: 3904
- - containerPort: 3905
- readinessProbe:
- tcpSocket:
- port: 3904
- initialDelaySeconds: 5
- periodSeconds: 10
- volumeMounts:
- - mountPath: /appl/dmaapMR1/bundleconfig/etc/appprops/MsgRtrApi.properties
- name: appprops
- - mountPath: /appl/dmaapMR1/etc/cadi.properties
- name: cadi
- - mountPath: /appl/dmaapMR1/etc/keyfile
- name: mykey
- restartPolicy: Always
- volumes:
- - name: appprops
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/aai/message-router/dmaap/MsgRtrApi.properties
- - name: cadi
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/aai/message-router/dmaap/cadi.properties
- - name: mykey
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/aai/message-router/dmaap/mykey
- imagePullSecrets:
- - name: "{{ .Values.nsPrefix }}-docker-registry-key"
+++ /dev/null
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: aai-kafka
- namespace: "{{ .Values.nsPrefix }}-aai"
-spec:
- selector:
- matchLabels:
- app: aai-kafka
- template:
- metadata:
- labels:
- app: aai-kafka
- name: aai-kafka
- annotations:
- pod.beta.kubernetes.io/init-containers: '[
- {
- "args": [
- "--container-name",
- "aai-zookeeper"
- ],
- "command": [
- "/root/ready.py"
- ],
- "env": [
- {
- "name": "NAMESPACE",
- "valueFrom": {
- "fieldRef": {
- "apiVersion": "v1",
- "fieldPath": "metadata.namespace"
- }
- }
- }
- ],
- "image": "{{ .Values.image.readiness }}",
- "imagePullPolicy": "{{ .Values.pullPolicy }}",
- "name": "aai-kafka-readiness"
- }
- ]'
- spec:
- containers:
- - image: "{{ .Values.image.kafkaImage }}:{{ .Values.image.kafkaVersion }}"
- imagePullPolicy: {{ .Values.pullPolicy }}
- name: aai-kafka
- ports:
- - containerPort: 9092
- readinessProbe:
- tcpSocket:
- port: 9092
- initialDelaySeconds: 5
- periodSeconds: 10
- env:
- - name: KAFKA_ZOOKEEPER_CONNECT
- value: "aai-zookeeper.{{ .Values.nsPrefix }}-aai:2181"
- - name: KAFKA_ADVERTISED_HOST_NAME
- value: "aai-kafka"
- - name: KAFKA_BROKER_ID
- value: "1"
- - name: KAFKA_ADVERTISED_PORT
- value: "9092"
- - name: KAFKA_PORT
- value: "9092"
- volumeMounts:
- - mountPath: /var/run/docker.sock
- name: docker-socket
- - mountPath: /kafka
- name: kafka-data
- - mountPath: /start-kafka.sh
- name: start-kafka
- restartPolicy: Always
- volumes:
- - name: docker-socket
- hostPath:
- path: /var/run/docker.sock
- - name: kafka-data
- hostPath:
- path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/"
- - name: start-kafka
- hostPath:
- path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/message-router/dcae-startup-vm-message-router/docker_files/start-kafka.sh"
- imagePullSecrets:
- - name: "{{ .Values.nsPrefix }}-docker-registry-key"
labels:
app: aai-resources
name: aai-resources
+ annotations:
+ pod.beta.kubernetes.io/init-containers: '[
+ {
+ "args": [
+ "--container-name",
+ "hbase"
+ ],
+ "command": [
+ "/root/ready.py"
+ ],
+ "env": [
+ {
+ "name": "NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "apiVersion": "v1",
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ }
+ ],
+ "image": "{{ .Values.image.readiness }}",
+ "imagePullPolicy": "{{ .Values.pullPolicy }}",
+ "name": "aai-resources-readiness"
+ }
+ ]'
spec:
containers:
- name: aai-resources
- name: CHEF_GIT_URL
value: http://gerrit.onap.org/r/aai
volumeMounts:
- - mountPath: /opt/aai/logroot/AAI-RES/
+ - mountPath: /opt/aai/logroot/
name: aai-resources-logs
- - mountPath: /var/chef/aai-config/
- name: aai-config
- mountPath: /var/chef/aai-data/
name: aai-data
+ - mountPath: /docker-entrypoint.sh
+ name: entrypoint-override
ports:
- containerPort: 8447
readinessProbe:
- name: aai-resources-logs
hostPath:
path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/aai-resources/logs/"
- - name: aai-config
- hostPath:
- path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/aai-config/"
- name: aai-data
hostPath:
path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/aai-data/"
+ - name: entrypoint-override
+ hostPath:
+ path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/aai-resources/docker-entrypoint.sh"
restartPolicy: Always
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
labels:
app: aai-traversal
name: aai-traversal
+ annotations:
+ pod.beta.kubernetes.io/init-containers: '[
+ {
+ "args": [
+ "--container-name",
+ "hbase",
+ "--container-name",
+ "aai-resources"
+ ],
+ "command": [
+ "/root/ready.py"
+ ],
+ "env": [
+ {
+ "name": "NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "apiVersion": "v1",
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ }
+ ],
+ "image": "{{ .Values.image.readiness }}",
+ "imagePullPolicy": "{{ .Values.pullPolicy }}",
+ "name": "aai-traversal-readiness"
+ }
+ ]'
spec:
containers:
- name: aai-traversal
- name: CHEF_GIT_URL
value: http://gerrit.onap.org/r/aai
volumeMounts:
- - mountPath: /opt/aai/logroot/AAI-GQ/
+ - mountPath: /opt/aai/logroot/
name: aai-traversal-logs
- - mountPath: /var/chef/aai-config/
- name: aai-config
- mountPath: /var/chef/aai-data/
name: aai-data
+ - mountPath: /docker-entrypoint.sh
+ name: entrypoint-override
ports:
- containerPort: 8446
readinessProbe:
- name: aai-traversal-logs
hostPath:
path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/aai-traversal/logs/"
- - name: aai-config
- hostPath:
- path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/aai-config/"
- name: aai-data
hostPath:
path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/aai-data/"
+ - name: entrypoint-override
+ hostPath:
+ path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/aai-traversal/docker-entrypoint.sh"
restartPolicy: Always
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
+++ /dev/null
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: aai-zookeeper
- namespace: "{{ .Values.nsPrefix }}-aai"
-spec:
- selector:
- matchLabels:
- app: aai-zookeeper
- template:
- metadata:
- labels:
- app: aai-zookeeper
- name: aai-zookeeper
- spec:
- containers:
- - image: "{{ .Values.image.aaiZookeeperImage }}:{{ .Values.image.aaiZookeeperVersion }}"
- imagePullPolicy: {{ .Values.pullPolicy }}
- name: aai-zookeeper
- volumeMounts:
- - mountPath: /opt/zookeeper-3.4.9/data/
- name: aai-zookeeper-data
- ports:
- - containerPort: 2181
- readinessProbe:
- tcpSocket:
- port: 2181
- initialDelaySeconds: 5
- periodSeconds: 10
- volumes:
- - name: aai-zookeeper-data
- hostPath:
- path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper"
- restartPolicy: Always
- imagePullSecrets:
- - name: "{{ .Values.nsPrefix }}-docker-registry-key"
- name: "aai-service-port-8443"
port: 8443
targetPort: 8443
- nodePort: 30233
+ nodePort: {{ .Values.nodePortPrefix }}33
- name: "aai-service-port-8080"
port: 8080
targetPort: 8080
- nodePort: 30232
+ nodePort: {{ .Values.nodePortPrefix }}32
type: NodePort
selector:
app: aai-service
ports:
- name: "model-loader-service-port-8443"
port: 8443
- nodePort: 30229
+ nodePort: {{ .Values.nodePortPrefix }}29
- name: "model-loader-service-port-8080"
port: 8080
- nodePort: 30210
+ nodePort: {{ .Values.nodePortPrefix }}10
type: NodePort
selector:
app: model-loader-service
---
apiVersion: v1
kind: Service
-metadata:
- name: aai-dmaap
- namespace: "{{ .Values.nsPrefix }}-aai"
- labels:
- app: aai-dmaap
-spec:
- ports:
- - name: "aai-dmaap-port-3904"
- port: 3904
- - name: "aai-dmaap-port-3905"
- port: 3905
- selector:
- app: aai-dmaap
- clusterIP: None
----
-apiVersion: v1
-kind: Service
-metadata:
- name: aai-zookeeper
- namespace: "{{ .Values.nsPrefix }}-aai"
- labels:
- app: aai-zookeeper
-spec:
- ports:
- - name: "aai-zookeeper-port-2181"
- port: 2181
- selector:
- app: aai-zookeeper
- clusterIP: None
----
-apiVersion: v1
-kind: Service
metadata:
name: aai-traversal
namespace: "{{ .Values.nsPrefix }}-aai"
}
}
],
- "image": "oomk8s/readiness-check:1.0.0",
- "imagePullPolicy": "Always",
+ "image": "{{ .Values.image.readiness }}",
+ "imagePullPolicy": "{{ .Values.pullPolicy }}",
"name": "gremlin-readiness"
}
]'
- name: SERVER_TABLE
value: aaigraph.dev
- name: GREMLIN_HOST
- value: "gremlin.{{ .Values.nsPrefix }}-aai"
+ value: "gremlin"
ports:
- containerPort: 8182
readinessProbe:
nsPrefix: onap
pullPolicy: Always
+nodePortPrefix: 302
image:
readiness: oomk8s/readiness-check:1.0.0
aaiProxy: aaionap/haproxy
aaiHbaseVersion: latest
modelLoaderImage: nexus3.onap.org:10001/openecomp/model-loader
modelLoaderVersion: 1.1-STAGING-latest
- dmaapImage: attos/dmaap
- dmaapVersion: latest
aaiResourcesImage: nexus3.onap.org:10001/openecomp/aai-resources
aaiResourcesVersion: 1.1-STAGING-latest
aaiTraversalImage: nexus3.onap.org:10001/openecomp/aai-traversal
aaiTraversalVersion: 1.1-STAGING-latest
- aaiZookeeperImage: wurstmeister/zookeeper
- aaiZookeeperVersion: latest
dataRouterImage: nexus3.onap.org:10001/openecomp/data-router
dataRouterVersion: 1.1-STAGING-latest
elasticsearchImage: elasticsearch
sparkyBeImage: nexus3.onap.org:10001/openecomp/sparky-be
sparkyBeVersion: 1.1-STAGING-latest
gremlinServerImage: aaionap/gremlin-server
- kafkaImage: wurstmeister/kafka
- kafkaVersion: latest
\ No newline at end of file
- name: "appc-port-8282"
port: 8282
targetPort: 8181
- nodePort: 30230
+ nodePort: {{ .Values.nodePortPrefix }}30
- name: "appc-port-1830"
port: 1830
- nodePort: 30231
+ nodePort: {{ .Values.nodePortPrefix }}31
type: NodePort
selector:
app: appc
- name: "appc-dgbuilder-port"
port: 3000
targetPort: 3100
- nodePort: 30228
+ nodePort: {{ .Values.nodePortPrefix }}28
type: NodePort
selector:
app: appc-dgbuilder
nsPrefix: onap
pullPolicy: Always
+nodePortPrefix: 302
image:
readiness: oomk8s/readiness-check:1.0.0
appc: nexus3.onap.org:10001/openecomp/appc-image:1.1-STAGING-latest
mkdir -p /config-init/$NAMESPACE/aai/opt/aai/logroot/
mkdir -p /config-init/$NAMESPACE/aai/model-loader/logs/
mkdir -p /config-init/$NAMESPACE/aai/haproxy/log/
-mkdir -p /config-init/$NAMESPACE/aai/aai-traversal/logs/ajsc-jetty/gc/
-mkdir -p /config-init/$NAMESPACE/aai/aai-traversal/logs/dmaapAAIEventConsumer/
-mkdir -p /config-init/$NAMESPACE/aai/aai-traversal/logs/perf-audit/
-mkdir -p /config-init/$NAMESPACE/aai/aai-traversal/logs/rest/
-mkdir -p /config-init/$NAMESPACE/aai/aai-resources/logs/ajsc-jetty/gc/
-mkdir -p /config-init/$NAMESPACE/aai/aai-resources/logs/dmaapAAIEventConsumer/
-mkdir -p /config-init/$NAMESPACE/aai/aai-resources/logs/perf-audit/
-mkdir -p /config-init/$NAMESPACE/aai/aai-resources/logs/rest/
+mkdir -p /config-init/$NAMESPACE/aai/aai-traversal/logs/
+mkdir -p /config-init/$NAMESPACE/aai/aai-resources/logs/
mkdir -p /config-init/$NAMESPACE/aai/sparky-be/logs/
mkdir -p /config-init/$NAMESPACE/aai/elasticsearch/es-data/
+mkdir -p /config-init/$NAMESPACE/aai/search-data-service/logs/
+mkdir -p /config-init/$NAMESPACE/aai/data-router/logs/
chmod -R 777 /config-init/$NAMESPACE/sdc/logs/
chmod -R 777 /config-init/$NAMESPACE/portal/logs/
chmod -R 777 /config-init/$NAMESPACE/aai/aai-resources/logs/
chmod -R 777 /config-init/$NAMESPACE/aai/sparky-be/logs/
chmod -R 777 /config-init/$NAMESPACE/aai/elasticsearch/es-data/
+chmod -R 777 /config-init/$NAMESPACE/aai/search-data-service/logs/
+chmod -R 777 /config-init/$NAMESPACE/aai/data-router/logs/
+
# replace the default 'onap' namespace qualification of K8s hostnames within the config files
find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/\.onap-/\.$NAMESPACE-/g" {} \;
"AAI_KEYSTORE_PASSWD_X": "OBF:1vn21ugu1saj1v9i1v941sar1ugw1vo0",
"APPLICATION_SERVERS": "aai-service.onap-aai",
"AAI_DMAAP_PROTOCOL": "http",
- "AAI_DMAAP_HOST_PORT": "aai-dmaap.onap-aai:3904",
+ "AAI_DMAAP_HOST_PORT": "dmaap.onap-message-router:3904",
"AAI_DMAAP_TOPIC_NAME": "AAI-EVENT",
"AAI_NOTIFICATION_EVENT_DEFAULT_EVENT_STATUS": "UNPROCESSED",
"AAI_NOTIFICATION_EVENT_DEFAULT_EVENT_TYPE": "AAI-EVENT",
"AAI_KEYSTORE_PASSWD_X": "OBF:1vn21ugu1saj1v9i1v941sar1ugw1vo0",
"APPLICATION_SERVERS": "aai-service.onap-aai",
"AAI_DMAAP_PROTOCOL": "http",
- "AAI_DMAAP_HOST_PORT": "aai-dmaap.onap-aai:3904",
+ "AAI_DMAAP_HOST_PORT": "dmaap.onap-message-router:3904",
"AAI_DMAAP_TOPIC_NAME": "AAI-EVENT",
"AAI_NOTIFICATION_EVENT_DEFAULT_EVENT_STATUS": "UNPROCESSED",
"AAI_NOTIFICATION_EVENT_DEFAULT_EVENT_TYPE": "AAI-EVENT",
--- /dev/null
+###
+# ============LICENSE_START=======================================================
+# org.openecomp.aai
+# ================================================================================
+# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+###
+
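+# Clone the aai-config chef repo on first run, flatten the aai-resources
+# cookbook one level up, run chef-solo to generate the configuration, and
+# then start the AJSC runner.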
+cd /var/chef;
+
+CHEF_CONFIG_REPO=${CHEF_CONFIG_REPO:-aai-config};
+
+CHEF_GIT_URL=${CHEF_GIT_URL:-http://nexus.onap.org/r/aai};
+
+CHEF_CONFIG_GIT_URL=${CHEF_CONFIG_GIT_URL:-$CHEF_GIT_URL};
+CHEF_DATA_GIT_URL=${CHEF_DATA_GIT_URL:-$CHEF_GIT_URL};
+
+if [ ! -d "aai-config" ]; then
+
+ git clone --depth 1 -b ${CHEF_BRANCH} --single-branch ${CHEF_CONFIG_GIT_URL}/${CHEF_CONFIG_REPO}.git aai-config || {
+ echo "Error: Unable to clone the aai-config repo with url: ${CHEF_GIT_URL}/${CHEF_CONFIG_REPO}.git";
+ exit;
+ }
+
+ (cd aai-config/cookbooks/aai-resources/ && \
+ for f in $(ls); do mv $f ../; done && \
+ cd ../ && rmdir aai-resources);
+fi
+
+
+chef-solo \
+ -c /var/chef/aai-data/chef-config/dev/.knife/solo.rb \
+ -j /var/chef/aai-config/cookbooks/runlist-aai-resources.json \
+ -E ${AAI_CHEF_ENV};
+
+# TODO: If this runs, startup hangs and logs errors indicating aaiGraph.dev already exists in HBASE.
+# Commenting out until we figure out whether it is needed or not.
+# /opt/app/aai-resources/bin/createDBSchema.sh || {
+# echo "Error: Unable to create the db schema, please check if the hbase host is configured and up";
+# exit;
+# }
+
+
+java -cp ${CLASSPATH}:/opt/app/commonLibs/*:/opt/app/aai-resources/etc:/opt/app/aai-resources/lib/*:/opt/app/aai-resources/extJars/logback-access-1.1.7.jar:/opt/app/aai-resources/extJars/logback-core-1.1.7.jar:/opt/app/aai-resources/extJars/aai-core-${AAI_CORE_VERSION}.jar -server -XX:NewSize=512m -XX:MaxNewSize=512m -XX:SurvivorRatio=8 -XX:+DisableExplicitGC -verbose:gc -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:-UseBiasedLocking -XX:ParallelGCThreads=4 -XX:LargePageSizeInBytes=128m -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Dsun.net.inetaddr.ttl=180 -XX:+HeapDumpOnOutOfMemoryError -Dhttps.protocols=TLSv1.1,TLSv1.2 -DSOACLOUD_SERVICE_VERSION=1.0.1 -DAJSC_HOME=/opt/app/aai-resources/ -DAJSC_CONF_HOME=/opt/app/aai-resources/bundleconfig -DAJSC_SHARED_CONFIG=/opt/app/aai-resources/bundleconfig -DAFT_HOME=/opt/app/aai-resources -DAAI_CORE_VERSION=${AAI_CORE_VERSION} -Daai-core.version=${AAI_CORE_VERSION} -Dlogback.configurationFile=/opt/app/aai-resources/bundleconfig/etc/logback.xml -Xloggc:/opt/app/aai-resources/logs/ajsc-jetty/gc/graph-query_gc.log com.att.ajsc.runner.Runner context=/ port=8087 sslport=8447
\ No newline at end of file
--- /dev/null
+###
+# ============LICENSE_START=======================================================
+# org.openecomp.aai
+# ================================================================================
+# Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+###
+
+cd /var/chef;
+
+CHEF_CONFIG_REPO=${CHEF_CONFIG_REPO:-aai-config};
+
+CHEF_GIT_URL=${CHEF_GIT_URL:-http://nexus.onap.org/r/aai};
+
+CHEF_CONFIG_GIT_URL=${CHEF_CONFIG_GIT_URL:-$CHEF_GIT_URL};
+CHEF_DATA_GIT_URL=${CHEF_DATA_GIT_URL:-$CHEF_GIT_URL};
+
+if [ ! -d "aai-config" ]; then
+
+ git clone --depth 1 -b ${CHEF_BRANCH} --single-branch ${CHEF_CONFIG_GIT_URL}/${CHEF_CONFIG_REPO}.git aai-config || {
+ echo "Error: Unable to clone the aai-config repo with url: ${CHEF_GIT_URL}/${CHEF_CONFIG_REPO}.git";
+ exit;
+ }
+
+ (cd aai-config/cookbooks/aai-traversal/ && \
+ for f in $(ls); do mv $f ../; done && \
+ cd ../ && rmdir aai-traversal);
+fi
+
+chef-solo \
+ -c /var/chef/aai-data/chef-config/dev/.knife/solo.rb \
+ -j /var/chef/aai-config/cookbooks/runlist-aai-traversal.json \
+ -E ${AAI_CHEF_ENV};
+
+java -cp ${CLASSPATH}:/opt/app/commonLibs/*:/opt/app/aai-traversal/etc:/opt/app/aai-traversal/lib/*:/opt/app/aai-traversal/extJars/logback-access-1.1.7.jar:/opt/app/aai-traversal/extJars/logback-core-1.1.7.jar:/opt/app/aai-traversal/extJars/aai-core-${AAI_CORE_VERSION}.jar -server -XX:NewSize=512m -XX:MaxNewSize=512m -XX:SurvivorRatio=8 -XX:+DisableExplicitGC -verbose:gc -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:-UseBiasedLocking -XX:ParallelGCThreads=4 -XX:LargePageSizeInBytes=128m -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Dsun.net.inetaddr.ttl=180 -XX:+HeapDumpOnOutOfMemoryError -Dhttps.protocols=TLSv1.1,TLSv1.2 -DSOACLOUD_SERVICE_VERSION=1.0.1 -DAJSC_HOME=/opt/app/aai-traversal/ -DAJSC_CONF_HOME=/opt/app/aai-traversal/bundleconfig -DAJSC_SHARED_CONFIG=/opt/app/aai-traversal/bundleconfig -DAFT_HOME=/opt/app/aai-traversal -DAAI_CORE_VERSION=${AAI_CORE_VERSION} -Daai-core.version=${AAI_CORE_VERSION} -Dlogback.configurationFile=/opt/app/aai-traversal/bundleconfig/etc/logback.xml -Xloggc:/opt/app/aai-traversal/logs/ajsc-jetty/gc/graph-query_gc.log com.att.ajsc.runner.Runner context=/ port=8086 sslport=8446
\ No newline at end of file
<route xmlns="http://camel.apache.org/schema/spring" trace="true">
- <from uri="event-bus:mybus/?eventTopic=AAI-EVENT&groupName=datarouter&groupId=datarouter&url="http://aai-dmaap:3904"/>
+ <from uri="event-bus:mybus/?eventTopic=AAI-EVENT&groupName=datarouter&groupId=datarouter&url="http://dmaap.onap-message-router:3904"/>
<to uri="bean:entityEventPolicy?method=process"/>
</route>
+++ /dev/null
-0
-25
-ECOMP-PORTAL-OUTBOX-VID1 0 0
-PDPD-CONFIGURATION 0 2
-msgrtr.apinode.metrics.dmaap 1 26
-unauthenticated.SEC_MEASUREMENT_OUTPUT 1 1
-APPC-TEST2 0 0
-unauthenticated.TCA_EVENT_OUTPUT 1 1
-APPC-TEST1 0 0
-APPC-CL 0 2
-ECOMP-PORTAL-INBOX 0 0
-APPC-CL 1 0
-APPC-TEST2 1 1
-unauthenticated.TCA_EVENT_OUTPUT 0 1
-unauthenticated.SEC_MEASUREMENT_OUTPUT 0 1
-SDC-DISTR-NOTIF-TOPIC-SDC-OPENSOURCE-ENV1 0 0
-POLICY-CL-MGT 1 1
-PDPD-CONFIGURATION 1 0
-DCAE-CL-EVENT 1 1
-msgrtr.apinode.metrics.dmaap 0 4
-ECOMP-PORTAL-OUTBOX-APP1 0 0
-ECOMP-PORTAL-OUTBOX-SDC1 0 0
-POLICY-CL-MGT 0 1
-SDC-DISTR-STATUS-TOPIC-SDC-OPENSOURCE-ENV1 0 0
-DCAE-CL-EVENT 0 1
-ECOMP-PORTAL-OUTBOX-DBC1 0 0
-ECOMP-PORTAL-OUTBOX-POL1 0 0
+++ /dev/null
-0
-25
-ECOMP-PORTAL-OUTBOX-VID1 0 0
-PDPD-CONFIGURATION 0 2
-msgrtr.apinode.metrics.dmaap 1 26
-unauthenticated.SEC_MEASUREMENT_OUTPUT 1 1
-APPC-TEST2 0 0
-unauthenticated.TCA_EVENT_OUTPUT 1 1
-APPC-TEST1 0 0
-APPC-CL 0 2
-ECOMP-PORTAL-INBOX 0 0
-APPC-CL 1 0
-APPC-TEST2 1 1
-unauthenticated.TCA_EVENT_OUTPUT 0 1
-unauthenticated.SEC_MEASUREMENT_OUTPUT 0 1
-SDC-DISTR-NOTIF-TOPIC-SDC-OPENSOURCE-ENV1 0 0
-POLICY-CL-MGT 1 1
-PDPD-CONFIGURATION 1 0
-DCAE-CL-EVENT 1 1
-msgrtr.apinode.metrics.dmaap 0 4
-ECOMP-PORTAL-OUTBOX-APP1 0 0
-ECOMP-PORTAL-OUTBOX-SDC1 0 0
-POLICY-CL-MGT 0 1
-SDC-DISTR-STATUS-TOPIC-SDC-OPENSOURCE-ENV1 0 0
-DCAE-CL-EVENT 0 1
-ECOMP-PORTAL-OUTBOX-DBC1 0 0
-ECOMP-PORTAL-OUTBOX-POL1 0 0
+++ /dev/null
-#!/bin/bash
-
-if [[ -z "$KAFKA_PORT" ]]; then
- export KAFKA_PORT=9092
-fi
-if [[ -z "$KAFKA_ADVERTISED_PORT" ]]; then
- export KAFKA_ADVERTISED_PORT=$(docker port `hostname` $KAFKA_PORT | sed -r "s/.*:(.*)/\1/g")
-fi
-if [[ -z "$KAFKA_BROKER_ID" ]]; then
- # By default auto allocate broker ID
- #export KAFKA_BROKER_ID=-1
- export KAFKA_BROKER_ID=1
-fi
-#if [[ -z "$KAFKA_LOG_DIRS" ]]; then
- #export KAFKA_LOG_DIRS="/kafka/kafka-logs-$HOSTNAME"
- export KAFKA_LOG_DIRS="/kafka/kafka-logs"
-#fi
-if [[ -z "$KAFKA_ZOOKEEPER_CONNECT" ]]; then
- export KAFKA_ZOOKEEPER_CONNECT=$(env | grep ZK.*PORT_2181_TCP= | sed -e 's|.*tcp://||' | paste -sd ,)
-fi
-
-if [[ -n "$KAFKA_HEAP_OPTS" ]]; then
- sed -r -i "s/(export KAFKA_HEAP_OPTS)=\"(.*)\"/\1=\"$KAFKA_HEAP_OPTS\"/g" $KAFKA_HOME/bin/kafka-server-start.sh
- unset KAFKA_HEAP_OPTS
-fi
-
-if [[ -z "$KAFKA_ADVERTISED_HOST_NAME" && -n "$HOSTNAME_COMMAND" ]]; then
- export KAFKA_ADVERTISED_HOST_NAME=$(eval $HOSTNAME_COMMAND)
-fi
-
-for VAR in `env`
-do
- if [[ $VAR =~ ^KAFKA_ && ! $VAR =~ ^KAFKA_HOME ]]; then
- kafka_name=`echo "$VAR" | sed -r "s/KAFKA_(.*)=.*/\1/g" | tr '[:upper:]' '[:lower:]' | tr _ .`
- env_var=`echo "$VAR" | sed -r "s/(.*)=.*/\1/g"`
- if egrep -q "(^|^#)$kafka_name=" $KAFKA_HOME/config/server.properties; then
- sed -r -i "s@(^|^#)($kafka_name)=(.*)@\2=${!env_var}@g" $KAFKA_HOME/config/server.properties #note that no config values may contain an '@' char
- else
- echo "$kafka_name=${!env_var}" >> $KAFKA_HOME/config/server.properties
- fi
- fi
-done
-
-if [[ -n "$CUSTOM_INIT_SCRIPT" ]] ; then
- eval $CUSTOM_INIT_SCRIPT
-fi
-
-
-KAFKA_PID=0
-
-# see https://medium.com/@gchudnov/trapping-signals-in-docker-containers-7a57fdda7d86#.bh35ir4u5
-term_handler() {
- echo 'Stopping Kafka....'
- if [ $KAFKA_PID -ne 0 ]; then
- kill -s TERM "$KAFKA_PID"
- wait "$KAFKA_PID"
- fi
- echo 'Kafka stopped.'
- exit
-}
-
-
-# Capture kill requests to stop properly
-trap "term_handler" SIGHUP SIGINT SIGTERM
-create-topics.sh &
-$KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties &
-KAFKA_PID=$!
-
-wait "$KAFKA_PID"
+++ /dev/null
-###############################################################################
-##
-## Cambria API Server config
-##
-## - Default values are shown as commented settings.
-##
-
-###############################################################################
-##
-## HTTP service
-##
-## - 3904 is standard as of 7/29/14.
-#
-## Zookeeper Connection
-##
-## Both Cambria and Kafka make use of Zookeeper.
-##
-config.zk.servers=aai-zookeeper.onap-aai:2181
-#config.zk.servers=172.17.0.1:2181
-#dmaap.onap-message-router:2181
-#10.208.128.229:2181
-#config.zk.root=/fe3c/cambria/config
-
-
-###############################################################################
-##
-## Kafka Connection
-##
-## Items below are passed through to Kafka's producer and consumer
-## configurations (after removing "kafka.")
-## if you want to change request.required.acks it can take this one value
-#kafka.metadata.broker.list=localhost:9092,localhost:9093
-kafka.metadata.broker.list=aai-kafka.onap-aai:9092
-#kafka.metadata.broker.list=172.17.0.1:9092
-#dmaap.onap-message-router:9092
-#10.208.128.229:9092
-##kafka.request.required.acks=-1
-#kafka.client.zookeeper=${config.zk.servers}
-consumer.timeout.ms=100
-zookeeper.connection.timeout.ms=6000
-zookeeper.session.timeout.ms=6000
-zookeeper.sync.time.ms=2000
-auto.commit.interval.ms=1000
-fetch.message.max.bytes =1000000
-auto.commit.enable=false
-
-
-###############################################################################
-##
-## Secured Config
-##
-## Some data stored in the config system is sensitive -- API keys and secrets,
-## for example. to protect it, we use an encryption layer for this section
-## of the config.
-##
-## The key is a base64 encode AES key. This must be created/configured for
-## each installation.
-#cambria.secureConfig.key=
-##
-## The initialization vector is a 16 byte value specific to the secured store.
-## This must be created/configured for each installation.
-#cambria.secureConfig.iv=
-
-## Southfield Sandbox
-cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
-cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
-authentication.adminSecret=fe3cCompound
-#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
-#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
-
-
-###############################################################################
-##
-## Consumer Caching
-##
-## Kafka expects live connections from the consumer to the broker, which
-## obviously doesn't work over connectionless HTTP requests. The Cambria
-## server proxies HTTP requests into Kafka consumer sessions that are kept
-## around for later re-use. Not doing so is costly for setup per request,
-## which would substantially impact a high volume consumer's performance.
-##
-## This complicates Cambria server failover, because we often need server
-## A to close its connection before server B brings up the replacement.
-##
-
-## The consumer cache is normally enabled.
-#cambria.consumer.cache.enabled=true
-
-## Cached consumers are cleaned up after a period of disuse. The server inspects
-## consumers every sweepFreqSeconds and will clean up any connections that are
-## dormant for touchFreqMs.
-#cambria.consumer.cache.sweepFreqSeconds=15
-#cambria.consumer.cache.touchFreqMs=120000
-
-## The cache is managed through ZK. The default value for the ZK connection
-## string is the same as config.zk.servers.
-#cambria.consumer.cache.zkConnect=${config.zk.servers}
-
-##
-## Shared cache information is associated with this node's name. The default
-## name is the hostname plus the HTTP service port this host runs on. (The
-## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
-## which is not always adequate.) You can set this value explicitly here.
-##
-#cambria.api.node.identifier=<use-something-unique-to-this-instance>
-
-###############################################################################
-##
-## Metrics Reporting
-##
-## This server can report its metrics periodically on a topic.
-##
-#metrics.send.cambria.enabled=true
-#metrics.send.cambria.topic=cambria.apinode.metrics #msgrtr.apinode.metrics.dmaap
-#metrics.send.cambria.sendEverySeconds=60
-
-cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
-
-##############################################################################
-#100mb
-maxcontentlength=10000
-
-
-##############################################################################
-#AAF Properties
-msgRtr.namespace.aaf=org.openecomp.dcae.dmaap.mtnje2.mr.topic
-msgRtr.topicfactory.aaf=org.openecomp.dcae.dmaap.topicFactory|:org.openecomp.dcae.dmaap.mtnje2.mr.topic:
-enforced.topic.name.AAF=org.openecomp
-forceAAF=false
-transidUEBtopicreqd=false
-defaultNSforUEB=org.openecomp.dmaap.mr.ueb
-##############################################################################
-#Mirror Maker Agent
-msgRtr.mirrormakeradmin.aaf=org.openecomp.dmaap.mr.dev.mirrormaker|*|admin
-msgRtr.mirrormakeruser.aaf=org.openecomp.dmaap.mr.dev.mirrormaker|*|user
-msgRtr.mirrormakeruser.aaf.create=org.openecomp.dmaap.mr.dev.topicFactory|:org.openecomp.dmaap.mr.dev.topic:
-msgRtr.mirrormaker.timeout=15000
-msgRtr.mirrormaker.topic=org.openecomp.dmaap.mr.prod.mm.agent
-msgRtr.mirrormaker.consumergroup=mmagentserver
-msgRtr.mirrormaker.consumerid=1
+++ /dev/null
-basic_realm=openecomp.org
-basic_warn=TRUE
-
-cadi_loglevel=DEBUG
-#cadi_keyfile=target/swm/package/nix/dist_files/appl/${artifactId}/etc/keyfile2
-cadi_keyfile=/appl/dmaapMR1/etc/keyfile
-# Configure AAF
-aaf_url=https://DME2RESOLVE/service=org.openecomp.authz.AuthorizationService/version=2.0/envContext=DEV/routeOffer=BAU_SE
-
-aaf_id=dgl@openecomp.org
-aaf_password=enc:f2u5br1mh29M02-
-aaf_timeout=5000
-aaf_clean_interval=1200000
-aaf_user_expires=60000
-aaf_high_count=1000000
-
-
-# The following properties are being set by the AJSC Container and should NOT need to be set here.
-AFT_LATITUDE=33.823589
-AFT_LONGITUDE=-84.366982
-AFT_ENVIRONMENT=AFTUAT
+++ /dev/null
-_sNOLphPzrU7L0L3oWv0pYwgV_ddGF1XoBsQEIAp34jfP-fGJFPfFYaMpDEZ3gwH59rNw6qyMZHk
-k-4irklvVcWk36lC3twNvc0DueRCVrws1bkuhOLCXdxHJx-YG-1xM8EJfRmzh79WPlPkbAdyPmFF
-Ah44V0GjAnInPOFZA6MHP9rNx9B9qECHRfmvzU13vJCcgTsrmOr-CEiWfRsnzPjsICxpq9OaVT_D
-zn6rNaroGm1OiZNCrCgvRkCUHPOOCw3j9G1GeaImoZNYtozbz9u4sj13PU-MxIIAa64b1bMMMjpz
-Upc8lVPI4FnJKg6axMmEGn5zJ6JUq9mtOVyPj__2GEuDgpx5H4AwodXXVjFsVgR8UJwI_BvS2JVp
-JoQk0J1RqXmAXVamlsMAfzmmbARXgmrBfnuhveZnh9ymFVU-YZeujdANniXAwBGI7c6hG_BXkH7i
-Eyf4Fn41_SV78PskP6qgqJahr9r3bqdjNbKBztIKCOEVrE_w3IM5r02l-iStk_NBRkj6cq_7VCpG
-afxZ2CtZMwuZMiypO_wOgbdpCSKNzsL-NH2b4b08OlKiWb263gz634KJmV5WEfCl-6eH-JUFbWOS
-JwQfActLNT2ZQPl2MyZQNBzJEWoJRgS6k7tPRO-zqeUtYYHGHVMCxMuMHGQcoilNNHEFeBCG_fBh
-yAKb9g9F86Cbx9voMLiyTX2T3rwVHiSJFOzfNxGmfN5JWOthIun_c5hEY1tLQ15BomzkDwk7BAj7
-VbRCrVD45B6xrmSTMBSWYmLyr6mnQxQqeh9cMbD-0ZAncE3roxRnRvPKjFFa208ykYUp2V83r_PJ
-fV5I9ZPKSjk9DwFyrjkcQQEYDhdK6IFqcd6nEthjYVkmunu2fsX0bIOm9GGdIbKGqBnpdgBO5hyT
-rBr9HSlZrHcGdti1R823ckDF0Ekcl6kioDr5NLIpLtg9zUEDRm3QrbX2mv5Zs8W0pYnOqglxy3lz
-bJZTN7oR7VasHUtjmp0RT9nLZkUs5TZ6MHhlIq3ZsQ6w_Q9Rv1-ofxfwfCC4EBrWKbWAGCf6By4K
-Ew8321-2YnodhmsK5BrT4zQ1DZlmUvK8BmYjZe7wTljKjgYcsLTBfX4eMhJ7MIW1kpnl8AbiBfXh
-QzN56Mki51Q8PSQWHm0W9tnQ0z6wKdck6zBJ8JyNzewZahFKueDTn-9DOqIDfr3YHvQLLzeXyJ8e
-h4AgjW-hvlLzRGtkCknjLIgXVa3rMTycseAwbW-mgdCqqkw3SdEG8feAcyntmvE8j2jbtSDStQMB
-9JdvyNLuQdNG4pxpusgvVso0-8NQF0YVa9VFwg9U6IPSx5p8FcW68OAHt_fEgT4ZtiH7o9aur4o9
-oYqUh2lALCY-__9QLq1KkNjMKs33Jz9E8LbRerG9PLclkTrxCjYAeUWBjCwSI7OB7xkuaYDSjkjj
-a46NLpdBN1GNcsFFcZ79GFAK0_DsyxGLX8Tq6q0Bvhs8whD8wlSxpTGxYkyqNX-vcb7SDN_0WkCE
-XSdZWkqTHXcYbOvoCOb_e6SFAztuMenuHWY0utX0gBfx_X5lPDFyoYXErxFQHiA7t27keshXNa6R
-ukQRRS8kMjre1U74sc-fRNXkXpl57rG4rgxaEX0eBeowa53KAsVvUAoSac2aC_nfzXrDvoyf9Xi3
-JpEZNhUDLpFCEycV4I7jGQ9wo9qNaosvlsr6kbLDNdb_1xrGVgjT3xEvRNJNPqslSAu-yD-UFhC3
-AmCdYUnugw_eEFqXCHTARcRkdPPvl2XsmEKY2IqEeO5tz4DyXQFaL-5hEVh6lYEU1EOWHk3UGIXe
-Vc5_Ttp82qNLmlJPbZvgmNTJzYTHDQ_27KBcp7IVVZgPDjVKdWqQvZ18KhxvfF3Idgy82LBZniFV
-IbtxllXiPRxoPQriSXMnXjh3XkvSDI2pFxXfEvLRn1tvcFOwPNCz3QfPIzYg8uYXN5bRt3ZOrR_g
-ZhIlrc7HO0VbNbeqEVPKMZ-cjkqGj4VAuDKoQc0eQ6X_wCoAGO78nPpLeIvZPx1X3z5YoqNA
\ No newline at end of file
#
# The ip address/hostname and port to the desired Search Data Service instance
#
-search-service.ipAddress=search-data-service
+search-service.ipAddress=search-data-service.onap-aai
search-service.httpPort=9509
############################## Indexes ##############################
+++ /dev/null
-<?xml version="1.0"?>
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html"/>
-<xsl:template match="configuration">
-<html>
-<body>
-<table border="1">
-<tr>
- <td>name</td>
- <td>value</td>
- <td>description</td>
-</tr>
-<xsl:for-each select="property">
-<tr>
- <td><a name="{name}"><xsl:value-of select="name"/></a></td>
- <td><xsl:value-of select="value"/></td>
- <td><xsl:value-of select="description"/></td>
-</tr>
-</xsl:for-each>
-</table>
-</body>
-</html>
-</xsl:template>
-</xsl:stylesheet>
+++ /dev/null
-# Define some default values that can be overridden by system properties
-zookeeper.root.logger=INFO, CONSOLE
-zookeeper.console.threshold=INFO
-zookeeper.log.dir=.
-zookeeper.log.file=zookeeper.log
-zookeeper.log.threshold=DEBUG
-zookeeper.tracelog.dir=.
-zookeeper.tracelog.file=zookeeper_trace.log
-
-#
-# ZooKeeper Logging Configuration
-#
-
-# Format is "<default threshold> (, <appender>)+
-
-# DEFAULT: console appender only
-log4j.rootLogger=${zookeeper.root.logger}
-
-# Example with rolling log file
-#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
-
-# Example with rolling log file and tracing
-#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
-
-#
-# Log INFO level and above messages to the console
-#
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold}
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
-
-#
-# Add ROLLINGFILE to rootLogger to get log file output
-# Log DEBUG level and above messages to a log file
-log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
-log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold}
-log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file}
-
-# Max log file size of 10MB
-log4j.appender.ROLLINGFILE.MaxFileSize=10MB
-# uncomment the next line to limit number of backup files
-log4j.appender.ROLLINGFILE.MaxBackupIndex=10
-
-log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
-
-
-#
-# Add TRACEFILE to rootLogger to get log file output
-# Log DEBUG level and above messages to a log file
-log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
-log4j.appender.TRACEFILE.Threshold=TRACE
-log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file}
-
-log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
-### Notice we are including log4j's NDC here (%x)
-log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n
+++ /dev/null
-# The number of milliseconds of each tick
-tickTime=2000
-# The number of ticks that the initial
-# synchronization phase can take
-initLimit=10
-# The number of ticks that can pass between
-# sending a request and getting an acknowledgement
-syncLimit=5
-# the directory where the snapshot is stored.
-# do not use /tmp for storage, /tmp here is just
-# example sakes.
-dataDir=/opt/zookeeper-3.4.9/data
-# the port at which the clients will connect
-clientPort=2181
-# the maximum number of client connections.
-# increase this if you need to handle more clients
-#maxClientCnxns=60
-#
-# Be sure to read the maintenance section of the
-# administrator guide before turning on autopurge.
-#
-# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
-#
-# The number of snapshots to retain in dataDir
-autopurge.snapRetainCount=3
-# Purge task interval in hours
-# Set to "0" to disable auto purge feature
-autopurge.purgeInterval=1
ports:
- name: mr1
port: 3904
- nodePort: 30227
+ nodePort: {{ .Values.nodePortPrefix }}27
- name: mr2
port: 3905
- nodePort: 30226
+ nodePort: {{ .Values.nodePortPrefix }}26
selector:
app: dmaap
type: NodePort
nsPrefix: onap
pullPolicy: Always
+nodePortPrefix: 302
image:
readiness: oomk8s/readiness-check:1.0.0
dmaap: attos/dmaap:latest
spec:
ports:
- port: 3306
- nodePort: 30252
+ nodePort: {{ .Values.nodePortPrefix }}52
selector:
app: mariadb
type: NodePort
ports:
- name: mso1
port: 8080
- nodePort: 30223
+ nodePort: {{ .Values.nodePortPrefix }}23
- name: mso2
port: 3904
- nodePort: 30225
+ nodePort: {{ .Values.nodePortPrefix }}25
- name: mso3
port: 3905
- nodePort: 30224
+ nodePort: {{ .Values.nodePortPrefix }}24
- name: mso4
port: 9990
- nodePort: 30222
+ nodePort: {{ .Values.nodePortPrefix }}22
- name: mso5
port: 8787
- nodePort: 30250
+ nodePort: {{ .Values.nodePortPrefix }}50
type: NodePort
nsPrefix: onap
pullPolicy: Always
+nodePortPrefix: 302
image:
readiness: oomk8s/readiness-check:1.0.0
mso: nexus3.onap.org:10001/openecomp/mso:1.1-STAGING-latest
}
create_onap_helm() {
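+ # $1 = namespace prefix, $2 = app name, $3 = nodePort prefix forwarded
+ # to the chart as nodePortPrefix.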
- helm install ../$2/ --name $2 --namespace $1 --set nsPrefix=$1
-}
-
-configure_app() {
- # if previous configuration exists put back original template file
- for file in $3/*.yaml; do
- if [ -e "$file-template" ]; then
- mv "$file-template" "${file%}"
- fi
- done
-
- if [ -e "$2/Chart.yaml" ]; then
- sed -i-- 's/nodePort: [0-9]\{2\}[02468]\{1\}/nodePort: '"$4"'/g' $3/all-services.yaml
- sed -i-- 's/nodePort: [0-9]\{2\}[13579]\{1\}/nodePort: '"$5"'/g' $3/all-services.yaml
- fi
+ helm install ../$2/ --name $1-$2 --namespace $1 --set nsPrefix=$1 --set nodePortPrefix=$3
}
MAX_INSTANCE=5
DU=$ONAP_DOCKER_USER
DP=$ONAP_DOCKER_PASS
+_FILES_PATH=$(echo ../$i/templates)
while getopts ":n:u:s:i:a:du:dp:" PARAM; do
case $PARAM in
create_registry_key $NS $i ${NS}-docker-registry-key $ONAP_DOCKER_REGISTRY $DU $DP $ONAP_DOCKER_MAIL
printf "\nCreating deployments and services **********\n"
- _FILES_PATH=$(echo ../$i/templates)
- configure_app $NS $i $_FILES_PATH $start $end
- create_onap_helm $NS $i
+ create_onap_helm $NS $i $start
printf "\n"
done
}
delete_app_helm() {
- helm delete $1 --purge
+ helm delete $1-$2 --purge
}
usage() {
for i in ${HELM_APPS[@]}; do
- delete_app_helm $i
+ delete_app_helm $NS $i
delete_namespace $NS $i
done
ports:
- name: "drools-port"
port: 6969
- nodePort: 30217
+ nodePort: {{ .Values.nodePortPrefix }}17
selector:
app: drools
type: NodePort
ports:
- name: 8443-port
port: 8443
- nodePort: 30219
+ nodePort: {{ .Values.nodePortPrefix }}19
- name: 9091-port
port: 9091
- nodePort: 30218
+ nodePort: {{ .Values.nodePortPrefix }}18
selector:
app: pap
type: NodePort
ports:
- name: 8081-port
port: 8081
- nodePort: 30220
+ nodePort: {{ .Values.nodePortPrefix }}20
selector:
app: pdp
type: NodePort
ports:
- name: tcp-31032-8480-bm91k
port: 8480
- nodePort: 30221
+ nodePort: {{ .Values.nodePortPrefix }}21
selector:
app: pypdp
type: NodePort
ports:
- name: 9989-port
port: 9989
- nodePort: 30216
+ nodePort: {{ .Values.nodePortPrefix }}16
selector:
app: brmsgw
type: NodePort
nsPrefix: onap
pullPolicy: Always
+nodePortPrefix: 302
image:
readiness: oomk8s/readiness-check:1.0.0
policyPe: nexus3.onap.org:10001/openecomp/policy/policy-pe:1.0-STAGING-latest
spec:
ports:
- name: portal-1
- nodePort: 30213
+ nodePort: {{ .Values.nodePortPrefix }}13
port: 8006
targetPort: 8005
- name: portal-2
- nodePort: 30214
+ nodePort: {{ .Values.nodePortPrefix }}14
port: 8010
targetPort: 8009
- name: portal-3
- nodePort: 30215
+ nodePort: {{ .Values.nodePortPrefix }}15
port: 8989
targetPort: 8080
selector:
- name: tcp-1
port: 6080
targetPort: 80
- nodePort: 30211
+ nodePort: {{ .Values.nodePortPrefix }}11
- name: tcp-2
port: 5900
targetPort: 5900
- nodePort: 30212
+ nodePort: {{ .Values.nodePortPrefix }}12
selector:
app: vnc-portal
type: NodePort
},
{
"command": ["/bin/sh","-c"],
- "args": ["echo `host sdc-be.{{ .Values.nsPrefix }}-sdc | awk ''{print$4}''` sdc.api.be.simpledemo.openecomp.org >> /ubuntu-init/hosts; echo `host portalapps.{{ .Values.nsPrefix }}-portal | awk ''{print$4}''` portal.api.simpledemo.openecomp.org >> /ubuntu-init/hosts; echo `host pap.{{ .Values.nsPrefix }}-policy | awk ''{print$4}''` policy.api.simpledemo.openecomp.org >> /ubuntu-init/hosts; echo `host sdc-fe.{{ .Values.nsPrefix }}-sdc | awk ''{print$4}''` sdc.ui.simpledemo.openecomp.org >> /ubuntu-init/hosts; echo `host vid-server.{{ .Values.nsPrefix }}-vid | awk ''{print$4}''` vid.api.simpledemo.openecomp.org >> /ubuntu-init/hosts; echo `host sparky-fe.{{ .Values.nsPrefix }}-aai | awk ''{print$4}''` aai.api.simpledemo.openecomp.org >> /ubuntu-init/hosts"],
+ "args": ["echo `host sdc-be.{{ .Values.nsPrefix }}-sdc | awk ''{print$4}''` sdc.api.be.simpledemo.openecomp.org >> /ubuntu-init/hosts; echo `host portalapps.{{ .Values.nsPrefix }}-portal | awk ''{print$4}''` portal.api.simpledemo.openecomp.org >> /ubuntu-init/hosts; echo `host pap.{{ .Values.nsPrefix }}-policy | awk ''{print$4}''` policy.api.simpledemo.openecomp.org >> /ubuntu-init/hosts; echo `host sdc-fe.{{ .Values.nsPrefix }}-sdc | awk ''{print$4}''` sdc.ui.simpledemo.openecomp.org >> /ubuntu-init/hosts; echo `host vid-server.{{ .Values.nsPrefix }}-vid | awk ''{print$4}''` vid.api.simpledemo.openecomp.org >> /ubuntu-init/hosts; echo `host sparky-be.{{ .Values.nsPrefix }}-aai | awk ''{print$4}''` aai.api.simpledemo.openecomp.org >> /ubuntu-init/hosts"],
"image": "{{ .Values.image.ubuntuInit }}",
"imagePullPolicy": "{{ .Values.pullPolicy }}",
"name": "vnc-init-hosts",
nsPrefix: onap
pullPolicy: Always
+nodePortPrefix: 302
image:
readiness: oomk8s/readiness-check:1.0.0
portalapps: nexus3.onap.org:10001/openecomp/portalapps:1.1-STAGING-latest
spec:
ports:
- port: 88
- nodePort: 30209
+ nodePort: {{ .Values.nodePortPrefix }}09
selector:
app: robot
type: NodePort
spec:
ports:
- port: 88
- nodePort: 30209
+ nodePort: {{ .Values.nodePortPrefix }}09
selector:
app: robot
type: NodePort
nsPrefix: onap
pullPolicy: Always
+nodePortPrefix: 302
image:
testsuite: nexus3.onap.org:10001/openecomp/testsuite:1.1-STAGING-latest
spec:
ports:
- name: sdc-be-port-8443
- nodePort: 30204
+ nodePort: {{ .Values.nodePortPrefix }}04
port: 8443
- name: sdc-be-port-8080
- nodePort: 30205
+ nodePort: {{ .Values.nodePortPrefix }}05
port: 8080
selector:
app: sdc-be
spec:
ports:
- name: sdc-fe-port-9443
- nodePort: 30207
+ nodePort: {{ .Values.nodePortPrefix }}07
port: 9443
- name: sdc-fe-port-8181
- nodePort: 30206
+ nodePort: {{ .Values.nodePortPrefix }}06
port: 8181
selector:
app: sdc-fe
name: sdc-sdc-es-es
- mountPath: /root/chef-solo/environments/
name: sdc-environments
- - mountPath: /var/lib/jetty/etc/keystore
- name: sdc-jetty-keystore
- mountPath: /etc/localtime
name: sdc-localtime
- mountPath: /var/lib/jetty/logs
- name: sdc-environments
hostPath:
path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/environments
- - name: sdc-jetty-keystore
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/jetty/keystore
- name: sdc-localtime
hostPath:
path: /etc/localtime
name: sdc-sdc-es-es
- mountPath: /root/chef-solo/environments/
name: sdc-environments
- - mountPath: /var/lib/jetty/etc/keystore
- name: sdc-jetty-keystore
- mountPath: /etc/localtime
name: sdc-localtime
- mountPath: /var/lib/jetty/logs
- name: sdc-environments
hostPath:
path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/environments
- - name: sdc-jetty-keystore
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/jetty/keystore
- name: sdc-localtime
hostPath:
path: /etc/localtime
nsPrefix: onap
pullPolicy: Always
+nodePortPrefix: 302
image:
readiness: oomk8s/readiness-check:1.0.0
sdcKibana: nexus3.onap.org:10001/openecomp/sdc-kibana:1.1-STAGING-latest
- name: "sdnc-dgbuilder-port"
port: 3000
targetPort: 3100
- nodePort: 30203
+ nodePort: {{ .Values.nodePortPrefix }}03
type: NodePort
selector:
app: sdnc-dgbuilder
- name: "sdnc-port"
port: 8282
targetPort: 8181
- nodePort: 30202
+ nodePort: {{ .Values.nodePortPrefix }}02
type: NodePort
selector:
app: sdnc
ports:
- name: "sdnc-portal-port"
port: 8843
- nodePort: 30201
+ nodePort: {{ .Values.nodePortPrefix }}01
type: NodePort
selector:
app: sdnc-portal
nsPrefix: onap
pullPolicy: Always
+nodePortPrefix: 302
image:
readiness: oomk8s/readiness-check:1.0.0
mysqlServer: mysql/mysql-server:5.6
spec:
ports:
- name: vid-server
- nodePort: 30200
+ nodePort: {{ .Values.nodePortPrefix }}00
port: 8080
selector:
app: vid-server
nsPrefix: onap
pullPolicy: IfNotPresent
+nodePortPrefix: 302
image:
readiness: oomk8s/readiness-check:1.0.0
mariadb: nexus3.onap.org:10001/library/mariadb:10
imports:
- https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml
- # Plugin required: https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/releases/download/1.2.0/cloudify_kubernetes_plugin-1.2.0-py27-none-linux_x86_64-centos-Core.wgn
- - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.0/plugin.yaml
+ # Plugin required: https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/releases/download/1.2.1rc1/cloudify_kubernetes_plugin-1.2.1rc1-py27-none-linux_x86_64-centos-Core.wgn
+ - https://raw.githubusercontent.com/cloudify-incubator/cloudify-kubernetes-plugin/1.2.1rc1/plugin.yaml
+ # Plugin required: http://repository.cloudifysource.org/cloudify/wagons/cloudify-fabric-plugin/1.4.2/cloudify_fabric_plugin-1.4.2-py27-none-linux_x86_64-centos-Core.wgn
+ - http://www.getcloudify.org/spec/fabric-plugin/1.4.2/plugin.yaml
- cloudify/types/onap.yaml
inputs:
description: >
File content of the Kubernetes master YAML configuration
- apps:
- description: >
- List of ONAP apps names to be deployed.
- Default empty array (deploy all available apps).
- default: []
-
namespace_prefix:
type: string
description: >
Kubernetes namespace name prefix which will be used for all ONAP apps
default: onap
- docker_registry:
- type: string
- default: regsecret
-
- docker_server:
- type: string
- default: nexus3.onap.org:10001
-
- docker_username:
- type: string
- default: docker
-
- docker_password:
- type: string
- default: docker
-
- docker_email:
- type: string
- default: email@email.com
-
dsl_definitions:
- inputs: &app_inputs
- namespace_prefix: { get_input: namespace_prefix }
- docker_registry: { get_input: docker_registry }
- docker_server: { get_input: docker_server }
- docker_username: { get_input: docker_username }
- docker_password: { get_input: docker_password }
- docker_email: { get_input: docker_email }
+ options: &app_options
+ namespace:
+ concat: [{ get_input: namespace_prefix }, '-', { get_property: [SELF, name] }]
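The &app_options anchor replaces the old &app_inputs block: rather than threading registry credentials through every app, each app now only needs its target namespace, which Cloudify's concat intrinsic builds from the namespace_prefix input and the node's own name property. A sketch of the value it resolves to, assuming the default prefix onap:

def resolve_namespace(namespace_prefix, app_name):
    # Mirrors concat: [prefix, '-', name] from the blueprint.
    return '{0}-{1}'.format(namespace_prefix, app_name)


assert resolve_namespace('onap', 'mso') == 'onap-mso'
assert resolve_namespace('onap', 'sdc') == 'onap-sdc'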
node_templates:
kubernetes_master:
configuration:
file_content: { get_input: kubernetes_configuration_file_content }
- init_pod:
- type: cloudify.kubernetes.resources.Pod
+ onap_environment:
+ type: cloudify.onap.kubernetes.Environment
properties:
- definition:
- file:
- resource_path: kubernetes/config/pod-config-init.yaml
+ namespace: { get_input: namespace_prefix }
+ init_pod: kubernetes/config/pod-config-init.yaml
+ options:
+ namespace: { get_input: namespace_prefix }
relationships:
- type: cloudify.kubernetes.relationships.managed_by_master
target: kubernetes_master
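onap_environment replaces the bare init_pod Pod resource: it owns the shared per-prefix setup (the namespace plus the init pod from pod-config-init.yaml), and every *_app node below depends on it rather than on the pod directly. A rough, runnable sketch of the resulting ordering, with hypothetical helper bodies standing in for the actual plugin tasks:

def create_environment(prefix):
    print('setting up environment for {0}'.format(prefix))


def deploy_app(prefix, app):
    print('deploying {0} into namespace {1}-{2}'.format(app, prefix, app))


def deploy(prefix, apps):
    create_environment(prefix)   # runs exactly once, before any app
    for app in apps:
        deploy_app(prefix, app)  # each *_app: depends_on onap_environment


deploy('onap', ['mso', 'message-router', 'sdc', 'aai', 'robot',
                'vid', 'sdnc', 'portal', 'policy', 'appc'])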
type: cloudify.onap.kubernetes.App
properties:
name: mso
+ values: kubernetes/mso/values.yaml
resources:
- - kubernetes/mso/templates/mso-deployment.yaml
- - kubernetes/mso/templates/db-deployment.yaml
+ - kubernetes/mso/templates/mso-deployment.yaml
+ - kubernetes/mso/templates/db-deployment.yaml
services: kubernetes/mso/templates/all-services.yaml
- inputs: *app_inputs
+ options: *app_options
relationships:
- type: cloudify.kubernetes.relationships.managed_by_master
target: kubernetes_master
- type: cloudify.relationships.depends_on
- target: init_pod
+ target: onap_environment
message_router_app:
type: cloudify.onap.kubernetes.App
properties:
name: message-router
+ values: kubernetes/message-router/values.yaml
resources:
- kubernetes/message-router/templates/message-router-zookeeper.yaml
- kubernetes/message-router/templates/message-router-dmaap.yaml
- kubernetes/message-router/templates/message-router-kafka.yaml
services: kubernetes/message-router/templates/all-services.yaml
- inputs: *app_inputs
+ options: *app_options
relationships:
- type: cloudify.kubernetes.relationships.managed_by_master
target: kubernetes_master
- type: cloudify.relationships.depends_on
- target: init_pod
+ target: onap_environment
sdc_app:
type: cloudify.onap.kubernetes.App
properties:
name: sdc
+ values: kubernetes/sdc/values.yaml
resources:
- - kubernetes/sdc/sdc-es.yaml
- - kubernetes/sdc/sdc-fe.yaml
- - kubernetes/sdc/sdc-kb.yaml
- - kubernetes/sdc/sdc-cs.yaml
- - kubernetes/sdc/sdc-be.yaml
- services: kubernetes/sdc/all-services.yaml
- inputs: *app_inputs
+ - kubernetes/sdc/templates/sdc-es.yaml
+ - kubernetes/sdc/templates/sdc-fe.yaml
+ - kubernetes/sdc/templates/sdc-kb.yaml
+ - kubernetes/sdc/templates/sdc-cs.yaml
+ - kubernetes/sdc/templates/sdc-be.yaml
+ services: kubernetes/sdc/templates/all-services.yaml
+ options: *app_options
relationships:
- type: cloudify.kubernetes.relationships.managed_by_master
target: kubernetes_master
- type: cloudify.relationships.depends_on
- target: init_pod
+ target: onap_environment
aai_app:
type: cloudify.onap.kubernetes.App
properties:
name: aai
+ values: kubernetes/aai/values.yaml
resources:
- - kubernetes/aai/aai-deployment.yaml
- - kubernetes/aai/modelloader-deployment.yaml
- - kubernetes/aai/hbase-deployment.yaml
- services: kubernetes/aai/all-services.yaml
- inputs: *app_inputs
+ - kubernetes/aai/templates/aai-deployment.yaml
+ - kubernetes/aai/templates/modelloader-deployment.yaml
+ - kubernetes/aai/templates/hbase-deployment.yaml
+ services: kubernetes/aai/templates/all-services.yaml
+ options: *app_options
relationships:
- type: cloudify.kubernetes.relationships.managed_by_master
target: kubernetes_master
- type: cloudify.relationships.depends_on
- target: init_pod
+ target: onap_environment
robot_app:
type: cloudify.onap.kubernetes.App
properties:
name: robot
+ values: kubernetes/robot/values.yaml
resources:
- - kubernetes/robot/robot-deployment.yaml
- services: kubernetes/robot/all-services.yaml
- inputs: *app_inputs
+ - kubernetes/robot/templates/robot-deployment.yaml
+ services: kubernetes/robot/templates/all-services.yaml
+ options: *app_options
relationships:
- type: cloudify.kubernetes.relationships.managed_by_master
target: kubernetes_master
- type: cloudify.relationships.depends_on
- target: init_pod
+ target: onap_environment
vid_app:
type: cloudify.onap.kubernetes.App
properties:
name: vid
+ values: kubernetes/vid/values.yaml
resources:
- - kubernetes/vid/vid-mariadb-deployment.yaml
- - kubernetes/vid/vid-server-deployment.yaml
- services: kubernetes/vid/all-services.yaml
- inputs: *app_inputs
+ - kubernetes/vid/templates/vid-mariadb-deployment.yaml
+ - kubernetes/vid/templates/vid-server-deployment.yaml
+ services: kubernetes/vid/templates/all-services.yaml
+ options: *app_options
relationships:
- type: cloudify.kubernetes.relationships.managed_by_master
target: kubernetes_master
- type: cloudify.relationships.depends_on
- target: init_pod
+ target: onap_environment
sdnc_app:
type: cloudify.onap.kubernetes.App
properties:
name: sdnc
+ values: kubernetes/sdnc/values.yaml
resources:
- - kubernetes/sdnc/web-deployment.yaml
- - kubernetes/sdnc/sdnc-deployment.yaml
- - kubernetes/sdnc/dgbuilder-deployment.yaml
- - kubernetes/sdnc/db-deployment.yaml
- services: kubernetes/sdnc/all-services.yaml
- inputs: *app_inputs
+ - kubernetes/sdnc/templates/web-deployment.yaml
+ - kubernetes/sdnc/templates/sdnc-deployment.yaml
+ - kubernetes/sdnc/templates/dgbuilder-deployment.yaml
+ - kubernetes/sdnc/templates/db-deployment.yaml
+ services: kubernetes/sdnc/templates/all-services.yaml
+ options: *app_options
relationships:
- type: cloudify.kubernetes.relationships.managed_by_master
target: kubernetes_master
- type: cloudify.relationships.depends_on
- target: init_pod
+ target: onap_environment
portal_app:
type: cloudify.onap.kubernetes.App
properties:
name: portal
+ values: kubernetes/portal/values.yaml
resources:
- - kubernetes/portal/portal-widgets-deployment.yaml
- - kubernetes/portal/portal-apps-deployment.yaml
- - kubernetes/portal/portal-mariadb-deployment.yaml
- - kubernetes/portal/portal-vnc-dep.yaml
- services: kubernetes/portal/all-services.yaml
- inputs: *app_inputs
+ - kubernetes/portal/templates/portal-widgets-deployment.yaml
+ - kubernetes/portal/templates/portal-apps-deployment.yaml
+ - kubernetes/portal/templates/portal-mariadb-deployment.yaml
+ - kubernetes/portal/templates/portal-vnc-dep.yaml
+ services: kubernetes/portal/templates/all-services.yaml
+ options: *app_options
relationships:
- type: cloudify.kubernetes.relationships.managed_by_master
target: kubernetes_master
- type: cloudify.relationships.depends_on
- target: init_pod
+ target: onap_environment
policy_app:
type: cloudify.onap.kubernetes.App
properties:
name: policy
+ values: kubernetes/policy/values.yaml
resources:
- - kubernetes/policy/dep-drools.yaml
- - kubernetes/policy/dep-nexus.yaml
- - kubernetes/policy/dep-brmsgw.yaml
- - kubernetes/policy/dep-pdp.yaml
- - kubernetes/policy/dep-pap.yaml
- - kubernetes/policy/dep-maria.yaml
- - kubernetes/policy/dep-pypdp.yaml
- services: kubernetes/policy/all-services.yaml
- inputs: *app_inputs
+ - kubernetes/policy/templates/dep-drools.yaml
+ - kubernetes/policy/templates/dep-nexus.yaml
+ - kubernetes/policy/templates/dep-brmsgw.yaml
+ - kubernetes/policy/templates/dep-pdp.yaml
+ - kubernetes/policy/templates/dep-pap.yaml
+ - kubernetes/policy/templates/dep-maria.yaml
+ - kubernetes/policy/templates/dep-pypdp.yaml
+ services: kubernetes/policy/templates/all-services.yaml
+ options: *app_options
relationships:
- type: cloudify.kubernetes.relationships.managed_by_master
target: kubernetes_master
- type: cloudify.relationships.depends_on
- target: init_pod
+ target: onap_environment
appc_app:
type: cloudify.onap.kubernetes.App
properties:
name: appc
+ values: kubernetes/appc/values.yaml
resources:
- - kubernetes/appc/appc-deployment.yaml
- - kubernetes/appc/dgbuilder-deployment.yaml
- - kubernetes/appc/db-deployment.yaml
- services: kubernetes/appc/all-services.yaml
- inputs: *app_inputs
+ - kubernetes/appc/templates/appc-deployment.yaml
+ - kubernetes/appc/templates/dgbuilder-deployment.yaml
+ - kubernetes/appc/templates/db-deployment.yaml
+ services: kubernetes/appc/templates/all-services.yaml
+ options: *app_options
relationships:
- type: cloudify.kubernetes.relationships.managed_by_master
target: kubernetes_master
- type: cloudify.relationships.depends_on
- target: init_pod
+ target: onap_environment