Merge "remove passing auth token via env var"
author     Borislav Glozman <Borislav.Glozman@amdocs.com>    Thu, 22 Mar 2018 08:53:13 +0000 (08:53 +0000)
committer  Gerrit Code Review <gerrit@onap.org>              Thu, 22 Mar 2018 08:53:13 +0000 (08:53 +0000)
121 files changed:
kubernetes/clamp/Chart.yaml
kubernetes/clamp/charts/mariadb/Chart.yaml [new file with mode: 0644]
kubernetes/clamp/charts/mariadb/NOTES.txt [new file with mode: 0644]
kubernetes/clamp/charts/mariadb/resources/config/mariadb/conf.d/conf1/my.cnf [moved from kubernetes/clamp/resources/config/mariadb/conf.d/conf1/my.cnf with 100% similarity]
kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql [moved from kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql with 100% similarity]
kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-stored-procedures.sql [moved from kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-stored-procedures.sql with 100% similarity]
kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/drop/clds-drop-db-objects.sql [moved from kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/drop/clds-drop-db-objects.sql with 100% similarity]
kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh [moved from kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh with 100% similarity]
kubernetes/clamp/charts/mariadb/templates/configmap.yaml [moved from kubernetes/clamp/templates/clamp-mariadb-configmap.yaml with 81% similarity]
kubernetes/clamp/charts/mariadb/templates/deployment.yaml [new file with mode: 0644]
kubernetes/clamp/charts/mariadb/templates/pv.yaml [new file with mode: 0644]
kubernetes/clamp/charts/mariadb/templates/pvc.yaml [new file with mode: 0644]
kubernetes/clamp/charts/mariadb/templates/secrets.yaml [new file with mode: 0644]
kubernetes/clamp/charts/mariadb/templates/service.yaml [new file with mode: 0644]
kubernetes/clamp/charts/mariadb/values.yaml [new file with mode: 0644]
kubernetes/clamp/requirements.yaml [new file with mode: 0644]
kubernetes/clamp/templates/NOTES.txt [new file with mode: 0644]
kubernetes/clamp/templates/all-services.yaml [deleted file]
kubernetes/clamp/templates/clamp-deployment.yaml [deleted file]
kubernetes/clamp/templates/clamp-mariadb-deployment.yaml [deleted file]
kubernetes/clamp/templates/clamp-pv-pvc.yaml [deleted file]
kubernetes/clamp/templates/configmap.yaml [new file with mode: 0644]
kubernetes/clamp/templates/deployment.yaml [new file with mode: 0644]
kubernetes/clamp/templates/service.yaml [new file with mode: 0644]
kubernetes/clamp/values.yaml
kubernetes/config/docker/init/src/config/consul/consul-agent-config/bin/kubectl [deleted file]
kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/aai-search-storage-write-script.sh [deleted file]
kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/appc-dbhost-script.sh [deleted file]
kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/gremlin-script.sh [deleted file]
kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdnc-dbhost-script.sh [deleted file]
kubernetes/consul/resources/config/consul-agent-config/aai-data-router-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-data-router-health.json with 78% similarity]
kubernetes/consul/resources/config/consul-agent-config/aai-hbase-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-hbase-health.json with 85% similarity]
kubernetes/consul/resources/config/consul-agent-config/aai-model-loader-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-model-loader-health.json with 76% similarity]
kubernetes/consul/resources/config/consul-agent-config/aai-search-data-service-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-search-data-service-health.json with 56% similarity]
kubernetes/consul/resources/config/consul-agent-config/aai-services-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-services-health.json with 71% similarity]
kubernetes/consul/resources/config/consul-agent-config/aai-sparky-be-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-sparky-be-health.json with 77% similarity]
kubernetes/consul/resources/config/consul-agent-config/aai-tabular-backend-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/aai-tabular-backend-health.json with 77% similarity]
kubernetes/consul/resources/config/consul-agent-config/appc-dbbuilder.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-dbbuilder.json with 88% similarity]
kubernetes/consul/resources/config/consul-agent-config/appc-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-health.json with 77% similarity]
kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb01-healthcheck.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnctldb01-healthcheck.json with 63% similarity]
kubernetes/consul/resources/config/consul-agent-config/appc-sdnctldb02-healthcheck.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnctldb02-healthcheck.json with 63% similarity]
kubernetes/consul/resources/config/consul-agent-config/appc-sdnhost.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnhost.json with 85% similarity]
kubernetes/consul/resources/config/consul-agent-config/certs/client-cert-onap.crt.pem [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/certs/client-cert-onap.crt.pem with 100% similarity]
kubernetes/consul/resources/config/consul-agent-config/certs/client-cert-onap.key.pem [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/certs/client-cert-onap.key.pem with 100% similarity]
kubernetes/consul/resources/config/consul-agent-config/log-elastic-search.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-elastic-search.json with 75% similarity]
kubernetes/consul/resources/config/consul-agent-config/log-kibana.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-kibana.json with 81% similarity]
kubernetes/consul/resources/config/consul-agent-config/log-logstash.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-logstash.json with 73% similarity]
kubernetes/consul/resources/config/consul-agent-config/model-loader.properties [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/model-loader.properties with 100% similarity]
kubernetes/consul/resources/config/consul-agent-config/mr-dmaap-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/mr-dmaap-health.json with 68% similarity]
kubernetes/consul/resources/config/consul-agent-config/mr-kafka-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/mr-kafka-health.json with 69% similarity]
kubernetes/consul/resources/config/consul-agent-config/mr-zookeeper-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/mr-zookeeper-health.json with 68% similarity]
kubernetes/consul/resources/config/consul-agent-config/msb-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/msb-health.json with 55% similarity]
kubernetes/consul/resources/config/consul-agent-config/mso-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/mso-health.json with 73% similarity]
kubernetes/consul/resources/config/consul-agent-config/mso-mariabdb.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/mso-mariabdb.json with 77% similarity]
kubernetes/consul/resources/config/consul-agent-config/multicloud-health-check.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/multicloud-health-check.json with 78% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-doc.txt [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/aai-search-storage-write-doc.txt with 100% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh [new file with mode: 0755]
kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh [new file with mode: 0755]
kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/data-router-script.sh with 50% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/model-loader-script.sh with 51% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mr-kafka-health.sh with 51% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mr-zookeeper-health.sh with 52% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/mso-api-script.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-api-script.sh with 81% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/mso-camunda-script.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-camunda-script.sh with 83% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/mso-jra-script.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-jra-script.sh with 82% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/mso-mariadb-script.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/mso-mariadb-script.sh with 51% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-be-script.sh with 87% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-cs-script.sh with 88% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-fe-script.sh with 87% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdc-titan-script.sh with 88% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh [new file with mode: 0755]
kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/search-data-service-availability.sh with 87% similarity, mode: 0644]
kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sparky-be-script.sh with 53% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/tabular-db-availability.sh with 90% similarity]
kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/vid-mariadb-script.sh with 51% similarity, mode: 0755]
kubernetes/consul/resources/config/consul-agent-config/sdc-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdc-health.json with 77% similarity]
kubernetes/consul/resources/config/consul-agent-config/sdnc-dbhost.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-dbhost.json with 78% similarity]
kubernetes/consul/resources/config/consul-agent-config/sdnc-dgbuilder.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-dgbuilder.json with 88% similarity]
kubernetes/consul/resources/config/consul-agent-config/sdnc-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-health.json with 84% similarity]
kubernetes/consul/resources/config/consul-agent-config/sdnc-portal-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-portal-health.json with 86% similarity]
kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb01-healthcheck.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnctldb01-healthcheck.json with 65% similarity]
kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnctldb02-healthcheck.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnctldb02-healthcheck.json with 65% similarity]
kubernetes/consul/resources/config/consul-agent-config/sdnc-sdnhost.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnhost.json with 86% similarity]
kubernetes/consul/resources/config/consul-agent-config/vfc-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/vfc-health.json with 50% similarity]
kubernetes/consul/resources/config/consul-agent-config/vid-health.json [moved from kubernetes/config/docker/init/src/config/consul/consul-agent-config/vid-health.json with 82% similarity]
kubernetes/consul/templates/consul-agent-configmap.yaml [new file with mode: 0644]
kubernetes/consul/templates/consul-agent-deployment.yaml
kubernetes/consul/templates/consul-server-deployment.yaml
kubernetes/consul/values.yaml
kubernetes/esr/Chart.yaml
kubernetes/esr/resources/config/log/esrserver/logback.xml [new file with mode: 0644]
kubernetes/esr/resources/config/log/filebeat/filebeat.yml [new file with mode: 0644]
kubernetes/esr/templates/esr-esrserver-deployment.yaml
kubernetes/esr/templates/esr-filebeat-configmap.yaml [new file with mode: 0644]
kubernetes/esr/templates/esr-server-log-configmap.yaml [new file with mode: 0644]
kubernetes/esr/values.yaml
kubernetes/helm/starters/onap-app/.helmignore [new file with mode: 0644]
kubernetes/helm/starters/onap-app/Chart.yaml [new file with mode: 0644]
kubernetes/helm/starters/onap-app/README.md [new file with mode: 0644]
kubernetes/helm/starters/onap-app/requirements.yaml [new file with mode: 0644]
kubernetes/helm/starters/onap-app/resources/config/README.txt [new file with mode: 0644]
kubernetes/helm/starters/onap-app/resources/config/application.properties [new file with mode: 0644]
kubernetes/helm/starters/onap-app/templates/NOTES.txt [new file with mode: 0644]
kubernetes/helm/starters/onap-app/templates/configmap.yaml [new file with mode: 0644]
kubernetes/helm/starters/onap-app/templates/deployment.yaml [new file with mode: 0644]
kubernetes/helm/starters/onap-app/templates/service.yaml [new file with mode: 0644]
kubernetes/helm/starters/onap-app/values.yaml [new file with mode: 0644]
kubernetes/msb/values.yaml
kubernetes/onap/requirements.yaml
kubernetes/onap/values.yaml
kubernetes/robot/.helmignore
kubernetes/robot/Chart.yaml
kubernetes/robot/all-services.yaml [deleted file]
kubernetes/robot/requirements.yaml [new file with mode: 0644]
kubernetes/robot/templates/NOTES.txt [new file with mode: 0644]
kubernetes/robot/templates/all-services.yaml [deleted file]
kubernetes/robot/templates/configmap.yaml [moved from kubernetes/robot/templates/robot-dep-configmap.yaml with 54% similarity]
kubernetes/robot/templates/deployment.yaml [new file with mode: 0644]
kubernetes/robot/templates/robot-deployment.yaml [deleted file]
kubernetes/robot/templates/service.yaml [new file with mode: 0644]
kubernetes/robot/values.yaml

index a162ca7..2f68750 100644 (file)
@@ -1,4 +1,4 @@
 apiVersion: v1
-description: A Helm chart for Kubernetes
+description: ONAP Clamp
 name: clamp
-version: 1.1.0
+version: 2.0.0
diff --git a/kubernetes/clamp/charts/mariadb/Chart.yaml b/kubernetes/clamp/charts/mariadb/Chart.yaml
new file mode 100644 (file)
index 0000000..da9cab3
--- /dev/null
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: MariaDB Service
+name: mariadb
+version: 2.0.0
diff --git a/kubernetes/clamp/charts/mariadb/NOTES.txt b/kubernetes/clamp/charts/mariadb/NOTES.txt
new file mode 100644 (file)
index 0000000..91d8ed4
--- /dev/null
@@ -0,0 +1,19 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+  http://{{ . }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch the status of it by running 'kubectl get svc -w {{ include "common.name" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ include "common.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
+{{- end }}
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: clamp-entrypoint-initdb-configmap
-  namespace: {{ .Values.nsPrefix }}
+  namespace: {{ include "common.namespace" . }}
 data:
 {{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/*").AsConfig . | indent 2 }}
 ---
@@ -11,7 +11,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: clamp-entrypoint-drop-configmap
-  namespace: {{ .Values.nsPrefix }}
+  namespace: {{ include "common.namespace" . }}
 data:
 {{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/drop/*").AsConfig . | indent 2 }}
 ---
@@ -19,7 +19,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: clamp-entrypoint-bulkload-configmap
-  namespace: {{ .Values.nsPrefix }}
+  namespace: {{ include "common.namespace" . }}
 data:
 {{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/*").AsConfig . | indent 2 }}
 ---
@@ -27,7 +27,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: clamp-mariadb-conf-configmap
-  namespace: {{ .Values.nsPrefix }}
+  namespace: {{ include "common.namespace" . }}
 data:
 {{ tpl (.Files.Glob "resources/config/mariadb/conf.d/conf1/*").AsConfig . | indent 2 }}
 #{{ end }}
diff --git a/kubernetes/clamp/charts/mariadb/templates/deployment.yaml b/kubernetes/clamp/charts/mariadb/templates/deployment.yaml
new file mode 100644 (file)
index 0000000..1e17b9b
--- /dev/null
@@ -0,0 +1,94 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: {{ include "common.fullname" . }}
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  template:
+    metadata:
+      labels:
+        app: {{ include "common.name" . }}
+        release: {{ .Release.Name }}
+    spec:
+      containers:
+        - name: {{ include "common.name" .  }}
+          image: {{ .Values.global.repository | default .Values.repository }}/{{ .Values.image }}
+          imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+          ports:
+          - containerPort: {{ .Values.service.internalPort }}
+          # disable liveness probe when breakpoints set in debugger
+          # so K8s doesn't restart unresponsive container
+          {{- if eq .Values.liveness.enabled true }}
+          livenessProbe:
+            tcpSocket:
+              port: {{ .Values.service.internalPort }}
+            initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
+            periodSeconds: {{ .Values.liveness.periodSeconds }}
+          {{ end -}}
+          readinessProbe:
+            tcpSocket:
+              port: {{ .Values.service.internalPort }}
+            initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
+            periodSeconds: {{ .Values.readiness.periodSeconds }}
+          env:
+            - name: MYSQL_ROOT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ template "common.fullname" . }}
+                  key: db-root-password
+          volumeMounts:
+          - mountPath: /docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh
+            name: docker-entrypoint-initdb
+            subPath: load-sql-files-tests-automation.sh
+          - mountPath: /etc/localtime
+            name: localtime
+            readOnly: true
+          - mountPath: /docker-entrypoint-initdb.d/drop/
+            name: docker-entrypoint-clds-drop
+          - mountPath: /docker-entrypoint-initdb.d/bulkload/
+            name: docker-entrypoint-bulkload
+          - mountPath: /etc/mysql/conf.d/conf1/
+            name:  clamp-mariadb-conf
+          - mountPath: /var/lib/mysql
+            name: clamp-mariadb-data
+          resources:
+{{ toYaml .Values.resources | indent 12 }}
+        {{- if .Values.nodeSelector }}
+        nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 10 }}
+        {{- end -}}
+        {{- if .Values.affinity }}
+        affinity:
+{{ toYaml .Values.affinity | indent 10 }}
+        {{- end }}
+      volumes:
+      {{- if .Values.persistence.enabled }}
+        - name: clamp-mariadb-data
+          persistentVolumeClaim:
+            claimName: {{ include "common.fullname" . }}
+      {{- else }}
+        - name: clamp-mariadb-data
+          emptyDir: {}
+      {{- end }}
+        - name: docker-entrypoint-initdb
+          configMap:
+            name: clamp-entrypoint-initdb-configmap
+        - name: docker-entrypoint-clds-drop
+          configMap:
+            name: clamp-entrypoint-drop-configmap
+        - name: docker-entrypoint-bulkload
+          configMap:
+            name: clamp-entrypoint-bulkload-configmap
+        - name: clamp-mariadb-conf
+          configMap:
+            name: clamp-mariadb-conf-configmap
+        - name: localtime
+          hostPath:
+            path: /etc/localtime
+      imagePullSecrets:
+      - name: "{{ include "common.namespace" . }}-docker-registry-key"
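For reference, a minimal sketch of how the MYSQL_ROOT_PASSWORD entry above is expected to render; the release and helper output are illustrative only (here assuming common.fullname resolves to "clamp-mariadb"), not something fixed by this change:

  env:
    - name: MYSQL_ROOT_PASSWORD
      valueFrom:
        secretKeyRef:
          name: clamp-mariadb          # illustrative rendered value of {{ template "common.fullname" . }}
          key: db-root-password        # key populated by the mariadb secrets.yaml introduced below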
diff --git a/kubernetes/clamp/charts/mariadb/templates/pv.yaml b/kubernetes/clamp/charts/mariadb/templates/pv.yaml
new file mode 100644 (file)
index 0000000..31230a9
--- /dev/null
@@ -0,0 +1,21 @@
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+  name: {{ include "common.fullname" . }}
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+    name: {{ include "common.fullname" . }}
+spec:
+  capacity:
+    storage: {{ .Values.persistence.size}}
+  accessModes:
+    - {{ .Values.persistence.accessMode }}
+  persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
+  hostPath:
+    path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}
+{{- end -}}
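As a worked example of the hostPath expression above, assuming the defaults from the mariadb values.yaml below (persistence.mountPath /dockerdata-nfs, mountSubPath clamp/mariadb/data) and an illustrative release named "clamp", the volume would resolve to:

  hostPath:
    path: /dockerdata-nfs/clamp/clamp/mariadb/data   # mountPath / .Release.Name / mountSubPath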
diff --git a/kubernetes/clamp/charts/mariadb/templates/pvc.yaml b/kubernetes/clamp/charts/mariadb/templates/pvc.yaml
new file mode 100644 (file)
index 0000000..b0cd3bf
--- /dev/null
@@ -0,0 +1,32 @@
+{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: {{ include "common.fullname" . }}
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+{{- if .Values.persistence.annotations }}
+  annotations:
+{{ toYaml .Values.persistence.annotations | indent 4 }}
+{{- end }}
+spec:
+  selector:
+    matchLabels:
+      name: {{ include "common.fullname" . }}
+  accessModes:
+    - {{ .Values.persistence.accessMode }}
+  resources:
+    requests:
+      storage: {{ .Values.persistence.size }}
+{{- if .Values.persistence.storageClass }}
+{{- if (eq "-" .Values.persistence.storageClass) }}
+  storageClassName: ""
+{{- else }}
+  storageClassName: "{{ .Values.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end -}}
diff --git a/kubernetes/clamp/charts/mariadb/templates/secrets.yaml b/kubernetes/clamp/charts/mariadb/templates/secrets.yaml
new file mode 100644 (file)
index 0000000..4734d1f
--- /dev/null
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.fullname" . }}
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+type: Opaque
+data:
+  db-root-password: {{ .Values.config.mysqlPassword | b64enc | quote }}
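A sketch of the Secret data this template produces with the default config.mysqlPassword from the values.yaml below; the value is simply that default run through b64enc:

  data:
    db-root-password: c3Ryb25nX3BpdGNob3U=   # "strong_pitchou" | b64enc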
diff --git a/kubernetes/clamp/charts/mariadb/templates/service.yaml b/kubernetes/clamp/charts/mariadb/templates/service.yaml
new file mode 100644 (file)
index 0000000..94ec469
--- /dev/null
@@ -0,0 +1,24 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "common.fullname" . }}
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    {{if eq .Values.service.type "NodePort" -}}
+    - port: {{ .Values.service.internalPort }}
+      nodePort: {{ .Values.global.nodePortPrefix | default "302" }}{{ .Values.service.nodePort }}
+    {{- else -}}
+    - port: {{ .Values.service.externalPort }}
+      targetPort: {{ .Values.service.internalPort }}
+    {{- end}}
+      name: {{ .Values.service.name }}
+  selector:
+    app: {{ include "common.name" . }}
+    release: {{ .Release.Name }}
\ No newline at end of file
diff --git a/kubernetes/clamp/charts/mariadb/values.yaml b/kubernetes/clamp/charts/mariadb/values.yaml
new file mode 100644 (file)
index 0000000..c525fec
--- /dev/null
@@ -0,0 +1,91 @@
+#################################################################
+# Global configuration defaults.
+#################################################################
+global: # global defaults
+  nodePortPrefix: 302
+  repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ==
+
+  persistence: {}
+
+# application image
+repository: nexus3.onap.org:10001
+image: mariadb:10.1.11
+pullPolicy: Always
+
+#################################################################
+# Application configuration defaults.
+#################################################################
+config:
+  mysqlPassword: strong_pitchou
+
+# default number of instances
+replicaCount: 1
+
+nodeSelector: {}
+
+affinity: {}
+
+# probe configuration parameters
+liveness:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  # necessary to disable liveness probe when setting breakpoints
+  # in debugger so K8s doesn't restart unresponsive container
+  enabled: true
+readiness:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+
+## Persist data to a persistent volume
+persistence:
+  enabled: true
+
+  ## A manually managed Persistent Volume and Claim
+  ## Requires persistence.enabled: true
+  ## If defined, PVC must be created manually before volume will be bound
+  # existingClaim:
+  volumeReclaimPolicy: Retain
+
+  ## database data Persistent Volume Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  # storageClass: "-"
+  accessMode: ReadWriteMany
+  size: 2Gi
+  mountPath: /dockerdata-nfs
+  mountSubPath: clamp/mariadb/data
+
+service:
+  type: ClusterIP
+  name: mariadb
+  internalPort: 3306
+  externalPort: 3306
+
+
+ingress:
+  enabled: false
+
+
+resources: {}
+  # We usually recommend not specifying default resources and leaving this as a conscious
+  # choice for the user. This also increases the chances that charts will run on environments with
+  # little resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  #
+  # Example:
+  # Configure resource requests and limits
+  # ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  # Minimum for development: 2 CPU cores, 4GB memory
+  # Minimum for production: 4 CPU cores, 8GB memory
+#resources:
+#  limits:
+#    cpu: 2
+#    memory: 4Gi
+#  requests:
+#    cpu: 2
+#    memory: 4Gi
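To illustrate the storageClass semantics documented above, a hypothetical override file (values are examples only, not part of this change) could combine the persistence knobs like this; with storageClass set to "-" the claim renders storageClassName: "" and binds only to a pre-created volume such as the one from pv.yaml:

  persistence:
    enabled: true
    storageClass: "-"        # renders storageClassName: "" and disables dynamic provisioning
    accessMode: ReadWriteMany
    size: 5Gi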
diff --git a/kubernetes/clamp/requirements.yaml b/kubernetes/clamp/requirements.yaml
new file mode 100644 (file)
index 0000000..56029ab
--- /dev/null
@@ -0,0 +1,7 @@
+dependencies:
+  - name: common
+    version: ~2.0.0
+    # local reference to common chart, as it is
+    # a part of this chart's package and will not
+    # be published independently to a repo (at this point)
+    repository: '@local'
\ No newline at end of file
diff --git a/kubernetes/clamp/templates/NOTES.txt b/kubernetes/clamp/templates/NOTES.txt
new file mode 100644 (file)
index 0000000..91d8ed4
--- /dev/null
@@ -0,0 +1,19 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+  http://{{ . }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch the status of it by running 'kubectl get svc -w {{ include "common.name" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ include "common.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
+{{- end }}
diff --git a/kubernetes/clamp/templates/all-services.yaml b/kubernetes/clamp/templates/all-services.yaml
deleted file mode 100644 (file)
index c9f4f5e..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-#{{ if not .Values.disableClampClampMariadb }}
-apiVersion: v1
-kind: Service
-metadata:
-  name: clamp-mariadb
-  namespace: "{{ .Values.nsPrefix }}"
-  labels:
-    app: clamp-mariadb
-spec:
-  ports:
-  - name: clamp-mariadb
-    port: 3306
-  selector:
-    app: clamp-mariadb
-  clusterIP: None
-#{{ end }}
-#{{ if not .Values.disableClampClamp }}
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: clamp
-  namespace: "{{ .Values.nsPrefix }}"
-  annotations:
-    msb.onap.org/service-info: '[
-      {
-          "serviceName": "clamp",
-          "version": "v1",
-          "url": "/restservices/clds/v1",
-          "protocol": "REST",
-          "port": "8080",
-          "visualRange":"1"
-      }
-      ]'
-spec:
-  ports:
-  - name: clamp
-    port: 8080
-    nodePort: {{ .Values.nodePortPrefix }}95
-  selector:
-    app: clamp
-  type: NodePort
-#{{ end }}
\ No newline at end of file
diff --git a/kubernetes/clamp/templates/clamp-deployment.yaml b/kubernetes/clamp/templates/clamp-deployment.yaml
deleted file mode 100644 (file)
index 2c2e13f..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-#{{ if not .Values.disableClampClamp }}
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  labels:
-    app: clamp
-  name: clamp
-  namespace: "{{ .Values.nsPrefix }}"
-spec:
-  replicas: {{ .Values.clampReplicas }}
-  selector:
-    matchLabels:
-      app: clamp
-  template:
-    metadata:
-      labels:
-        app: clamp
-      name: clamp
-    spec:
-      initContainers:
-      - command:
-        - /root/ready.py
-        args:
-        - --container-name
-        - clamp-mariadb
-        env:
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              apiVersion: v1
-              fieldPath: metadata.namespace
-        image: {{ .Values.image.readiness }}
-        imagePullPolicy: {{ .Values.pullPolicy }}
-        name: clamp-readiness
-      containers:
-      - env:
-        - name: SPRING_APPLICATION_JSON
-          value: '{
-  "spring.datasource.camunda.url": "jdbc:mariadb:sequential://clamp-mariadb.{{ .Values.nsPrefix }}:3306/camundabpm?verifyServerCertificate=false&useSSL=false&requireSSL=false&autoReconnect=true&retriesAllDown=2147483647&failoverLoopRetries=2147483647",
-  "spring.datasource.cldsdb.url": "jdbc:mariadb:sequential://clamp-mariadb.{{ .Values.nsPrefix }}:3306/cldsdb4?verifyServerCertificate=false&useSSL=false&requireSSL=false&autoReconnect=true&retriesAllDown=2147483647&failoverLoopRetries=2147483647"
-  }'
-        image: {{ .Values.image.clampImage }}:{{ .Values.image.clampVersion }}
-        imagePullPolicy: {{ .Values.pullPolicy }}
-        name: clamp
-        readinessProbe:
-          tcpSocket:
-            port: 8080
-          initialDelaySeconds: 5
-          periodSeconds: 10
-      imagePullSecrets:
-      - name: {{ .Values.nsPrefix }}-docker-registry-key
-#{{ end }}
diff --git a/kubernetes/clamp/templates/clamp-mariadb-deployment.yaml b/kubernetes/clamp/templates/clamp-mariadb-deployment.yaml
deleted file mode 100644 (file)
index a12489e..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-#{{ if not .Values.disableClampClampMariadb }}
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: clamp-mariadb
-  namespace: "{{ .Values.nsPrefix }}"
-spec:
-  replicas: {{ .Values.clampMariaDbReplicas }}
-  selector:
-    matchLabels:
-      app: clamp-mariadb
-  template:
-    metadata:
-      labels:
-        app: clamp-mariadb
-      name: clamp-mariadb
-    spec:
-      hostname: clamp-mariadb
-      containers:
-      - args:
-        image: {{ .Values.image.mariadbImage }}:{{ .Values.image.mariadbVersion }}
-        imagePullPolicy: {{ .Values.pullPolicy }}
-        name: "clamp-mariadb"
-        env:
-          - name: MYSQL_ROOT_PASSWORD
-            value: {{ .Values.mysqlPassword }}
-        volumeMounts:
-        - mountPath: /docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh
-          name: docker-entrypoint-initdb
-          subPath: load-sql-files-tests-automation.sh
-        - mountPath: /docker-entrypoint-initdb.d/drop/
-          name: docker-entrypoint-clds-drop
-        - mountPath: /docker-entrypoint-initdb.d/bulkload/
-          name: docker-entrypoint-bulkload
-        - mountPath: /etc/mysql/conf.d/conf1/
-          name:  clamp-mariadb-conf
-        - mountPath: /var/lib/mysql
-          name: clamp-mariadb-data
-        readinessProbe:
-          tcpSocket:
-            port: 3306
-          initialDelaySeconds: 5
-          periodSeconds: 10
-      volumes:
-        - name: docker-entrypoint-initdb
-          configMap:
-            name: clamp-entrypoint-initdb-configmap
-        - name: docker-entrypoint-clds-drop
-          configMap:
-            name: clamp-entrypoint-drop-configmap
-        - name: docker-entrypoint-bulkload
-          configMap:
-            name: clamp-entrypoint-bulkload-configmap
-        - name: clamp-mariadb-conf
-          configMap:
-            name: clamp-mariadb-conf-configmap
-        - name: clamp-mariadb-data
-          persistentVolumeClaim:
-            claimName: clamp-db
-      imagePullSecrets:
-      - name: {{ .Values.nsPrefix }}-docker-registry-key
-#{{ end }}
diff --git a/kubernetes/clamp/templates/clamp-pv-pvc.yaml b/kubernetes/clamp/templates/clamp-pv-pvc.yaml
deleted file mode 100644 (file)
index c542de6..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-#{{ if not .Values.disableClampClampMariadb }}
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: "{{ .Values.nsPrefix }}-clamp"
-  namespace: "{{ .Values.nsPrefix }}"
-  labels:
-    name: "{{ .Values.nsPrefix }}-clamp"
-spec:
-  capacity:
-    storage: 2Gi
-  accessModes:
-    - ReadWriteMany
-  persistentVolumeReclaimPolicy: Retain
-  hostPath:
-    path: {{ .Values.dataRootDir }}/{{ .Values.nsPrefix }}/clamp/mariadb/data
----
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: clamp-db
-  namespace: "{{ .Values.nsPrefix }}"
-spec:
-  accessModes:
-    - ReadWriteMany
-  resources:
-    requests:
-      storage: 2Gi
-  selector:
-    matchLabels:
-      name: "{{ .Values.nsPrefix }}-clamp"
-#{{ end }}
diff --git a/kubernetes/clamp/templates/configmap.yaml b/kubernetes/clamp/templates/configmap.yaml
new file mode 100644 (file)
index 0000000..1de3b2f
--- /dev/null
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.fullname" . }}
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+data:
+ spring_application_json: {{ .Values.config.springApplicationJson | quote }}
diff --git a/kubernetes/clamp/templates/deployment.yaml b/kubernetes/clamp/templates/deployment.yaml
new file mode 100644 (file)
index 0000000..d701e25
--- /dev/null
@@ -0,0 +1,71 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: {{ include "common.fullname" . }}
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  template:
+    metadata:
+      labels:
+        app: {{ include "common.name" . }}
+        release: {{ .Release.Name }}
+    spec:
+      initContainers:
+      - command:
+        - /root/ready.py
+        args:
+        - --container-name
+        - {{ .Values.mariadb.nameOverride }}
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
+        imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+        name: {{ include "common.name" . }}-readiness
+      containers:
+        - name: {{ include "common.name" . }}
+          image: "{{ .Values.global.repository | default .Values.repository }}/{{ .Values.image }}"
+          imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+          ports:
+          - containerPort: {{ .Values.service.internalPort }}
+          # disable liveness probe when breakpoints set in debugger
+          # so K8s doesn't restart unresponsive container
+          {{- if eq .Values.liveness.enabled true }}
+          livenessProbe:
+            tcpSocket:
+              port: {{ .Values.service.internalPort }}
+            initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
+            periodSeconds: {{ .Values.liveness.periodSeconds }}
+          {{ end -}}
+          readinessProbe:
+            tcpSocket:
+              port: {{ .Values.service.internalPort }}
+            initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
+            periodSeconds: {{ .Values.readiness.periodSeconds }}
+          env:
+            - name: SPRING_APPLICATION_JSON
+              valueFrom:
+                configMapKeyRef:
+                  name: {{ template "common.fullname" . }}
+                  key: spring_application_json
+          resources:
+{{ toYaml .Values.resources | indent 12 }}
+        {{- if .Values.nodeSelector }}
+        nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 10 }}
+        {{- end -}}
+        {{- if .Values.affinity }}
+        affinity:
+{{ toYaml .Values.affinity | indent 10 }}
+        {{- end }}
+      imagePullSecrets:
+      - name: "{{ include "common.namespace" . }}-docker-registry-key"
diff --git a/kubernetes/clamp/templates/service.yaml b/kubernetes/clamp/templates/service.yaml
new file mode 100644 (file)
index 0000000..91485a1
--- /dev/null
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "common.fullname" . }}
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    {{if eq .Values.service.type "NodePort" -}}
+    - port: {{ .Values.service.internalPort }}
+      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
+      name: {{ .Values.service.name }}
+    {{- else -}}
+    - port: {{ .Values.service.externalPort }}
+      targetPort: {{ .Values.service.internalPort }}
+      name: {{ .Values.service.name }}
+    {{- end}}
+  selector:
+    app: {{ include "common.name" . }}
+    release: {{ .Release.Name }}
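With the clamp defaults shown in the values.yaml change below (service.type NodePort, global.nodePortPrefix 302, service.nodePort 95), the NodePort branch above is expected to render roughly as follows; prefix and suffix are concatenated, reproducing port 30295 that was previously hard-coded in the deleted all-services.yaml:

  ports:
    - port: 8080          # .Values.service.internalPort
      nodePort: 30295     # nodePortPrefix "302" followed by service.nodePort "95"
      name: clamp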
index 103472a..e077531 100644 (file)
@@ -1,13 +1,85 @@
-nsPrefix: onap
+#################################################################
+# Global configuration defaults.
+#################################################################
+global: # global defaults
+  nodePortPrefix: 302
+  repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ==
+  readinessRepository: oomk8s
+  readinessImage: readiness-check:1.0.0
+  loggingRepository: docker.elastic.co
+  loggingImage: beats/filebeat:5.5.0
+
+subChartsOnly:
+  enabled: true
+
+# application image
+repository: nexus3.onap.org:10001
+image: onap/clamp
 pullPolicy: Always
-nodePortPrefix: 302
-mysqlPassword: strong_pitchou
-dataRootDir: /dockerdata-nfs
-clampReplicas: 1
-clampMariaDbReplicas: 1
-image:
-  readiness: oomk8s/readiness-check:1.1.0
-  clampImage: nexus3.onap.org:10001/onap/clamp
-  clampVersion: v1.1.0
-  mariadbImage: nexus3.onap.org:10001/mariadb
-  mariadbVersion: 10.1.11
+
+# flag to enable debugging - application support required
+debugEnabled: false
+
+#################################################################
+# Application configuration defaults.
+#################################################################
+config:
+  mysqlPassword: strong_pitchou
+  dataRootDir: /dockerdata-nfs
+  springApplicationJson: '{
+  "spring.datasource.camunda.url": "jdbc:mariadb:sequential://clamp-mariadb.{{include "common.namespace" .}}:3306/camundabpm?verifyServerCertificate=false&useSSL=false&requireSSL=false&autoReconnect=true&retriesAllDown=2147483647&failoverLoopRetries=2147483647",
+  "spring.datasource.cldsdb.url": "jdbc:mariadb:sequential://clamp-mariadb.{{include "common.namespace" .}}:3306/cldsdb4?verifyServerCertificate=false&useSSL=false&requireSSL=false&autoReconnect=true&retriesAllDown=2147483647&failoverLoopRetries=2147483647"
+  }'
+
+# subchart configuration
+mariadb:
+  nameOverride: clampdb
+
+
+# default number of instances
+replicaCount: 1
+
+nodeSelector: {}
+
+affinity: {}
+
+# probe configuration parameters
+liveness:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  # necessary to disable liveness probe when setting breakpoints
+  # in debugger so K8s doesn't restart unresponsive container
+  enabled: true
+
+readiness:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+
+
+service:
+  type: NodePort
+  name: clamp
+  internalPort: 8080
+  nodePort: 95
+
+ingress:
+  enabled: false
+
+resources: {}
+  # We usually recommend not specifying default resources and leaving this as a conscious
+  # choice for the user. This also increases the chances that charts will run on environments with
+  # little resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  #
+  # Example:
+  # Configure resource requests and limits
+  # ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  # Minimum for development: 2 CPU cores, 4GB memory
+  # Minimum for production: 4 CPU cores, 8GB memory
+#resources:
+#  limits:
+#    cpu: 2
+#    memory: 4Gi
+#  requests:
+#    cpu: 2
+#    memory: 4Gi
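A minimal, purely illustrative override file for this chart (file name and values are hypothetical, not part of this change) showing how the new configuration block is meant to be used, for example while debugging with the liveness probe disabled as the comment above suggests:

  config:
    mysqlPassword: change-me     # overrides the default shipped in values.yaml
  liveness:
    enabled: false               # keeps Kubernetes from restarting a container paused in a debugger
  replicaCount: 1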
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/bin/kubectl b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/bin/kubectl
deleted file mode 100755 (executable)
index d53ce5f..0000000
Binary files a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/bin/kubectl and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/aai-search-storage-write-script.sh b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/aai-search-storage-write-script.sh
deleted file mode 100755 (executable)
index fce0a63..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-if curl -s -X PUT http://elasticsearch.namespace-placeholder:9200/searchhealth/stats/testwrite -d @/consul/config/scripts/aai-search-storage-write-doc.txt | grep '\"created\":true'; then
-   if curl -s -X DELETE http://elasticsearch.namespace-placeholder:9200/searchhealth/stats/testwrite | grep '\"failed\":0'; then
-      if curl -s -X GET http://elasticsearch.namespace-placeholder:9200/searchhealth/stats/testwrite | grep '\"found\":false'; then
-         echo Successful PUT, DELETE, GET from Search Document Storage 2>&1
-         exit 0
-      else
-         echo Failed GET from Search Document Storage 2>&1
-         exit 1
-      fi
-   else
-      echo Failed DELETE from Search Document Storage 2>&1
-      exit 1
-   fi
-else
-   echo Failed PUT from Search Document Storage 2>&1
-   exit 1
-fi
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/appc-dbhost-script.sh b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/appc-dbhost-script.sh
deleted file mode 100755 (executable)
index 9abfd49..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-APPC_DBHOST_POD=$(/consul/config/bin/kubectl -n namespace-placeholder  get pod | grep -o "appc-dbhost-[^[:space:]]*")
-if [ -n "$APPC_DBHOST_POD" ]; then
-   if /consul/config/bin/kubectl -n namespace-placeholder exec -it $APPC_DBHOST_POD -- ./healthcheck.sh |grep -i "mysqld is alive"; then
-      echo Success. APPC DBHost is running. 2>&1
-      exit 0
-   else
-      echo Failed. APPC DBHost is not running. 2>&1
-      exit 1
-   fi
-else
-   echo Failed. APPC DBHost is offline. 2>&1
-   exit 1
-fi
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/gremlin-script.sh b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/gremlin-script.sh
deleted file mode 100755 (executable)
index e55c90f..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-
-NAME=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "gremlin[^[:space:]]*")
-
-if [ -n "$NAME" ]; then
-   if /consul/config/bin/kubectl -n namespace-placeholder exec -it $NAME -- ps -efww | grep 'java' | grep 'gremlin-server' > /dev/null; then
-
-      echo Success. Gremlin Server process is running. 2>&1
-      exit 0
-   else
-      echo Failed. Gremlin Server process is not running. 2>&1
-      exit 1
-   fi
-else
-   echo Failed. Gremlin Server container is offline. 2>&1
-   exit 1
-fi
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdnc-dbhost-script.sh b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/scripts/sdnc-dbhost-script.sh
deleted file mode 100755 (executable)
index 8a523ce..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-SDNC_DBHOST_POD=$(/consul/config/bin/kubectl -n namespace-placeholder  get pod | grep -o "sdnc-dbhost-[^[:space:]]*")
-if [ -n "$SDNC_DBHOST_POD" ]; then
-   if /consul/config/bin/kubectl -n namespace-placeholder exec -it $SDNC_DBHOST_POD -- ./healthcheck.sh |grep -i "mysqld is alive"; then
-      echo Success. SDNC DBHost is running. 2>&1
-      exit 0
-   else
-      echo Failed. SDNC DBHost is not running. 2>&1
-      exit 1
-   fi
-else
-   echo Failed. SDNC DBHost is offline. 2>&1
-   exit 1
-fi
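The Consul health-check scripts referenced in the hunks below move from /consul/config/scripts to /consul/scripts. A hedged sketch, purely an assumption since the updated consul-agent-deployment.yaml is not shown in this diff, of the kind of volumeMount that would expose the new scripts configmap at that path:

  volumeMounts:
    - mountPath: /consul/scripts
      name: consul-agent-scripts-config   # hypothetical volume name backed by the new configmap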
@@ -5,7 +5,7 @@
       {
         "id": "data-router-process",
         "name": "Synapse Presence",
-        "script": "/consul/config/scripts/data-router-script.sh",
+        "script": "/consul/scripts/data-router-script.sh",
         "interval": "15s",
         "timeout": "1s"
       }
@@ -5,7 +5,7 @@
       {
         "id": "hbase-aai",
         "name": "HBase Health Check",
-        "http": "http://hbase.namespace-placeholder:8080/status/cluster",
+        "http": "http://hbase.{{ .Values.nsPrefix }}:8080/status/cluster",
         "method": "GET",
         "header": {
           "Cache-Control": ["no-cache"],
@@ -5,7 +5,7 @@
       {
         "id": "model-loader-process",
         "name": "Model Loader Presence",
-        "script": "/consul/config/scripts/model-loader-script.sh",
+        "script": "/consul/scripts/model-loader-script.sh",
         "interval": "15s",
         "timeout": "1s"
       }
@@ -5,26 +5,26 @@
       {
         "id": "elasticsearch",
         "name": "Search Data Service Document Store",
-        "http": "http://aai-elasticsearch.namespace-placeholder:9200/_cat/indices?v",
+        "http": "http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/_cat/indices?v",
         "interval": "15s",
         "timeout": "1s"
       },
       {
         "id": "elasticsearch-write-health",
         "name": "Search Data Service Document Store Write Test",
-        "script": "/consul/config/scripts/aai-search-storage-write-script.sh",
+        "script": "/consul/scripts/aai-search-storage-write-script.sh",
         "interval": "60s"
       },
       {
         "id": "search-data-service-availability",
         "name": "Search Data Service Availability",
-        "script": "curl -k  --cert /consul/config/certs/client-cert-onap.crt.pem --cert-type PEM --key /consul/config/bin/client-cert-onap.key.pem --key-type PEM https://search-data-service.namespace-placeholder:9509/services/search-data-service/v1/jaxrsExample/jaxrs-services/echo/up 2>&1 | grep 'Up'",
+        "script": "curl -k  --cert /consul/certs/client-cert-onap.crt.pem --cert-type PEM --key /consul/certs/client-cert-onap.key.pem --key-type PEM https://search-data-service.{{ .Values.nsPrefix }}:9509/services/search-data-service/v1/jaxrsExample/jaxrs-services/echo/up 2>&1 | grep 'Up'",
         "interval": "15s"
       },
       {
         "id": "search-data-service-api",
         "name": "Search Data Service Operational Test",
-        "script": "/consul/config/scripts/search-data-service-availability.sh",
+        "script": "/consul/scripts/search-data-service-availability.sh",
         "interval": "15s",
         "timeout": "1s"
       }
@@ -5,7 +5,7 @@
       {
         "id": "aai-service",
         "name": "Core A&AI",
-        "http": "https://aai-service.namespace-placeholder:8443/aai/util/echo",
+        "http": "https://aai-service.{{ .Values.nsPrefix }}:8443/aai/util/echo",
         "header": {
           "Authorization": ["Basic QUFJOkFBSQ=="],
           "X-TransactionId": ["ConsulHealthCheck"],
@@ -18,7 +18,7 @@
       {
         "id": "aai-resources",
         "name": "Resources Microservice",
-        "http": "https://aai-resources.namespace-placeholder:8447/aai/util/echo",
+        "http": "https://aai-resources.{{ .Values.nsPrefix }}:8447/aai/util/echo",
         "header": {
           "Authorization": ["Basic QUFJOkFBSQ=="],
           "X-TransactionId": ["ConsulHealthCheck"],
@@ -31,7 +31,7 @@
       {
         "id": "aai-traversal",
         "name": "Traversal Microservice",
-        "http": "https://aai-traversal.namespace-placeholder:8446/aai/util/echo",
+        "http": "https://aai-traversal.{{ .Values.nsPrefix }}:8446/aai/util/echo",
         "header": {
           "Authorization": ["Basic QUFJOkFBSQ=="],
           "X-TransactionId": ["ConsulHealthCheck"],
         "tls_skip_verify": true,
         "interval": "15s",
         "timeout": "1s"
-      },
-      {
-        "id": "gremlin-server",
-        "name": "Graph Data Store",
-        "script": "/consul/config/scripts/gremlin-script.sh",
-        "interval": "15s",
-        "timeout": "1s"
       }
     ]
   }
@@ -5,7 +5,7 @@
       {
         "id": "sparky-be-process",
         "name": "UI Backend Presence",
-        "script": "/consul/config/scripts/sparky-be-script.sh",
+        "script": "/consul/scripts/sparky-be-script.sh",
         "interval": "15s",
         "timeout": "1s"
       }
@@ -5,7 +5,7 @@
       {
         "id": "tabular-backend",
         "name": "Tabular Data Store Operational Test",
-        "script": "/consul/config/scripts/tabular-db-availability.sh",
+        "script": "/consul/scripts/tabular-db-availability.sh",
         "interval": "15s",
         "timeout": "1s"
       }
@@ -5,7 +5,7 @@
       {
         "id": "appc-dgbuilder",
         "name": "APPC-Dgbuilder Server Health Check",
-        "http": "http://appc-dgbuilder.namespace-placeholder:3000/",
+        "http": "http://appc-dgbuilder.{{ .Values.nsPrefix }}:3000/",
         "method": "HEAD",
         "header": {
           "Authorization": ["Basic ZGd1c2VyOnRlc3QxMjM="],
@@ -5,7 +5,7 @@
       {
         "id": "appc-dbhost-healthcheck",
         "name": "APPC DBHost Health Check",
-        "script": "/consul/config/scripts/appc-dbhost-script.sh",
+        "script": "/consul/scripts/appc-dbhost-script.sh",
         "interval": "10s",
         "timeout": "1s"
       }
@@ -3,9 +3,9 @@
     "name": "Health Check: APPC-SDN-CTL-DB-01",
     "checks": [
       {
-        "id": "appc-sdnctldb01.namespace-placeholder",
+        "id": "appc-sdnctldb01.{{ .Values.nsPrefix }}",
         "name": "APPC SDNCTLDB01 Health Check",
-        "tcp": "appc-sdnctldb01.namespace-placeholder:3306",
+        "tcp": "appc-sdnctldb01.{{ .Values.nsPrefix }}:3306",
         "interval": "10s",
         "timeout": "1s"
       }
@@ -3,9 +3,9 @@
     "name": "Health Check: APPC-SDN-CTL-DB-02",
     "checks": [
       {
-        "id": "appc-sdnctldb02.namespace-placeholder",
+        "id": "appc-sdnctldb02.{{ .Values.nsPrefix }}",
         "name": "APPC SDNCTLDB02 Health Check",
-        "tcp": "appc-sdnctldb02.namespace-placeholder:3306",
+        "tcp": "appc-sdnctldb02.{{ .Values.nsPrefix }}:3306",
         "interval": "10s",
         "timeout": "1s"
       }
@@ -5,7 +5,7 @@
       {
         "id": "appc-sdnhost",
         "name": "APPC SDN Host Health Check",
-        "http": "http://appc-sdnhost.namespace-placeholder:8282/apidoc/explorer/index.html",
+        "http": "http://appc-sdnhost.{{ .Values.nsPrefix }}:8282/apidoc/explorer/index.html",
         "method": "HEAD",
         "header": {
           "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
@@ -5,7 +5,7 @@
       {
         "id": "log-elasticsearch-server",
         "name": "Log Elastic Search Health Check",
-        "http": "http://elasticsearch.namespace-placeholder:9200/_cluster/health?pretty",
+        "http": "http://elasticsearch.{{ .Values.nsPrefix }}:9200/_cluster/health?pretty",
         "method": "GET",
         "tls_skip_verify": true,
         "interval": "15s",
@@ -14,7 +14,7 @@
       {
         "id": "log-elasticsearch-tcp",
         "name": "Log Elastic Search TCP Health Check",
-        "tcp": "elasticsearchtcp.namespace-placeholder:9300",
+        "tcp": "elasticsearchtcp.{{ .Values.nsPrefix }}:9300",
         "interval": "15s",
         "timeout": "1s"
       }
@@ -5,7 +5,7 @@
       {
         "id": "log-kibana-server",
         "name": "Log kibana Health Check",
-        "http": "http://kibana.namespace-placeholder:5601/status",
+        "http": "http://kibana.{{ .Values.nsPrefix }}:5601/status",
         "method": "HEAD",
         "tls_skip_verify": true,
         "interval": "15s",
@@ -5,7 +5,7 @@
       {
         "id": "log-logstash-internal-server-gi",
         "name": "Log Stash Health Check - General Information",
-        "http": "http://logstashinternal.namespace-placeholder:9600/?pretty",
+        "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/?pretty",
         "method": "GET",
         "tls_skip_verify": true,
         "interval": "15s",
@@ -14,7 +14,7 @@
       {
         "id": "log-logstash-internal-server-node-info",
         "name": "Log Stash Health Check - Node Information",
-        "http": "http://logstashinternal.namespace-placeholder:9600/_node/?pretty",
+        "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/?pretty",
         "method": "GET",
         "tls_skip_verify": true,
         "interval": "15s",
@@ -23,7 +23,7 @@
       {
         "id": "log-logstash-internal-server-os-info",
         "name": "Log Stash Health Check - OS Information",
-        "http": "http://logstashinternal.namespace-placeholder:9600/_node/os?pretty",
+        "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/os?pretty",
         "method": "GET",
         "tls_skip_verify": true,
         "interval": "15s",
@@ -32,7 +32,7 @@
       {
         "id": "log-logstash-internal-server-jvm-info",
         "name": "Log Stash Health Check - JVM Information",
-        "http": "http://logstashinternal.namespace-placeholder:9600/_node/jvm?pretty",
+        "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/jvm?pretty",
         "method": "GET",
         "tls_skip_verify": true,
         "interval": "15s",
@@ -41,7 +41,7 @@
       {
         "id": "log-logstash-internal-server-plugin-info",
         "name": "Log Stash Health Check - Plugin Information",
-        "http": "http://logstashinternal.namespace-placeholder:9600/_node/plugins?pretty",
+        "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/plugins?pretty",
         "method": "GET",
         "tls_skip_verify": true,
         "interval": "15s",
@@ -50,7 +50,7 @@
       {
         "id": "log-logstash-internal-server-node-stat",
         "name": "Log Stash Health Check - Node Stats",
-        "http": "http://logstashinternal.namespace-placeholder:9600/_node/stats?pretty",
+        "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats?pretty",
         "method": "GET",
         "tls_skip_verify": true,
         "interval": "15s",
@@ -59,7 +59,7 @@
       {
         "id": "log-logstash-internal-server-jvm-stat",
         "name": "Log Stash Health Check - JVM Stats",
-        "http": "http://logstashinternal.namespace-placeholder:9600/_node/stats/jvm?pretty",
+        "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats/jvm?pretty",
         "method": "GET",
         "tls_skip_verify": true,
         "interval": "15s",
@@ -68,7 +68,7 @@
       {
         "id": "log-logstash-internal-server-process-stat",
         "name": "Log Stash Health Check - Process Stats",
-        "http": "http://logstashinternal.namespace-placeholder:9600/_node/stats/process?pretty",
+        "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats/process?pretty",
         "method": "GET",
         "tls_skip_verify": true,
         "interval": "15s",
@@ -77,7 +77,7 @@
       {
         "id": "log-logstash-internal-server-os-stat",
         "name": "Log Stash Health Check - OS Stats",
-        "http": "http://logstashinternal.namespace-placeholder:9600/_node/stats/os?pretty",
+        "http": "http://logstashinternal.{{ .Values.nsPrefix }}:9600/_node/stats/os?pretty",
         "method": "GET",
         "tls_skip_verify": true,
         "interval": "15s",
@@ -86,7 +86,7 @@
       {
         "id": "log-logstash-tcp",
         "name": "Log Stash File Beat TCP Health Check",
-        "tcp": "logstash.namespace-placeholder:5044",
+        "tcp": "logstash.{{ .Values.nsPrefix }}:5044",
         "interval": "15s",
         "timeout": "1s"
       }
@@ -2,7 +2,7 @@
   "service": {
     "name": "Health Check: Message Router - DMaaP",
     "check": {
-      "http": "http://dmaap.namespace-placeholder:3904/topics",
+      "http": "http://dmaap.{{ .Values.nsPrefix }}:3904/topics",
       "interval": "30s",
       "timeout": "1s"
     }
@@ -2,7 +2,7 @@
  "service": {
    "name": "Health Check: Message Router - Kafka",
    "check": {
-     "script": "/consul/config/scripts/mr-kafka-health.sh",
+     "script": "/consul/scripts/mr-kafka-health.sh",
      "interval": "30s",
      "timeout": "1s"
    }
@@ -2,7 +2,7 @@
  "service": {
    "name": "Health Check: Message Router - ZooKeeper",
    "check": {
-     "script": "/consul/config/scripts/mr-zookeeper-health.sh",
+     "script": "/consul/scripts/mr-zookeeper-health.sh",
      "interval": "30s",
      "timeout": "1s"
    }
@@ -3,34 +3,34 @@
     "name": "Health Check: MSB",
     "checks": [
       {
-        "id": "msb-eag.namespace-placeholder",
+        "id": "msb-eag.{{ .Values.nsPrefix }}",
         "name": "MSB eag Health Check",
-        "http": "http://msb-eag.namespace-placeholder:80/iui/microservices/default.html",
+        "http": "http://msb-eag.{{ .Values.nsPrefix }}:80/iui/microservices/default.html",
         "method": "HEAD",
         "tls_skip_verify": true,
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "msb-iag.namespace-placeholder",
+        "id": "msb-iag.{{ .Values.nsPrefix }}",
         "name": "MSB iag Health Check",
-        "http": "http://msb-iag.namespace-placeholder:80/iui/microservices/default.html",
+        "http": "http://msb-iag.{{ .Values.nsPrefix }}:80/iui/microservices/default.html",
         "method": "HEAD",
         "tls_skip_verify": true,
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "msb-consul.namespace-placeholder",
+        "id": "msb-consul.{{ .Values.nsPrefix }}",
         "name": "MSB consul Health Check",
-        "tcp": "msb-consul.namespace-placeholder:8500",
+        "tcp": "msb-consul.{{ .Values.nsPrefix }}:8500",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "msb-discovery.namespace-placeholder",
+        "id": "msb-discovery.{{ .Values.nsPrefix }}",
         "name": "MSB discovery Health Check",
-        "tcp": "msb-discovery.namespace-placeholder:10081",
+        "tcp": "msb-discovery.{{ .Values.nsPrefix }}:10081",
         "interval": "15s",
         "timeout": "1s"
       }
@@ -5,21 +5,21 @@
       {
         "id": "mso-api-healthcheck",
         "name": "MSO API Health Check",
-        "script": "/consul/config/scripts/mso-api-script.sh",
+        "script": "/consul/scripts/mso-api-script.sh",
         "interval": "10s",
         "timeout": "1s"
       },
       {
         "id": "mso-camunda-healthcheck",
         "name": "MSO Camunda Health Check",
-        "script": "/consul/config/scripts/mso-camunda-script.sh",
+        "script": "/consul/scripts/mso-camunda-script.sh",
         "interval": "10s",
         "timeout": "1s"
       },
       {
         "id": "mso-jra-healthcheck",
         "name": "MSO JRA Health Check",
-        "script": "/consul/config/scripts/mso-jra-script.sh",
+        "script": "/consul/scripts/mso-jra-script.sh",
         "interval": "10s",
         "timeout": "1s"
       }
@@ -5,7 +5,7 @@
       {
           "id": "mso-mariadb",
           "name": "MSO Mariadb Health Check",
-          "script": "/consul/config/scripts/mso-mariadb-script.sh",
+          "script": "/consul/scripts/mso-mariadb-script.sh",
           "interval": "10s",
           "timeout": "1s"
       }
@@ -5,7 +5,7 @@
       {
         "id": "framework",
         "name": "Framework Health Check",
-        "http": "http://framework.namespace-placeholder:9001/api/multicloud/v0/swagger.json",
+        "http": "http://framework.{{ .Values.nsPrefix }}:9001/api/multicloud/v0/swagger.json",
         "method": "HEAD",
         "header": {
           "Cache-Control": ["no-cache"],
@@ -19,7 +19,7 @@
       {
         "id": "multicloud-ocata",
         "name": "Multicloud Ocata Health Check",
-        "http": "http://multicloud-ocata.namespace-placeholder:9006/api/multicloud-ocata/v0/swagger.json",
+        "http": "http://multicloud-ocata.{{ .Values.nsPrefix }}:9006/api/multicloud-ocata/v0/swagger.json",
         "method": "HEAD",
         "header": {
           "Cache-Control": ["no-cache"],
@@ -33,7 +33,7 @@
       {
         "id": "multicloud-vio",
         "name": "Multicloud Vio Health Check",
-        "http": "http://multicloud-vio.namespace-placeholder:9004/api/multicloud-vio/v0/swagger.json",
+        "http": "http://multicloud-vio.{{ .Values.nsPrefix }}:9004/api/multicloud-vio/v0/swagger.json",
         "method": "HEAD",
         "header": {
           "Cache-Control": ["no-cache"],
@@ -47,7 +47,7 @@
       {
         "id": "multicloud-windriver",
         "name": "Multicloud Windriver Health Check",
-        "http": "http://multicloud-windriver.namespace-placeholder:9005/api/multicloud-titanium_cloud/v0/swagger.json",
+        "http": "http://multicloud-windriver.{{ .Values.nsPrefix }}:9005/api/multicloud-titanium_cloud/v0/swagger.json",
         "method": "HEAD",
         "header": {
           "Cache-Control": ["no-cache"],
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh
new file mode 100755 (executable)
index 0000000..3d26f6e
--- /dev/null
@@ -0,0 +1,17 @@
+if curl -s -X PUT http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite -d @/consul/scripts/aai-search-storage-write-doc.txt | grep '\"created\":true'; then
+   if curl -s -X DELETE http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite | grep '\"failed\":0'; then
+      if curl -s -X GET http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite | grep '\"found\":false'; then
+         echo Successful PUT, DELETE, GET from Search Document Storage 2>&1
+         exit 0
+      else
+         echo Failed GET from Search Document Storage 2>&1
+         exit 1
+      fi
+   else
+      echo Failed DELETE from Search Document Storage 2>&1
+      exit 1
+   fi
+else
+   echo Failed PUT from Search Document Storage 2>&1
+   exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh
new file mode 100755 (executable)
index 0000000..5f91c5e
--- /dev/null
@@ -0,0 +1,13 @@
+APPC_DBHOST_POD=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }}  get pod | grep -o "appc-dbhost-[^[:space:]]*")
+if [ -n "$APPC_DBHOST_POD" ]; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $APPC_DBHOST_POD -- ./healthcheck.sh |grep -i "mysqld is alive"; then
+      echo Success. APPC DBHost is running. 2>&1
+      exit 0
+   else
+      echo Failed. APPC DBHost is not running. 2>&1
+      exit 1
+   fi
+else
+   echo Failed. APPC DBHost is offline. 2>&1
+   exit 1
+fi
@@ -1,8 +1,8 @@
 
-NAME=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "data-router[^[:space:]]*")
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-data-router[^[:space:]]*")
 
 if [ -n "$NAME" ]; then
-   if /consul/config/bin/kubectl -n namespace-placeholder exec -it $NAME -- ps -efww | grep 'java' | grep 'data-router' > /dev/null; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- ps -efww | grep 'java' | grep 'data-router' > /dev/null; then
 
       echo Success. Synapse process is running. 2>&1
       exit 0
@@ -1,8 +1,8 @@
 
-NAME=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "model-loader[^[:space:]]*")
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-model-loader[^[:space:]]*")
 
 if [ -n "$NAME" ]; then
-   if /consul/config/bin/kubectl -n namespace-placeholder exec -it $NAME -- ps -efww | grep 'java' | grep 'model-loader' > /dev/null; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- ps -efww | grep 'java' | grep 'model-loader' > /dev/null; then
 
       echo Success. Model Loader process is running. 2>&1
       exit 0
@@ -1,6 +1,6 @@
-kafkapod=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "global-kafka-[^[:space:]]*")
+kafkapod=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "message-router-global-kafka-[^[:space:]]*")
 if [ -n "$kafkapod" ]; then
-   if /consul/config/bin/kubectl -n namespace-placeholder exec -it $kafkapod -- ps ef | grep -i kafka; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $kafkapod -- ps ef | grep -i kafka; then
       echo Success. Kafka process is running. 2>&1
       exit 0
    else
@@ -1,6 +1,6 @@
-zkpod=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "zookeeper-[^[:space:]]*")
+zkpod=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "message-router-zookeeper-[^[:space:]]*")
 if [ -n "$zkpod" ]; then
-   if /consul/config/bin/kubectl -n namespace-placeholder exec -it $zkpod -- ps ef | grep -i zookeeper; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $zkpod -- ps ef | grep -i zookeeper; then
       echo Success. Zookeeper process is running. 2>&1
       exit 0
    else
@@ -1,5 +1,5 @@
 ## Query the health check API.
-HEALTH_CHECK_ENDPOINT="http://mso.namespace-placeholder:8080/ecomp/mso/infra/healthcheck"
+HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/ecomp/mso/infra/healthcheck"
 HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
 
 READY=$(echo $HEALTH_CHECK_RESPONSE | grep "Application ready")
@@ -1,5 +1,5 @@
 ## Query the health check API.
-HEALTH_CHECK_ENDPOINT="http://mso.namespace-placeholder:8080/mso/healthcheck"
+HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/mso/healthcheck"
 HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
 
 READY=$(echo $HEALTH_CHECK_RESPONSE | grep "Application ready")
@@ -1,5 +1,5 @@
 ## Query the health check API.
-HEALTH_CHECK_ENDPOINT="http://mso.namespace-placeholder:8080/networks/rest/healthcheck"
+HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/networks/rest/healthcheck"
 HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
 
 READY=$(echo $HEALTH_CHECK_RESPONSE | grep "Application ready")
@@ -1,7 +1,7 @@
-NAME=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "mso-mariadb[^[:space:]]*")
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "mso-mariadb[^[:space:]]*")
 
    if [ -n "$NAME" ]; then
-       if /consul/config/bin/kubectl -n namespace-placeholder exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then
+       if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then
          echo Success. mariadb process is running. 2>&1
          exit 0
       else
@@ -1,5 +1,5 @@
 ## Query the health check API.
-HEALTH_CHECK_ENDPOINT="http://sdc-fe.namespace-placeholder:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
 HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
 
 ## Strip out the ON_BOARDING section from the response XML (otherwise we will
@@ -1,5 +1,5 @@
 ## Query the health check API.
-HEALTH_CHECK_ENDPOINT="http://sdc-fe.namespace-placeholder:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
 HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
 
 ## Strip out the ON_BOARDING section from the response XML (otherwise we will
@@ -1,5 +1,5 @@
 ## Query the health check API.
-HEALTH_CHECK_ENDPOINT="http://sdc-fe.namespace-placeholder:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
 HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
 
 ## Strip out the ON_BOARDING section from the response XML (otherwise we will
@@ -1,5 +1,5 @@
 ## Query the health check API.
-HEALTH_CHECK_ENDPOINT="http://sdc-fe.namespace-placeholder:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
 HEALTH_CHECK_RESPONSE=$(curl -s $HEALTH_CHECK_ENDPOINT)
 
 ## Strip out the ON_BOARDING section from the response XML (otherwise we will
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh
new file mode 100755 (executable)
index 0000000..27b9b9f
--- /dev/null
@@ -0,0 +1,13 @@
+SDNC_DBHOST_POD=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }}  get pod | grep -o "sdnc-dbhost-[^[:space:]]*")
+if [ -n "$SDNC_DBHOST_POD" ]; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $SDNC_DBHOST_POD -- ./healthcheck.sh |grep -i "mysqld is alive"; then
+      echo Success. SDNC DBHost is running. 2>&1
+      exit 0
+   else
+      echo Failed. SDNC DBHost is not running. 2>&1
+      exit 1
+   fi
+else
+   echo Failed. SDNC DBHost is offline. 2>&1
+   exit 1
+fi
@@ -1,6 +1,6 @@
 #!/bin/sh
 
-SEARCH_SERVICE_NAME="search-data-service.namespace-placeholder"
+SEARCH_SERVICE_NAME="search-data-service.{{ .Values.nsPrefix }}"
 SEARCH_SERVICE_PORT=9509
 HEALTH_CHECK_INDEX="healthcheck"
 
@@ -8,13 +8,11 @@ HEALTH_CHECK_INDEX="healthcheck"
 INDEX_URL="https://$SEARCH_SERVICE_NAME:$SEARCH_SERVICE_PORT/services/search-data-service/v1/search/indexes/$HEALTH_CHECK_INDEX"
 INDEX_SCHEMA="{\"fields\":[{\"name\": \"field1\", \"data-type\": \"string\"}]}"
 
-
-SEARCH_CERT_FILE="/consul/config/certs/client-cert-onap.crt.pem"
-SEARCH_KEY_FILE="/consul/config/certs/client-cert-onap.key.pem"
-
+SEARCH_CERT_FILE="/consul/certs/client-cert-onap.crt.pem"
+SEARCH_KEY_FILE="/consul/certs/client-cert-onap.key.pem"
 
 ## Try to create an index via the Search Data Service API.
-CREATE_INDEX_RESP=$(curl -s -o /dev/null -w "%{http_code}" -k --cert $SEARCH_CERT_FILE --cert-type PEM --key $SEARCH_KEY_FILE --key-type PEM -d "$INDEX_SCHEMA" --header "Content-Type: application/json" --header "X-TransactionId: ConsulHealthCheck" -X PUT $INDEX_URL) 
+CREATE_INDEX_RESP=$(curl -s -o /dev/null -w "%{http_code}" -k --cert $SEARCH_CERT_FILE --cert-type PEM --key $SEARCH_KEY_FILE --key-type PEM -d "$INDEX_SCHEMA" --header "Content-Type: application/json" --header "X-TransactionId: ConsulHealthCheck" -X PUT $INDEX_URL)
 
 RESULT_STRING=" "
 
@@ -33,13 +31,13 @@ else
 fi
 
 ## Now, clean up after ourselves.
-DELETE_INDEX_RESP=$(curl -s -o /dev/null -w "%{http_code}" -k --cert $SEARCH_CERT_FILE --cert-type PEM --key $SEARCH_KEY_FILE --key-type PEM -d "{ }" --header "Content-Type: application/json" --header "X-TransactionId: ConsulHealthCheck" -X DELETE $INDEX_URL) 
+DELETE_INDEX_RESP=$(curl -s -o /dev/null -w "%{http_code}" -k --cert $SEARCH_CERT_FILE --cert-type PEM --key $SEARCH_KEY_FILE --key-type PEM -d "{ }" --header "Content-Type: application/json" --header "X-TransactionId: ConsulHealthCheck" -X DELETE $INDEX_URL)
 
 if [ $DELETE_INDEX_RESP -eq 200 ]; then
    RESULT_STRING="Service Is Able To Communicate With Back End"
-else 
+else
    RESULT_STRING="Service API Failure - $DELETE_INDEX_RESP"
-   echo $RESULT_STRING 
+   echo $RESULT_STRING
    exit 1
 fi
 
@@ -1,8 +1,8 @@
 
-NAME=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "aai-sparky-be[^[:space:]]*")
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-sparky-be[^[:space:]]*")
 
 if [ -n "$NAME" ]; then
-   if /consul/config/bin/kubectl -n namespace-placeholder exec -it $NAME -- ps -efww | grep 'java' | grep 'sparky' > /dev/null; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- ps -efww | grep 'java' | grep 'sparky' > /dev/null; then
 
       echo Success. UI Backend Service process is running. 2>&1
       exit 0
@@ -1,6 +1,6 @@
 
 # Query the Hbase service for the cluster status.
-GET_CLUSTER_STATUS_RESPONSE=$(curl -si -X GET -H "Accept: text/xml" http://aai-hbase.namespace-placeholder:8080/status/cluster)
+GET_CLUSTER_STATUS_RESPONSE=$(curl -si -X GET -H "Accept: text/xml" http://hbase.{{ .Values.nsPrefix }}:8080/status/cluster)
 
 if [ -z "$GET_CLUSTER_STATUS_RESPONSE" ]; then
   echo "Tabular store is unreachable."
@@ -1,7 +1,7 @@
-NAME=$(/consul/config/bin/kubectl -n namespace-placeholder get pod | grep -o "vid-mariadb[^[:space:]]*")
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "vid-mariadb[^[:space:]]*")
 
    if [ -n "$NAME" ]; then
-       if /consul/config/bin/kubectl -n namespace-placeholder exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then
+       if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it $NAME -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then
          echo Success. mariadb process is running. 2>&1
          exit 0
       else
@@ -5,35 +5,35 @@
       {
         "id": "sdc-fe-healthcheck",
         "name": "SDC Front End Health Check",
-        "script": "/consul/config/scripts/sdc-fe-script.sh",
+        "script": "/consul/scripts/sdc-fe-script.sh",
         "interval": "10s",
         "timeout": "1s"
       },
       {
         "id": "sdc-be-healthcheck",
         "name": "SDC Back End Health Check",
-        "script": "/consul/config/scripts/sdc-be-script.sh",
+        "script": "/consul/scripts/sdc-be-script.sh",
         "interval": "10s",
         "timeout": "1s"
       },
       {
         "id": "sdc-titan-healthcheck",
         "name": "SDC Titan Health Check",
-        "script": "/consul/config/scripts/sdc-titan-script.sh",
+        "script": "/consul/scripts/sdc-titan-script.sh",
         "interval": "10s",
         "timeout": "1s"
       },
       {
         "id": "sdc-cs-healthcheck",
         "name": "SDC Cassandra Health Check",
-        "script": "/consul/config/scripts/sdc-cs-script.sh",
+        "script": "/consul/scripts/sdc-cs-script.sh",
         "interval": "10s",
         "timeout": "1s"
       },
       {
         "id": "sdc-catalog-healthcheck",
         "name": "SDC Catalog Health Check",
-        "http": "https://sdc-be.namespace-placeholder:8443/asdc/v1/catalog/services",
+        "http": "https://sdc-be.{{ .Values.nsPrefix }}:8443/asdc/v1/catalog/services",
         "header": {
           "Authorization": ["Basic dmlkOktwOGJKNFNYc3pNMFdYbGhhazNlSGxjc2UyZ0F3ODR2YW9HR21KdlV5MlU="],
           "X-ECOMP-InstanceID": ["VID"],
@@ -5,7 +5,7 @@
       {
         "id": "sdnc-dbhost-healthcheck",
         "name": "SDNC DBHOST Health Check",
-        "script": "/consul/config/scripts/sdnc-dbhost-script.sh",
+        "script": "/consul/scripts/sdnc-dbhost-script.sh",
         "interval": "10s",
         "timeout": "1s"
       }
@@ -5,7 +5,7 @@
       {
         "id": "sdnc-dgbuilder",
         "name": "SDNC-DGbuilder Health Check",
-        "http": "http://sdnc-dgbuilder.namespace-placeholder:3000/",
+        "http": "http://sdnc-dgbuilder.{{ .Values.nsPrefix }}:3000/",
         "method": "HEAD",
         "header": {
           "Authorization": ["Basic ZGd1c2VyOnRlc3QxMjM="],
@@ -5,7 +5,7 @@
       {
         "id": "odl-api-healthcheck",
         "name": "SDNC API Health Check",
-        "http": "http://sdnhost.namespace-placeholder:8282/restconf/operations/SLI-API:healthcheck",
+        "http": "http://sdnhost.{{ .Values.nsPrefix }}:8282/restconf/operations/SLI-API:healthcheck",
         "method": "POST",
         "header": {
           "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
@@ -5,7 +5,7 @@
       {
         "id": "sdnc-portal",
         "name": "SDNC Portal Health Check",
-        "http": "http://sdnc-portal.namespace-placeholder:8843/login",
+        "http": "http://sdnc-portal.{{ .Values.nsPrefix }}:8843/login",
         "method": "HEAD",
         "header": {
           "Cache-Control": ["no-cache"],
@@ -3,9 +3,9 @@
     "name": "Health Check: SDNC-SDN-CTL-DB-01",
     "checks": [
       {
-        "id": "sdnctldb01.namespace-placeholder",
+        "id": "sdnctldb01.{{ .Values.nsPrefix }}",
         "name": "SDNC SDNCTLDB01 Health Check",
-        "tcp": "sdnctldb01.namespace-placeholder:3306",
+        "tcp": "sdnctldb01.{{ .Values.nsPrefix }}:3306",
         "interval": "10s",
         "timeout": "1s"
       }
@@ -3,9 +3,9 @@
     "name": "Health Check: SDNC-SDN-CTL-DB-02",
     "checks": [
       {
-        "id": "sdnctldb02.namespace-placeholder",
+        "id": "sdnctldb02.{{ .Values.nsPrefix }}",
         "name": "SDNC SDNCTLDB02 Health Check",
-        "tcp": "sdnctldb02.namespace-placeholder:3306",
+        "tcp": "sdnctldb02.{{ .Values.nsPrefix }}:3306",
         "interval": "10s",
         "timeout": "1s"
       }
@@ -5,7 +5,7 @@
       {
         "id": "sdnc-sdnhost",
         "name": "SDNC SDN Host Health Check",
-        "http": "http://sdnhost.namespace-placeholder:8282/apidoc/explorer/index.html",
+        "http": "http://sdnhost.{{ .Values.nsPrefix }}:8282/apidoc/explorer/index.html",
         "method": "HEAD",
         "header": {
           "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
     "name": "Health Check: VFC",
     "checks": [
       {
-        "id": "vfc-catalog.namespace-placeholder",
+        "id": "vfc-catalog.{{ .Values.nsPrefix }}",
         "name": "VFC catalog Health Check",
-        "tcp": "vfc-catalog.namespace-placeholder:8806",
+        "tcp": "vfc-catalog.{{ .Values.nsPrefix }}:8806",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "vfc-emsdriver.namespace-placeholder",
+        "id": "vfc-emsdriver.{{ .Values.nsPrefix }}",
         "name": "VFC emsdriver Health Check",
-        "tcp": "vfc-emsdriver.namespace-placeholder:8206",
+        "tcp": "vfc-emsdriver.{{ .Values.nsPrefix }}:8206",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "vfc-gvnfmdriver.namespace-placeholder",
+        "id": "vfc-gvnfmdriver.{{ .Values.nsPrefix }}",
         "name": "VFC gvnfmdriver Health Check",
-        "tcp": "vfc-gvnfmdriver.namespace-placeholder:8484",
+        "tcp": "vfc-gvnfmdriver.{{ .Values.nsPrefix }}:8484",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "vfc-hwvnfmdriver.namespace-placeholder",
+        "id": "vfc-hwvnfmdriver.{{ .Values.nsPrefix }}",
         "name": "VFC hwvnfmdriver Health Check",
-        "tcp": "vfc-hwvnfmdriver.namespace-placeholder:8482",
+        "tcp": "vfc-hwvnfmdriver.{{ .Values.nsPrefix }}:8482",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "vfc-jujudriver.namespace-placeholder",
+        "id": "vfc-jujudriver.{{ .Values.nsPrefix }}",
         "name": "VFC jujudriver Health Check",
-        "tcp": "vfc-jujudriver.namespace-placeholder:8483",
+        "tcp": "vfc-jujudriver.{{ .Values.nsPrefix }}:8483",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "vfc-nokiavnfmdriver.namespace-placeholder",
+        "id": "vfc-nokiavnfmdriver.{{ .Values.nsPrefix }}",
         "name": "VFC nokiavnfmdriver Health Check",
-        "tcp": "vfc-nokiavnfmdriver.namespace-placeholder:8486",
+        "tcp": "vfc-nokiavnfmdriver.{{ .Values.nsPrefix }}:8486",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "vfc-nslcm.namespace-placeholder",
+        "id": "vfc-nslcm.{{ .Values.nsPrefix }}",
         "name": "VFC nslcm Health Check",
-        "tcp": "vfc-nslcm.namespace-placeholder:8403",
+        "tcp": "vfc-nslcm.{{ .Values.nsPrefix }}:8403",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "vfc-resmgr.namespace-placeholder",
+        "id": "vfc-resmgr.{{ .Values.nsPrefix }}",
         "name": "VFC resmgr Health Check",
-        "tcp": "vfc-resmgr.namespace-placeholder:8480",
+        "tcp": "vfc-resmgr.{{ .Values.nsPrefix }}:8480",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "vfc-vnflcm.namespace-placeholder",
+        "id": "vfc-vnflcm.{{ .Values.nsPrefix }}",
         "name": "VFC vnflcm Health Check",
-        "tcp": "vfc-vnflcm.namespace-placeholder:8801",
+        "tcp": "vfc-vnflcm.{{ .Values.nsPrefix }}:8801",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "vfc-vnfmgr.namespace-placeholder",
+        "id": "vfc-vnfmgr.{{ .Values.nsPrefix }}",
         "name": "VFC vnfmgr Health Check",
-        "tcp": "vfc-vnfmgr.namespace-placeholder:8803",
+        "tcp": "vfc-vnfmgr.{{ .Values.nsPrefix }}:8803",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "vfc-vnfres.namespace-placeholder",
+        "id": "vfc-vnfres.{{ .Values.nsPrefix }}",
         "name": "VFC vnfres Health Check",
-        "tcp": "vfc-vnfres.namespace-placeholder:8802",
+        "tcp": "vfc-vnfres.{{ .Values.nsPrefix }}:8802",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "vfc-workflow.namespace-placeholder",
+        "id": "vfc-workflow.{{ .Values.nsPrefix }}",
         "name": "VFC workflow Health Check",
-        "tcp": "vfc-workflow.namespace-placeholder:10550",
+        "tcp": "vfc-workflow.{{ .Values.nsPrefix }}:10550",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "vfc-workflowengineactiviti.namespace-placeholder",
+        "id": "vfc-workflowengineactiviti.{{ .Values.nsPrefix }}",
         "name": "VFC workflow-engine Health Check",
-        "tcp": "vfc-workflowengineactiviti.namespace-placeholder:8080",
+        "tcp": "vfc-workflowengineactiviti.{{ .Values.nsPrefix }}:8080",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "vfc-ztesdncdriver.namespace-placeholder",
+        "id": "vfc-ztesdncdriver.{{ .Values.nsPrefix }}",
         "name": "VFC ztesdncdriver Health Check",
-        "tcp": "vfc-ztesdncdriver.namespace-placeholder:8411",
+        "tcp": "vfc-ztesdncdriver.{{ .Values.nsPrefix }}:8411",
         "interval": "15s",
         "timeout": "1s"
       },
       {
-        "id": "vfc-ztevnfmdriver.namespace-placeholder",
+        "id": "vfc-ztevnfmdriver.{{ .Values.nsPrefix }}",
         "name": "VFC ztevnfmdriver Health Check",
-        "tcp": "vfc-ztevnfmdriver.namespace-placeholder:8410",
+        "tcp": "vfc-ztevnfmdriver.{{ .Values.nsPrefix }}:8410",
         "interval": "15s",
         "timeout": "1s"
       }
@@ -5,7 +5,7 @@
       {
         "id": "vid-server",
         "name": "VID Server Health Check",
-        "http": "http://vid-server.namespace-placeholder:8080/vid/healthCheck",
+        "http": "http://vid-server.{{ .Values.nsPrefix }}:8080/vid/healthCheck",
         "method": "GET",
         "header": {
           "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
@@ -20,7 +20,7 @@
       {
           "id": "vid-mariadb",
           "name": "Vid Mariadb Health Check",
-          "script": "/consul/config/scripts/vid-mariadb-script.sh",
+          "script": "/consul/scripts/vid-mariadb-script.sh",
           "interval": "10s",
           "timeout": "1s"
       }
diff --git a/kubernetes/consul/templates/consul-agent-configmap.yaml b/kubernetes/consul/templates/consul-agent-configmap.yaml
new file mode 100644 (file)
index 0000000..0f37f50
--- /dev/null
@@ -0,0 +1,25 @@
+#{{ if not .Values.disableConsulConsulAgent }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: consul-agent-configmap
+  namespace: {{ .Values.nsPrefix }}
+data:
+{{ tpl (.Files.Glob "resources/config/consul-agent-config/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: consul-agent-scripts-configmap
+  namespace: {{ .Values.nsPrefix }}
+data:
+{{ tpl (.Files.Glob "resources/config/consul-agent-config/scripts/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: consul-agent-certs-secret
+  namespace: {{ .Values.nsPrefix }}
+data:
+{{ tpl (.Files.Glob "resources/config/consul-agent-config/certs/*").AsSecrets . | indent 2 }}
+#{{ end }}
index a2bbe66..8d780f0 100644 (file)
@@ -18,15 +18,26 @@ spec:
       name: consul-agent
     spec:
       containers:
-      - image: "{{ .Values.consulimageRegistry }}:{{ .Values.consuldockerTag }}"
+      - image: "{{ .Values.consulagentRegistry }}"
         command: ["/usr/local/bin/docker-entrypoint.sh"]
         args: ["agent","-client","0.0.0.0","-enable-script-checks","-join","consul-server.{{ .Values.nsPrefix }}"]
         name: consul-server
         volumeMounts:
         - mountPath: /consul/config
           name: consul-agent-config
+        - mountPath: /consul/scripts
+          name: consul-agent-scripts-config
+        - mountPath: /consul/certs
+          name: consul-agent-certs-config
       volumes:
-      - hostPath:
-          path: {{ .Values.rootHostPath }}/{{ .Values.nsPrefix }}/consul/consul-agent-config
+      - configMap:
+          name: consul-agent-configmap
         name: consul-agent-config
+      - configMap:
+          name: consul-agent-scripts-configmap
+          defaultMode: 0755
+        name: consul-agent-scripts-config
+      - secret:
+          secretName: consul-agent-certs-secret
+        name: consul-agent-certs-config
 #{{ end }}
index 6aafee4..706aa0e 100644 (file)
@@ -18,15 +18,8 @@ spec:
       name: consul-server
     spec:
       containers:
-      - image: "{{ .Values.consulimageRegistry }}:{{ .Values.consuldockerTag }}"
+      - image: "{{ .Values.consulserverRegistry }}"
         command: ["/usr/local/bin/docker-entrypoint.sh"]
         args: ["agent","-server","-client","0.0.0.0","-enable-script-checks","-bootstrap-expect=3","-ui","-join","consul-server.{{ .Values.nsPrefix }}"]
         name: consul-server
-        volumeMounts:
-        - mountPath: /consul/config
-          name: consul-server-config
-      volumes:
-      - hostPath:
-          path: {{ .Values.rootHostPath }}/{{ .Values.nsPrefix }}/consul/consul-server-config
-        name: consul-server-config
 #{{ end }}
index a6909d7..3886727 100644 (file)
@@ -1,8 +1,7 @@
 nsPrefix: "onap"
 nodePortPrefix: 302
-consuldockerTag: "latest"
-rootHostPath: "/dockerdata-nfs"
-consulimageRegistry: "docker.io/consul"
+consulserverRegistry: "docker.io/consul:1.0.6"
+consulagentRegistry: "oomk8s/consul:1.0.0"
 consulAgentReplicas: 1
 consulServerReplicas: 3
 service:
index 8f55bd6..2df1530 100644 (file)
@@ -1,4 +1,4 @@
 apiVersion: v1
 description: A Helm chart for Kubernetes
 name: esr
-version: 1.1.0
+version: v1.0.0
diff --git a/kubernetes/esr/resources/config/log/esrserver/logback.xml b/kubernetes/esr/resources/config/log/esrserver/logback.xml
new file mode 100644 (file)
index 0000000..8b2c310
--- /dev/null
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration debug="true" scan="true" scanPeriod="3 seconds">
+   <!--<jmxConfigurator /> -->
+   <!--  specify the base path of the log directory -->
+   <property name="logDir" value="/var/log/onap" />
+   <!--  specify the component name -->
+   <property name="componentName" value="esr" />
+   <!-- specify the sub component name -->
+   <property name="subComponentName" value="xacml-pap-rest" />
+   <!-- The directories where logs are written -->
+   <property name="logDirectory" value="${logDir}/${componentName}/${subComponentName}" />
+   <property name="pattern" value="%d{&amp;quot;yyyy-MM-dd'T'HH:mm:ss.SSSXXX&amp;quot;, UTC}\t[%thread]\t%-5level\t%logger{36}\t%replace(%replace(%replace(%mdc){'\t','\\\\t'}){', ','\t'}){'\n', '\\\\n'}\t%replace(%replace(%msg){'\n', '\\\\n'}){'\t','\\\\t'}%n" />
+   <!--  log file names -->
+   <property name="errorLogName" value="error" />
+   <property name="metricsLogName" value="metrics" />
+   <property name="auditLogName" value="audit" />
+   <property name="debugLogName" value="debug" />
+   <property name="queueSize" value="256" />
+   <property name="maxFileSize" value="50MB" />
+   <property name="maxHistory" value="30" />
+   <property name="totalSizeCap" value="10GB" />
+   <!-- Example evaluator filter applied against console appender -->
+   <appender class="ch.qos.logback.core.ConsoleAppender" name="STDOUT">
+      <encoder>
+         <pattern>${pattern}</pattern>
+      </encoder>
+   </appender>
\ No newline at end of file
diff --git a/kubernetes/esr/resources/config/log/filebeat/filebeat.yml b/kubernetes/esr/resources/config/log/filebeat/filebeat.yml
new file mode 100644 (file)
index 0000000..16c7943
--- /dev/null
@@ -0,0 +1,42 @@
+filebeat.prospectors:
+#This field is mandatory; in our case it is 'log'.
+- input_type: log
+  #This is the canonical path as mentioned in logback.xml; the *.log glob means it will monitor all log files in the directory.
+  paths:
+    - /home/esr/works/logs/*.log
+#    - /var/log/onap/*/*/*/*.log
+#    - /var/log/onap/*/*/*.log
+#    - /var/log/onap/*/*.log
+  #Files older than this are ignored. In our case it is 48 hours, i.e. 2 days. It is a helper flag for clean_inactive.
+  ignore_older: 48h
+  # Remove the registry entry for a file that has been inactive for more than the specified time. In our case it is 96 hours, i.e. 4 days. It helps keep the registry records within limits.
+  clean_inactive: 96h
+
+
+# Name of the registry file. If a relative path is used, it is considered relative to the
+# data path. Otherwise, use a fully qualified file name.
+#filebeat.registry_file: ${path.data}/registry
+
+
+output.logstash:
+  #List of Logstash server IP addresses with port numbers.
+  #In our case, this will be the load balancer IP address.
+  #For the property below to work, the load balancer or Logstash must expose port 5044 to listen for Filebeat events, or the port in the property should be changed accordingly.
+  hosts: ["logstash.{{.Values.nsPrefix}}:5044"]
+  #If enabled, load balancing is done automatically among the available Logstash hosts.
+  loadbalance: true
+
+  #The list of root certificates for server verification.
+  #If certificate_authorities is empty or not set, the trusted
+  #certificate authorities of the host system are used.
+  #ssl.certificate_authorities: $ssl.certificate_authorities
+
+  #The path to the certificate for SSL client authentication. If the certificate is not specified,
+  #client authentication is not available.
+  #ssl.certificate: $ssl.certificate
+
+  #The client certificate key used for client authentication.
+  #ssl.key: $ssl.key
+
+  #The passphrase used to decrypt an encrypted key stored in the configured key file
+  #ssl.key_passphrase: $ssl.key_passphrase
index 386d401..a591a7e 100644 (file)
@@ -17,12 +17,18 @@ spec:
       name: esr-esrserver
     spec:
       containers:
-      - image: {{ .Values.esrserver.image }}
+      - name: esr-esrserver
+        image: {{ .Values.esrserver.image }}
         imagePullPolicy: {{ .Values.pullPolicy }}
-        name: esr-esrserver
         env:
         - name: MSB_ADDR
           value: {{ tpl .Values.msbaddr . }}
+        volumeMounts:
+        - name: localtime
+          mountPath: /etc/localtime
+          readOnly: true
+        - mountPath: /home/esr/works/logs
+          name: esr-server-logs
         ports:
         - containerPort: {{ .Values.esrserver.port }}
         readinessProbe:
@@ -30,6 +36,26 @@ spec:
             port: {{ .Values.esrserver.port }}
           initialDelaySeconds: 5
           periodSeconds: 10
-      imagePullSecrets:
-      - name: "{{ .Values.nsPrefix }}-docker-registry-key"
+      - name: filebeat-onap-esr-server
+        image: {{ .Values.filebeat.image }}
+        imagePullPolicy: {{ .Values.pullPolicy }}
+        volumeMounts:
+        - mountPath: /usr/share/filebeat/filebeat.yml
+          subPath: filebeat.yml
+          name: filebeat-conf
+        - mountPath: /home/esr/works/logs
+          name: esr-server-logs
+        - mountPath: /usr/share/filebeat/data
+          name: esr-server-filebeat
+      volumes:
+      - name: localtime
+        hostPath:
+          path: /etc/localtime
+      - name: filebeat-conf
+        configMap:
+          name: esr-filebeat-configmap
+      - name: esr-server-logs
+        emptyDir: {}
+      - name: esr-server-filebeat
+        emptyDir: {}
 #{{ end }}
diff --git a/kubernetes/esr/templates/esr-filebeat-configmap.yaml b/kubernetes/esr/templates/esr-filebeat-configmap.yaml
new file mode 100644 (file)
index 0000000..4bee5d1
--- /dev/null
@@ -0,0 +1,9 @@
+#{{ if not .Values.disableAaiAaiResources }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: esr-filebeat-configmap
+  namespace: {{ .Values.nsPrefix }}
+data:
+{{ tpl (.Files.Glob "resources/config/log/filebeat/*").AsConfig . | indent 2 }}
+#{{ end }}
diff --git a/kubernetes/esr/templates/esr-server-log-configmap.yaml b/kubernetes/esr/templates/esr-server-log-configmap.yaml
new file mode 100644 (file)
index 0000000..3b94168
--- /dev/null
@@ -0,0 +1,9 @@
+#{{ if not .Values.disableAaiAaiResources }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: esr-esrserver-log-configmap
+  namespace: {{ .Values.nsPrefix }}
+data:
+{{ tpl (.Files.Glob "resources/config/log/esrserver/logback.xml").AsConfig . | indent 2 }}
+#{{ end }}
index b942672..85b34ac 100644 (file)
@@ -10,3 +10,5 @@ esrgui:
   image: nexus3.onap.org:10001/onap/aai/esr-gui:v1.0.0
   port: 8080
   replicas: 1
+filebeat:
+  image: docker.elastic.co/beats/filebeat:5.5.0
diff --git a/kubernetes/helm/starters/onap-app/.helmignore b/kubernetes/helm/starters/onap-app/.helmignore
new file mode 100644 (file)
index 0000000..f0c1319
--- /dev/null
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/kubernetes/helm/starters/onap-app/Chart.yaml b/kubernetes/helm/starters/onap-app/Chart.yaml
new file mode 100644 (file)
index 0000000..46f2377
--- /dev/null
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: <Short application description - this is visible via 'helm search'>
+name: <onap-app>
+version: 2.0.0
\ No newline at end of file
diff --git a/kubernetes/helm/starters/onap-app/README.md b/kubernetes/helm/starters/onap-app/README.md
new file mode 100644 (file)
index 0000000..897a073
--- /dev/null
@@ -0,0 +1,14 @@
+# Starter Helm Chart for ONAP Applications
+
+Clone the onap-app directory and rename it to the name for your new Helm Chart.
+
+Helm Charts for specific applications should be moved into the oom/kubernetes
+directory. If the application is a common reusable Helm Chart (eg. mariadb), a
+more appropriate location might be the oom/kubernetes/common directory.
+
+Edit each yaml file in the new Helm Chart directory, substituting real values
+for those inside brackets (eg. `<onap-app>`). Some comments have been provided in
+the files to help guide the changes that need to be made. This starter Helm Chart is
+in no way complete. It can serve as the basis for creating a new Helm Chart that
+attempts to apply Helm best practices to ONAP applications being configured,
+deployed and managed in Kubernetes.
diff --git a/kubernetes/helm/starters/onap-app/requirements.yaml b/kubernetes/helm/starters/onap-app/requirements.yaml
new file mode 100644 (file)
index 0000000..acca8ef
--- /dev/null
@@ -0,0 +1,4 @@
+dependencies:
+  - name: common
+    version: ~2.0.0
+    repository: '@local'
\ No newline at end of file
diff --git a/kubernetes/helm/starters/onap-app/resources/config/README.txt b/kubernetes/helm/starters/onap-app/resources/config/README.txt
new file mode 100644 (file)
index 0000000..5cc0149
--- /dev/null
@@ -0,0 +1,10 @@
+This directory contains all external configuration files that
+need to be mounted into an application container.
+
+See the configmap.yaml in the templates directory for an example
+of how to load (i.e. map) config files from this directory into
+Kubernetes for distribution within the k8s cluster.
+
+See deployment.yaml in the templates directory for an example
+of how the 'config mapped' files are then mounted into the
+containers.
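A minimal sketch of that pairing, assuming a config file named application.properties as in this starter (the volume name app-config and the mount path /opt/app/application.properties are illustrative only):

      volumes:
        - name: app-config
          configMap:
            name: {{ include "common.fullname" . }}-configmap
      containers:
        - name: {{ include "common.name" . }}
          volumeMounts:
            - name: app-config
              mountPath: /opt/app/application.properties
              subPath: application.properties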
diff --git a/kubernetes/helm/starters/onap-app/resources/config/application.properties b/kubernetes/helm/starters/onap-app/resources/config/application.properties
new file mode 100644 (file)
index 0000000..496a15a
--- /dev/null
@@ -0,0 +1 @@
+sampleConfigKey=sampleConfigValue
\ No newline at end of file
diff --git a/kubernetes/helm/starters/onap-app/templates/NOTES.txt b/kubernetes/helm/starters/onap-app/templates/NOTES.txt
new file mode 100644 (file)
index 0000000..2465e03
--- /dev/null
@@ -0,0 +1,19 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+  http://{{ . }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch the status of it by running 'kubectl get svc -w {{ include "common.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ include "common.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
+{{- end }}
diff --git a/kubernetes/helm/starters/onap-app/templates/configmap.yaml b/kubernetes/helm/starters/onap-app/templates/configmap.yaml
new file mode 100644 (file)
index 0000000..5b8dde2
--- /dev/null
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.fullname" . }}-configmap
+  namespace: {{ include "common.namespace" . }}
+data:
+{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
\ No newline at end of file
diff --git a/kubernetes/helm/starters/onap-app/templates/deployment.yaml b/kubernetes/helm/starters/onap-app/templates/deployment.yaml
new file mode 100644 (file)
index 0000000..2006b88
--- /dev/null
@@ -0,0 +1,90 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: {{ include "common.fullname" . }}
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  template:
+    metadata:
+      labels:
+        app: {{ include "common.name" . }}
+        release: {{ .Release.Name }}
+    spec:
+      initContainers:
+#Example init container for dependency checking
+#      - command:
+#        - /root/ready.py
+#        args:
+#        - --container-name
+#        - mariadb
+#        env:
+#        - name: NAMESPACE
+#          valueFrom:
+#            fieldRef:
+#              apiVersion: v1
+#              fieldPath: metadata.namespace
+#        image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
+#        imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+#        name: {{ include "common.name" . }}-readiness
+      containers:
+        - name: {{ include "common.name" . }}
+          image: "{{ .Values.global.repository | default .Values.repository }}/{{ .Values.image }}"
+          imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+          ports:
+          - containerPort: {{ .Values.service.internalPort }}
+          # disable liveness probe when breakpoints set in debugger
+          # so K8s doesn't restart unresponsive container
+          {{- if eq .Values.liveness.enabled true }}
+          livenessProbe:
+            tcpSocket:
+              port: {{ .Values.service.internalPort }}
+            initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
+            periodSeconds: {{ .Values.liveness.periodSeconds }}
+          {{ end -}}
+          readinessProbe:
+            tcpSocket:
+              port: {{ .Values.service.internalPort }}
+            initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
+            periodSeconds: {{ .Values.readiness.periodSeconds }}
+          env:
+#Example environment variable passed to container
+#            - name: DEBUG_FLAG
+#              value: {{ .Values.global.debugEnabled | default .Values.debugEnabled | quote }}
+          volumeMounts:
+          - mountPath: /etc/localtime
+            name: localtime
+            readOnly: true
+#Example config file mount into container
+#          - mountPath: /opt/app/application.properties
+#            name: {{ include "common.name" . }}-config
+#            subPath: application.properties
+          resources:
+{{ toYaml .Values.resources | indent 12 }}
+        {{- if .Values.nodeSelector }}
+        nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 10 }}
+        {{- end -}}
+        {{- if .Values.affinity }}
+        affinity:
+{{ toYaml .Values.affinity | indent 10 }}
+        {{- end }}
+
+      volumes:
+        - name: localtime
+          hostPath:
+            path: /etc/localtime
+#Example config file mount into container
+#        - name: {{ include "common.fullname" . }}-config
+#          configMap:
+#            name: {{ include "common.fullname" . }}-configmap
+#            items:
+#            - key: application.properties
+#              path: application.properties
+      imagePullSecrets:
+      - name: "{{ include "common.namespace" . }}-docker-registry-key"
\ No newline at end of file
diff --git a/kubernetes/helm/starters/onap-app/templates/service.yaml b/kubernetes/helm/starters/onap-app/templates/service.yaml
new file mode 100644 (file)
index 0000000..afa2e3b
--- /dev/null
@@ -0,0 +1,39 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "common.fullname" . }}
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+# Example MSB registration annotation
+#    msb.onap.org/service-info: '[
+#      {
+#          "serviceName": "so",
+#          "version": "v1",
+#          "url": "/ecomp/mso/infra",
+#          "protocol": "REST"
+#          "port": "8080",
+#          "visualRange":"1"
+#      }
+#      ]'
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    {{if eq .Values.service.type "NodePort" -}}
+    - port: {{ .Values.service.externalPort }}
+      #Example internal target port if required
+      #targetPort: {{ .Values.service.internalPort }}
+      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
+      name: {{ .Values.service.name }}
+    {{- else -}}
+    - port: {{ .Values.service.externalPort }}
+      targetPort: {{ .Values.service.internalPort }}
+      name: {{ .Values.service.name }}
+    {{- end}}
+  selector:
+    app: {{ include "common.name" . }}
+    release: {{ .Release.Name }}
\ No newline at end of file
diff --git a/kubernetes/helm/starters/onap-app/values.yaml b/kubernetes/helm/starters/onap-app/values.yaml
new file mode 100644 (file)
index 0000000..4d1f44f
--- /dev/null
@@ -0,0 +1,78 @@
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+  nodePortPrefix: 302
+  repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ==
+  readinessRepository: oomk8s
+  readinessImage: readiness-check:1.0.0
+  loggingRepository: docker.elastic.co
+  loggingImage: beats/filebeat:5.5.0
+
+#################################################################
+# Application configuration defaults.
+#################################################################
+# application image
+repository: nexus3.onap.org:10001
+image: <onap-app>:<1.2-STAGING-latest>
+pullPolicy: Always
+
+# flag to enable debugging - application support required
+debugEnabled: false
+
+# application configuration
+# Example:
+config:
+#  username: myusername
+#  password: mypassword
+
+# default number of instances
+replicaCount: 1
+
+nodeSelector: {}
+
+affinity: {}
+
+# probe configuration parameters
+liveness:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  # necessary to disable liveness probe when setting breakpoints
+  # in debugger so K8s doesn't restart unresponsive container
+  enabled: true
+
+readiness:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+
+service:
+  #Example service definition with external, internal and node ports.
+  #Services may use any combination of ports depending on the 'type' of
+  #service being defined.
+  type: NodePort
+  name: <onap-app>
+  externalPort: <8080>
+  internalPort: <80>
+  nodePort: <replace with unused node port suffix eg. 23>
+
+ingress:
+  enabled: false
+
+resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  #
+  # Example:
+  # Configure resource requests and limits
+  # ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  # Minimum resources for development are 2 CPU cores and 4GB of memory
+  # Minimum resources for production are 4 CPU cores and 8GB of memory
+#resources:
+#  limits:
+#    cpu: 2
+#    memory: 4Gi
+#  requests:
+#    cpu: 2
+#    memory: 4Gi
index 10e3be4..d6c12ea 100644 (file)
@@ -19,13 +19,13 @@ iagPort: 80
 iagPort_https: 443
 iagNodePort: 30080
 iagNodePort_https: 30443
-iagReplicas: 1
+iagReplicas: 2
 
 eagPort: 80
 eagPort_https: 443
 eagNodePort: 30082
 eagNodePort_https: 30446
-eagReplicas: 1
+eagReplicas: 2
 
 kubeMasterUrl: https://kubernetes.default.svc.cluster.local:443
 discoveryUrl: http://msb-discovery.{{ .Values.nsPrefix }}:10081
index a8fd3eb..29331fb 100644 (file)
@@ -67,7 +67,7 @@ dependencies:
     repository: '@local'
     condition: portal.enabled
   - name: robot
-    version: ~0.1.0
+    version: ~2.0.0
     repository: '@local'
     condition: robot.enabled
   - name: sdc
index 96455a0..eb1eec4 100644 (file)
@@ -64,7 +64,7 @@ policy:
 portal:
   enabled: false
 robot: # Robot Health Check
-  enabled: false
+  enabled: true
 sdc:
   enabled: false
 sdnc:
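
Flipping robot.enabled to true makes the Robot health-check chart part of a default umbrella deployment; it can still be turned off per release. A sketch, assuming Helm 2 and that the parent chart is published to a local repo under the alias "local" as "onap" (release and repo names are illustrative):

  # Robot is now deployed by default with the umbrella chart ...
  helm install local/onap --name dev --namespace onap
  # ... or can be switched off again for a given release
  helm install local/onap --name dev --namespace onap --set robot.enabled=false
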
index 586046a..f0c1319 100644 (file)
@@ -1,7 +1,21 @@
 # Patterns to ignore when building packages.
 # This supports shell glob matching, relative path matching, and
 # negation (prefixed with !). Only one pattern per line.
-
-# k8s scripts
-ete-k8s.sh
-demo-k8s.sh
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
index 1f59785..d37773d 100644 (file)
@@ -1,4 +1,4 @@
 apiVersion: v1
-description: A Helm chart for Kubernetes
+description: A Helm chart for Kubernetes ONAP Robot
 name: robot
-version: 0.1.0
+version: 2.0.0
diff --git a/kubernetes/robot/all-services.yaml b/kubernetes/robot/all-services.yaml
deleted file mode 100644 (file)
index 1fbabe2..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  labels:
-    app: robot
-  name: robot
-spec:
-  ports:
-  - port: 88
-    nodePort: {{ .Values.nodePortPrefix }}09
-  selector:
-    app: robot
-  type: NodePort
diff --git a/kubernetes/robot/requirements.yaml b/kubernetes/robot/requirements.yaml
new file mode 100644 (file)
index 0000000..56029ab
--- /dev/null
@@ -0,0 +1,7 @@
+dependencies:
+  - name: common
+    version: ~2.0.0
+    # local reference to common chart, as it is
+    # a part of this chart's package and will not
+    # be published independently to a repo (at this point)
+    repository: '@local'
\ No newline at end of file
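
The '@local' repository alias means the shared common chart has to be resolvable from a locally served Helm repo before the robot chart's dependencies can be fetched. A rough Helm 2 sketch (paths follow this repository's layout; the served directory is Helm 2's default):

  helm serve &                                  # local repo at http://127.0.0.1:8879
  helm repo add local http://127.0.0.1:8879
  helm package -d ~/.helm/repository/local kubernetes/common
  helm repo index ~/.helm/repository/local
  helm dep update kubernetes/robot              # downloads common-2.x into kubernetes/robot/charts/
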
diff --git a/kubernetes/robot/templates/NOTES.txt b/kubernetes/robot/templates/NOTES.txt
new file mode 100644 (file)
index 0000000..91d8ed4
--- /dev/null
@@ -0,0 +1,19 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.hosts }}
+  http://{{ . }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch the status of it by running 'kubectl get svc -w {{ include "common.name" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ include "common.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
+{{- end }}
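
These notes are rendered once per release; a small usage sketch (release name "dev-robot" and the namespace are illustrative):

  # Install the chart and print the notes above
  helm install kubernetes/robot --name dev-robot --namespace onap
  # Re-print the NOTES of the running release at any time
  helm status dev-robot
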
diff --git a/kubernetes/robot/templates/all-services.yaml b/kubernetes/robot/templates/all-services.yaml
deleted file mode 100644 (file)
index 604982b..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#{{ if not .Values.disableRobotRobot }}
-apiVersion: v1
-kind: Service
-metadata:
-  labels:
-    app: robot
-  name: robot
-  namespace: "{{ .Values.nsPrefix }}"
-spec:
-  ports:
-  - port: 88
-    nodePort: {{ .Values.nodePortPrefix }}09
-  selector:
-    app: robot
-  type: NodePort
-#{{ end }}
\ No newline at end of file
@@ -1,25 +1,23 @@
-#{{ if not .Values.disableRobot }}
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: robot-eteshare-configmap
-  namespace: {{ .Values.nsPrefix }}
+  name: {{ include "common.name" . }}-eteshare-configmap
+  namespace: {{ include "common.namespace" . }}
 data:
 {{ tpl (.Files.Glob "resources/config/eteshare/config/*").AsConfig . | indent 2 }}
 ---
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: robot-resources-configmap
-  namespace: {{ .Values.nsPrefix }}
+  name: {{ include "common.name" . }}-resources-configmap
+  namespace: {{ include "common.namespace" . }}
 data:
 {{ tpl (.Files.Glob "resources/config/robot/resources/*").AsConfig . | indent 2 }}
 ---
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: lighttpd-authorization-configmap
-  namespace: {{ .Values.nsPrefix }}
+  name: {{ include "common.name" . }}-lighttpd-authorization-configmap
+  namespace: {{ include "common.namespace" . }}
 data:
 {{ tpl (.Files.Glob "resources/config/authorization").AsConfig . | indent 2 }}
-#{{ end }}
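
tpl (.Files.Glob ...).AsConfig folds every matching file under resources/config/... into the ConfigMap's data: map, keyed by file name, after running it through the template engine. A quick way to inspect what actually landed in the cluster, assuming common.name resolves to "robot" and the release lives in the onap namespace:

  # List the rendered ConfigMaps and dump the eteshare one
  kubectl get configmaps -n onap | grep robot
  kubectl get configmap -n onap robot-eteshare-configmap -o yaml
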
diff --git a/kubernetes/robot/templates/deployment.yaml b/kubernetes/robot/templates/deployment.yaml
new file mode 100644 (file)
index 0000000..596d622
--- /dev/null
@@ -0,0 +1,82 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: {{ include "common.name" . }}
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  replicas: {{ .Values.robotReplicas }}
+  template:
+    metadata:
+      labels:
+        app: {{ include "common.name" . }}
+        release: {{ .Release.Name }}
+    spec:
+      containers:
+        - name: {{ .Chart.Name }}
+          image: "{{ .Values.global.repository | default .Values.repository }}/{{ .Values.image }}"
+          imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+          ports:
+          - containerPort: {{ .Values.service.internalPort }}
+          # disable liveness probe when breakpoints set in debugger
+          # so K8s doesn't restart unresponsive container
+          {{- if eq .Values.liveness.enabled true }}
+          livenessProbe:
+            tcpSocket:
+              port: {{ .Values.service.internalPort }}
+            initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
+            periodSeconds: {{ .Values.liveness.periodSeconds }}
+          {{ end -}}
+          readinessProbe:
+            tcpSocket:
+              port: {{ .Values.service.internalPort }}
+            initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
+            periodSeconds: {{ .Values.readiness.periodSeconds }}
+          volumeMounts:
+          - name: localtime
+            mountPath: /etc/localtime
+            readOnly: true
+          - name: robot-eteshare
+            mountPath: /share/config
+          - name: robot-resources
+            mountPath: /var/opt/OpenECOMP_ETE/robot/resources/asdc_interface.robot
+            subPath: asdc_interface.robot
+          - name: robot-resources
+            mountPath: /var/opt/OpenECOMP_ETE/robot/resources/policy_interface.robot
+            subPath: policy_interface.robot
+          - name: robot-resources
+            mountPath: /var/opt/OpenECOMP_ETE/robot/resources/sdngc_interface.robot
+            subPath: sdngc_interface.robot
+          - name: robot-lighttpd-authorization
+            mountPath: /etc/lighttpd/authorization
+            subPath: authorization
+          resources:
+{{ toYaml .Values.resources | indent 12 }}
+      {{- if .Values.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+      {{- end -}}
+      {{- if .Values.affinity }}
+      affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+      {{- end }}
+      volumes:
+        - name: localtime
+          hostPath:
+            path: /etc/localtime
+        - name: robot-eteshare
+          configMap:
+            name: {{ include "common.name" . }}-eteshare-configmap
+            defaultMode: 0755
+        - name: robot-resources
+          configMap:
+            name: {{ include "common.name" . }}-resources-configmap
+        - name: robot-lighttpd-authorization
+          configMap:
+            name: {{ include "common.name" . }}-lighttpd-authorization-configmap
+      imagePullSecrets:
+      - name: "{{ include "common.namespace" . }}-docker-registry-key"
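
The probes above are driven entirely by values: liveness.enabled exists so the TCP liveness probe can be switched off while a debugger holds the container at a breakpoint (otherwise Kubernetes keeps restarting it). A hedged sketch of toggling it on a standalone release (release name and repo alias are assumptions):

  # Temporarily disable the liveness probe for debugging, then restore it
  helm upgrade dev-robot local/robot --set liveness.enabled=false
  helm upgrade dev-robot local/robot --set liveness.enabled=true
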
diff --git a/kubernetes/robot/templates/robot-deployment.yaml b/kubernetes/robot/templates/robot-deployment.yaml
deleted file mode 100644 (file)
index 9f936e8..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-#{{ if not .Values.disableRobotRobot }}
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: robot
-  namespace: "{{ .Values.nsPrefix }}"
-spec:
-  replicas: {{ .Values.robotReplicas }}
-  selector:
-    matchLabels:
-      app: robot
-  template:
-    metadata:
-       labels:
-        app: robot
-       name: robot
-    spec:
-      containers:
-      - image: {{ .Values.image.testsuite }}
-        imagePullPolicy: {{ .Values.pullPolicy }}
-        name: robot
-        volumeMounts:
-        - name: localtime
-          mountPath: /etc/localtime
-          readOnly: true
-        - name: robot-eteshare
-          mountPath: /share/config
-        - name: robot-resources
-          mountPath: /var/opt/OpenECOMP_ETE/robot/resources/asdc_interface.robot
-          subPath: asdc_interface.robot
-        - name: robot-resources
-          mountPath: /var/opt/OpenECOMP_ETE/robot/resources/policy_interface.robot
-          subPath: policy_interface.robot
-        - name: robot-resources
-          mountPath: /var/opt/OpenECOMP_ETE/robot/resources/sdngc_interface.robot
-          subPath: sdngc_interface.robot
-        - name: lighttpd-authorization
-          mountPath: /etc/lighttpd/authorization
-          subPath: authorization
-        ports:
-        - containerPort: 88
-        readinessProbe:
-          tcpSocket:
-            port: 88
-          initialDelaySeconds: 5
-          periodSeconds: 10
-      volumes:
-        - name: localtime
-          hostPath:
-            path: /etc/localtime
-        - name: robot-eteshare
-          configMap:
-            name: robot-eteshare-configmap
-            defaultMode: 0755
-        - name: robot-resources
-          configMap:
-            name: robot-resources-configmap
-        - name: lighttpd-authorization
-          configMap:
-            name: lighttpd-authorization-configmap
-      imagePullSecrets:
-      - name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }}
diff --git a/kubernetes/robot/templates/service.yaml b/kubernetes/robot/templates/service.yaml
new file mode 100644 (file)
index 0000000..90d0ab8
--- /dev/null
@@ -0,0 +1,25 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "common.name" . }}
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    {{if eq .Values.service.type "NodePort" -}}
+    - port: {{ .Values.service.internalPort }}
+      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.externalPort }}
+      name: {{ .Values.service.name }}
+    {{- else -}}
+    - port: {{ .Values.service.externalPort }}
+      targetPort: {{ .Values.service.internalPort }}
+      name: {{ .Values.service.name }}
+    {{- end}}
+  selector:
+    app: {{ include "common.name" . }}
+    release: {{ .Release.Name }}
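
In the NodePort branch the node port is global.nodePortPrefix followed by service.externalPort, so the robot defaults (302 and "09") reproduce the previously hard-coded 30209 while the Service port itself stays on service.internalPort 88. A quick check on a deployed release, assuming the rendered Service name is "robot" in the onap namespace:

  kubectl get svc -n onap robot -o jsonpath='{.spec.ports[0].nodePort}'   # expected: 30209
  kubectl get svc -n onap robot -o jsonpath='{.spec.ports[0].port}'       # expected: 88
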
index ea5d2ae..a2da2b7 100644 (file)
@@ -1,7 +1,31 @@
-nsPrefix: onap
+#################################################################
+# Global configuration defaults.
+#################################################################
+global: # global defaults
+  nodePortPrefix: 302
+  repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ==
+  readinessRepository: oomk8s
+  readinessImage: readiness-check:1.0.0
+  loggingRepository: docker.elastic.co
+  loggingImage: beats/filebeat:5.5.0
+
+subChartsOnly:
+  enabled: true
+
+# application image
+repository: nexus3.onap.org:10001
+image: openecomp/testsuite:1.1-STAGING-latest
 pullPolicy: Always
-nodePortPrefix: 302
-robotReplicas: 1
+
+# flag to enable debugging - application support required
+debugEnabled: false
+
+#################################################################
+# Application configuration defaults.
+#################################################################
+
+# openstack configuration
+
 demoArtifactsVersion: "1.1.0-SNAPSHOT"
 openStackPrivateNetCidr: "192.168.30.0"
 openStackFlavourMedium: "m1.medium"
@@ -15,5 +39,53 @@ openStackPrivateSubnetId: "e8f51956-00dd-4425-af36-045716781ffc"
 openStackTenantId: "47899782ed714295b1151681fdfd51f5"
 openStackUserName: "vnf_user"
 ubuntuImage: "Ubuntu_14.04.5_LTS"
-image:
-  testsuite: nexus3.onap.org:10001/openecomp/testsuite:1.1-STAGING-latest
+
+# default number of instances
+robotReplicas: 1
+
+nodeSelector: {}
+
+affinity: {}
+
+# probe configuration parameters
+liveness:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  # necessary to disable liveness probe when setting breakpoints
+  # in debugger so K8s doesn't restart unresponsive container
+  enabled: true
+
+readiness:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+
+
+service:
+  type: NodePort
+  name: robot
+  externalPort: "09"
+  internalPort: 88
+
+
+ingress:
+  enabled: false
+
+
+resources: {}
+  # We usually recommend not specifying default resources, leaving this as a conscious
+  # choice for the user. This also increases the chances that charts will run on
+  # environments with limited resources, such as Minikube. If you do want to specify
+  # resources, uncomment the following lines, adjust them, and remove the curly braces
+  # after 'resources:'.
+  # Example:
+  # Configure resource requests and limits
+  # ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  # Minimum resources for development are 2 CPU cores and 4GB of memory
+  # Minimum resources for production are 4 CPU cores and 8GB of memory
+#resources:
+#  limits:
+#    cpu: 2
+#    memory: 4Gi
+#  requests:
+#    cpu: 2
+#    memory: 4Gi
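
All of these defaults can be overridden at deploy time; the OpenStack identifiers in particular are environment specific. A small sketch with purely illustrative values (release name and repo alias are assumptions):

  helm upgrade dev-robot local/robot \
    --set openStackUserName=my_vnf_user \
    --set openStackTenantId=0123456789abcdef0123456789abcdef \
    --set robotReplicas=2
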