relationships:
- type: cloudify.relationships.connected_to
target: k8s_env
+
+ nbi:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace }
+ - nbi
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_env
\ No newline at end of file
operational complexity and an inability to apply global parameters across the
entire ONAP deployment. OOM solves this problem by introducing a common
configuration technology, Helm charts, that provide a hierarchical
-configuration configuration with the ability to override values with higher
+configuration with the ability to override values with higher
level charts or command line options.
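For example, a value can be set once in the parent chart's global section, refined by a subchart, and overridden again at install time with a higher-level values file or a --set flag. A minimal sketch of that chain (the chart layout and install commands below are illustrative, not a verbatim copy of the ONAP charts):

  # onap/values.yaml (parent chart) - a global default visible to every subchart
  global:
    nodePortPrefix: 302

  # onap/charts/aai/values.yaml (subchart) - local defaults, still overridable from above
  service:
    type: NodePort
    nodePort: 32

  # at deploy time, either form wins over the file defaults above:
  #   helm install local/onap --name onap --set global.nodePortPrefix=312
  #   helm install local/onap --name onap -f my-overrides.yaml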
The structure of the configuration of ONAP is shown in the following diagram.
- a set of liveness probes, which feed into the Kubernetes manager and
are described in the Heal section.
-Within ONAP Consul is the monitoring system of choice and deployed by OOM in two parts:
+Within ONAP, Consul is the monitoring system of choice and is deployed by OOM in two parts:
- a three-way, centralized Consul server cluster is deployed as a highly
- available monitor of all of the ONAP components,and
+ available monitor of all of the ONAP components, and
- a number of Consul agents.
The Consul server provides a user interface that allows a user to graphically
service:
type: NodePort
portName: babel
- externalPort: 79
+ externalPort: 9516
internalPort: 9516
nodePort: 79
service:
type: NodePort
- name: champ
- portName: champ
+ portName: aai-champ
internalPort: 9522
nodePort: 78
service:
type: NodePort
- name: crud-service
- portName: crud-service
+ name: aai-crud-service
+ portName: aai-crud-service
internalPort: 9520
nodePort: 68
apiVersion: v1
kind: Service
metadata:
- name: {{ include "common.name" . }}
+ name: {{ include "common.servicename" . }}
namespace: {{ include "common.namespace" . }}
labels:
app: {{ include "common.name" . }}
apiVersion: v1
kind: Service
metadata:
- name: {{ include "common.name" . }}
+ name: {{ include "common.servicename" . }}
namespace: {{ include "common.namespace" . }}
labels:
app: {{ include "common.name" . }}
{{if eq .Values.service.type "NodePort" -}}
- port: {{ .Values.service.internalPort }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
- name: {{ .Values.service.name }}
+ name: {{ .Values.service.portName }}
- port: {{ .Values.service.internalPort2 }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort2 }}
- name: {{ .Values.service.name }}2
+ name: {{ .Values.service.portName2 }}
{{- else -}}
- port: {{ .Values.service.internalPort }}
- name: {{ .Values.service.name }}
+ name: {{ .Values.service.portName }}
- port: {{ .Values.service.internalPort2 }}
- name: {{ .Values.service.name }}2
+ name: {{ .Values.service.portName2 }}
{{- end}}
selector:
app: {{ include "common.name" . }}
service:
type: NodePort
- name: aai-modelloader
+ portName: aai-modelloader
externalPort: 8080
internalPort: 8080
nodePort: 10
+ portName2: aai-modelloader-ssl
externalPort2: 8443
internalPort2: 8443
nodePort2: 29
apiVersion: v1
kind: Service
metadata:
- name: {{ include "common.name" . }}
+ name: {{ include "common.servicename" . }}
namespace: {{ include "common.namespace" . }}
labels:
app: {{ include "common.name" . }}
{{if eq .Values.service.type "NodePort" -}}
- port: {{ .Values.service.internalPort }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
- name: {{ .Values.service.name }}
+ name: {{ .Values.service.portName }}
{{- else -}}
- port: {{ .Values.service.internalPort }}
- name: {{ .Values.service.name }}
+ name: {{ .Values.service.portName }}
{{- end}}
selector:
app: {{ include "common.name" . }}
service:
type: ClusterIP
- name: aai-search-data
+ portName: aai-search-data
internalPort: 9509
ingress:
{{if eq .Values.service.type "NodePort" -}}
- port: {{ .Values.service.internalPort }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
- name: {{ .Values.service.name }}
+ name: {{ .Values.service.portName }}
{{- else -}}
- port: {{ .Values.service.internalPort }}
- name: {{ .Values.service.name }}
+ name: {{ .Values.service.portName }}
{{- end}}
selector:
app: {{ include "common.name" . }}
global: # global defaults
nodePortPrefix: 302
aai:
- serviceName: aai-aai
+ serviceName: aai
aaiElasticsearch:
serviceName: aai-elasticsearch
gizmo:
service:
type: ClusterIP
- name: aai-sparky-be
+ portName: aai-sparky-be
internalPort: 9517
internalPort2: 8000
apiVersion: v1
kind: Service
metadata:
- name: {{ include "common.name" . }}
+ name: {{ include "common.servicename" . }}
namespace: {{ include "common.namespace" . }}
labels:
app: {{ include "common.name" . }}
spec:
ports:
{{if eq .Values.service.type "NodePort" -}}
- - name: {{ .Values.service.name }}
- port: {{ .Values.service.internalPort }}
+ - name: {{ .Values.service.portName }}
+ port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
- - name: {{ .Values.service.name }}2
- port: {{ .Values.service.internalPort2 }}
+ - name: {{ .Values.service.portName2 }}
+ port: {{ .Values.service.externalPort2 }}
targetPort: {{ .Values.service.internalPort2 }}
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort2 }}
{{- else -}}
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
- name: {{ .Values.service.name }}
+ name: {{ .Values.service.portName }}
- port: {{ .Values.service.externalPort2 }}
targetPort: {{ .Values.service.internalPort2 }}
- name: {{ .Values.service.name }}2
+ name: {{ .Values.service.portName2 }}
{{- end}}
type: {{ .Values.service.type }}
selector:
app: {{ include "common.name" . }}
- clusterIP: {{ .Values.config.aaiServiceClusterIp }}
+ clusterIP: {{ .Values.service.aaiServiceClusterIp }}
serviceName: aai-cassandra
replicas: 3
aai:
- serviceName: aai-aai
+ serviceName: aai
babel:
serviceName: aai-babel
champ:
# application configuration
config:
- # POLICY hotfix - Note this must be temporary
- # See https://jira.onap.org/browse/POLICY-510
- aaiServiceClusterIp:
logstashServiceName: log-ls
logstashPort: 5044
service:
type: NodePort
- name: aai
+ portName: aai
externalPort: 8080
internalPort: 8080
nodePort: 32
+ portName2: aai-ssl
externalPort2: 8443
internalPort2: 8443
nodePort2: 33
+ # POLICY hotfix - Note this must be temporary
+ # See https://jira.onap.org/browse/POLICY-510
+ aaiServiceClusterIp:
ingress:
enabled: false
"clamp.config.sdc.serviceUsername": "clamp",
"clamp.config.sdc.servicePassword": "b7acccda32b98c5bb7acccda32b98c5b05D511BD6D93626E90D18E9D24D9B78CD34C7EE8012F0A189A28763E82271E50A5D4EC10C7D93E06E0A2D27CAE66B981",
"clamp.config.files.sdcController": "file:/opt/clamp/sdc-controllers-config.json",
- "clamp.config.dcae.inventory.url": "http://dcaegen2.{{ include "common.namespace" . }}:8080",
- "clamp.config.dcae.dispatcher.url": "http://dcaegen2.{{ include "common.namespace" . }}:8080",
+ "clamp.config.dcae.inventory.url": "http://inventory.{{ include "common.namespace" . }}:8080",
+ "clamp.config.dcae.dispatcher.url": "http://deployment-handler.{{ include "common.namespace" . }}:8443",
"clamp.config.policy.pdpUrl1": "https://pdp.{{ include "common.namespace" . }}:9091/pdp/ , testpdp, alpha123",
"clamp.config.policy.pdpUrl2": "https://pdp.{{ include "common.namespace" . }}:9091/pdp/ , testpdp, alpha123",
"clamp.config.policy.papUrl": "https://pap.{{ include "common.namespace" . }}:8443/pap/ , testpap, alpha123",
internalPort: "80"
internalPort1: 8080
nodePort: "60"
- nodePort1: "61"
+ nodePort1: "71"
ingress:
enabled: false
mountPath: /opt/app/application.properties
subPath: application.properties
- name: config
- mountPath: /opt/onap/sdnc/dgbuilder/releases/sdnc1.0/conf/svclogic.properties
+ mountPath: /opt/onap/ccsdk/dgbuilder/releases/sdnc1.0/conf/svclogic.properties
subPath: svclogic.properties
- name: config
- mountPath: /opt/onap/sdnc/dgbuilder/svclogic/svclogic.properties
+ mountPath: /opt/onap/ccsdk/dgbuilder/svclogic/svclogic.properties
subPath: svclogic.properties
- name: scripts
- mountPath: /opt/onap/sdnc/dgbuilder/createReleaseDir.sh
+ mountPath: /opt/onap/ccsdk/dgbuilder/createReleaseDir.sh
subPath: createReleaseDir.sh
- name: scripts
- mountPath: /opt/onap/sdnc/dgbuilder/releases/sdnc1.0/customSettings.js
+ mountPath: /opt/onap/ccsdk/dgbuilder/releases/sdnc1.0/customSettings.js
subPath: customSettings.js
resources:
{{ toYaml .Values.resources | indent 12 }}
--- /dev/null
+{
+ "service": {
+ "name": "Health Check: Secret Management Service (sms)",
+ "check":[
+ {
+ "id" : "aaf-sms-health",
+ "name": "SMS Health Check",
+ "http": "https://aaf-sms.{{ .Release.Namespace }}:10443/v1/sms/healthcheck",
+ "tls_skip_verify": true,
+ "method": "GET",
+ "interval": "20s",
+ "timeout": "5s"
+ }
+ ]
+ }
+}
+
+++ /dev/null
-{
- "service": {
- "name": "Health Check: Message Router - DMaaP",
- "check": {
- "http": "http://message-router:3904/topics",
- "interval": "30s",
- "timeout": "1s"
- }
- }
-}
--- /dev/null
+{
+ "service": {
+ "name": "Health Check: DMaaP",
+ "checks":[
+ {
+ "id": "dmaap",
+ "name": "Health Check: Message Router",
+ "http": "http://message-router:3904/topics",
+ "tls_skip_verify": true,
+ "interval": "30s",
+ "timeout": "1s"
+ },
+ {
+ "id": "mr-zookeeper",
+ "name": "Health Check: Message Router - ZooKeeper",
+ "script": "/consul/scripts/mr-zookeeper-health.sh",
+ "interval": "10s",
+ "timeout": "5s"
+ },
+ {
+ "id": "mr-kafka",
+ "name": "Health Check: Message Router - Kafka",
+ "script": "/consul/scripts/mr-kafka-health.sh",
+ "interval": "30s",
+ "timeout": "5s"
+ }
+ ]
+ }
+}
+++ /dev/null
-{
- "service": {
- "name": "Health Check: Message Router - Kafka",
- "check": {
- "script": "/consul/scripts/mr-kafka-health.sh",
- "interval": "30s",
- "timeout": "1s"
- }
- }
-}
+++ /dev/null
-{
- "service": {
- "name": "Health Check: Message Router - ZooKeeper",
- "check": {
- "script": "/consul/scripts/mr-zookeeper-health.sh",
- "interval": "30s",
- "timeout": "1s"
- }
- }
-}
-kafkapod=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "message-router-kafka-[^[:space:]]*")
+kafkapod=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "[^[:space:]]*-message-router-kafka-[^[:space:]]*")
if [ -n "$kafkapod" ]; then
if /consul/bin/kubectl -n {{ include "common.namespace" . }} exec -it $kafkapod -- ps ef | grep -i kafka; then
echo Success. Kafka process is running. 2>&1
-zkpod=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "message-router-zookeeper-[^[:space:]]*")
+zkpod=$(/consul/bin/kubectl -n {{ include "common.namespace" . }} get pod | grep -o "[^[:space:]]*-message-router-zookeeper-[^[:space:]]*")
if [ -n "$zkpod" ]; then
- if /consul/bin/kubectl -n {{ include "common.namespace" . }} exec -it $zkpod -- ps ef | grep -i zookeeper; then
+ if /consul/bin/kubectl -n {{ include "common.namespace" . }} exec -it $zkpod -- ps aux | grep -i zookeeper; then
echo Success. Zookeeper process is running. 2>&1
exit 0
else
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/aai/esr-gui:1.1.0-SNAPSHOT
+image: onap/aai/esr-gui:latest
pullPolicy: Always
msbaddr: msb-iag.{{ include "common.namespace" . }}:80
# application image
repository: nexus3.onap.org:10001
-image: onap/aai/esr-server:1.1.0-SNAPSHOT
+image: onap/aai/esr-server:latest
pullPolicy: Always
msbaddr: msb-iag.{{ include "common.namespace" . }}:80
# probe configuration parameters
liveness:
- initialDelaySeconds: 10
+ initialDelaySeconds: 120
periodSeconds: 10
# necessary to disable liveness probe when setting breakpoints
# in debugger so K8s doesn't restart unresponsive container
enabled: true
readiness:
- initialDelaySeconds: 10
+ initialDelaySeconds: 120
periodSeconds: 10
## Persist data to a persistent volume
# probe configuration parameters
liveness:
- initialDelaySeconds: 180
+ initialDelaySeconds: 300
periodSeconds: 10
timeoutSeconds: 1
# necessary to disable liveness probe when setting breakpoints
enabled: true
readiness:
- initialDelaySeconds: 180
+ initialDelaySeconds: 300
periodSeconds: 10
timeoutSeconds: 1
msbPort: 80
aai:
port: 8443
- schemaVersion: v11
+ schemaVersion: v13
username: AAI
password: AAI
msbPort: 80
aai:
port: 8443
- schemaVersion: v11
+ schemaVersion: v13
username: AAI
password: AAI
msbPort: 80
aai:
port: 8443
- schemaVersion: v11
+ schemaVersion: v13
username: AAI
password: AAI
logstashPort: 5044
aai:
port: 8443
- schemaVersion: v11
+ schemaVersion: v13
username: AAI
password: AAI
- name: NBI_URL
value: "http://nbi.{{ include "common.namespace" . }}:8080/nbi/api/v1"
- name: SDC_HOST
- value: "http://sdc-fe.{{ include "common.namespace" . }}:8080"
+ value: "http://sdc-be.{{ include "common.namespace" . }}:8080"
- name: SDC_HEADER_ECOMPINSTANCEID
value: {{ .Values.config.ecompInstanceId }}
- name: SDC_HEADER_AUTHORIZATION
value: {{ .Values.sdc_authorization }}
- name: AAI_HOST
- value: "http://aai.{{ include "common.namespace" . }}:8443"
+ value: "https://aai.{{ include "common.namespace" . }}:8443"
- name: AAI_HEADER_AUTHORIZATION
value: {{ .Values.aai_authorization }}
- name: SO_HOST
type: NodePort
portName: api
name: nbi
- nodePort: 64
+ nodePort: 74
internalPort: 8080
ingress:
enabled: false
multicloud:
enabled: false
+nbi:
+ enabled: false
policy:
enabled: false
portal:
enabled: false
multicloud:
enabled: false
+nbi:
+ enabled: false
oof:
enabled: false
policy:
# AAI
-AAI_URL=https://aai.api.simpledemo.openecomp.org:8443
+AAI_URL=https://aai.{{.Release.Namespace}}:8443
AAI_USERNAME=POLICY
AAI_PASSWORD=POLICY
# MSO
-SO_URL=http://mso:8080/ecomp/mso/infra
+SO_URL=http://so.{{.Release.Namespace}}:8080/ecomp/mso/infra
SO_USERNAME=InfraPortalClient
SO_PASSWORD=password1$
-#! /bin/bash
+#! /bin/bash -xv
# Copyright © 2017-2018 Amdocs, Bell Canada, AT&T
#
# limitations under the License.
-${POLICY_HOME}/bin/features enable healthcheck
-${POLICY_HOME}/bin/features enable pooling-dmaap
-${POLICY_HOME}/bin/features enable distributed-locking
+"${POLICY_HOME}"/bin/features enable healthcheck
+"${POLICY_HOME}"/bin/features enable pooling-dmaap
+"${POLICY_HOME}"/bin/features enable distributed-locking
-${POLICY_HOME}/bin/db-migrator -s pooling -o upgrade
+"${POLICY_HOME}"/bin/db-migrator -s pooling -o upgrade
+
+# make sure the PDPD-CONFIGURATION anonymous topic is created
+# so not to lose any configuration updates
+
+echo
+echo "creating PDPD-CONFIGURATION topic"
+echo
+
+curl --silent --connect-timeout 60 -X POST --header "Content-Type: application/json" -d "{}" http://message-router:3904/events/PDPD-CONFIGURATION
+
+echo
+echo "removing PDPD-CONFIGURATION topic dummy message"
+echo
+
+curl --silent --connect-timeout 60 -X GET http://message-router:3904/events/PDPD-CONFIGURATION/1/1?timeout=15000
+
+# for resiliency/scalability scenarios, check to see
+# if there's an amsterdam artifact already deployed
+# by brmsgw. If so, update the amsterdam controller
+# coordinates. In the future, a more sophisticated
+# solution will be put in place that will require
+# coordination among policy components.
+
+echo
+echo "checking if there are amsterdam policies already deployed .."
+echo
+
+AMSTERDAM_VERSION=$(curl --silent --connect-timeout 60 -X GET "http://nexus:8081/nexus/service/local/artifact/maven/resolve?r=releases&g=org.onap.policy-engine.drools.amsterdam&a=policy-amsterdam-rules&v=RELEASE" | grep -Po "(?<=<version>).*(?=</version>)")
+
+if [[ -z ${AMSTERDAM_VERSION} ]]; then
+ echo "no amsterdam policies have been found .."
+ exit 0
+fi
+
+echo
+echo "The latest deployed amsterdam artifact in nexus has version ${AMSTERDAM_VERSION}"
+echo
+
+sed -i.INSTALL -e "s/^rules.artifactId=.*/rules.artifactId=policy-amsterdam-rules/g" \
+ -e "s/^rules.groupId=.*/rules.groupId=org.onap.policy-engine.drools.amsterdam/g" \
+ -e "s/^rules.version=.*/rules.version=${AMSTERDAM_VERSION}/g" "${POLICY_HOME}"/config/amsterdam-controller.properties
+
+echo
+echo "amsterdam controller will be started brained with maven coordinates:"
+echo
+
+grep "^rules" "${POLICY_HOME}"/config/amsterdam-controller.properties
+
+echo
+echo
image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
name: {{ include "common.name" . }}-readiness
- hostAliases:
- - ip: "{{ .Values.config.aaiServiceClusterIp }}"
- hostnames:
- - "aai.api.simpledemo.openecomp.org"
containers:
- name: {{ include "common.name" . }}
image: "{{ include "common.repository" . }}/{{ .Values.image }}"
# application configuration
config:
nexusPort: 8081
- aaiServiceClusterIp: 10.43.255.254
# default number of instances
-replicaCount: 4
+replicaCount: 2
nodeSelector: {}
LOGPARSER_X_MX_MB=1024
SERVER=http://{{ include "common.servicename" . }}:{{.Values.service.externalPort}}/pdp/
-LOGPATH=/opt/app/policy/servers/pdp/logs/pdp-rest.log
-PARSERLOGPATH=IntegrityMonitor.log
+LOGPATH=/var/log/onap/policy/pdpx/pdp-rest.log
+PARSERLOGPATH=/opt/app/policy/servers/pdplp/bin/IntegrityMonitor.log
node_type=logparser
# the java property is RESOURCE_NAME (uppercase), but the conf parameter is lowercase
LOGPARSER_X_MX_MB=1024
SERVER=http://{{ include "common.servicename" . }}:{{.Values.service.externalPort2}}/pap/
-LOGPATH=/opt/app/policy/servers/pap/logs/pap-rest.log
-PARSERLOGPATH=IntegrityMonitor.log
+LOGPATH=/var/log/onap/policy/pap/pap-rest.log
+PARSERLOGPATH=/opt/app/policy/servers/paplp/bin/IntegrityMonitor.log
node_type=logparser
# the java property is RESOURCE_NAME (uppercase), but the conf parameter is lowercase
# probe configuration parameters
liveness:
- initialDelaySeconds: 300
+ initialDelaySeconds: 450
periodSeconds: 10
# necessary to disable liveness probe when setting breakpoints
# in debugger so K8s doesn't restart unresponsive container
enabled: true
readiness:
- initialDelaySeconds: 300
+ initialDelaySeconds: 450
periodSeconds: 10
## Persist data to a persistent volume