Merge "Bug fix: CDS Chart update to align with processor-db"
author     Yang Xu <yang.xu3@huawei.com>
           Wed, 22 May 2019 10:07:10 +0000 (10:07 +0000)
committer  Gerrit Code Review <gerrit@onap.org>
           Wed, 22 May 2019 10:07:10 +0000 (10:07 +0000)
kubernetes/clamp/values.yaml
kubernetes/common/etcd/templates/pv.yaml
kubernetes/common/etcd/templates/statefulset.yaml
kubernetes/common/etcd/values.yaml
kubernetes/multicloud/charts/multicloud-k8s/requirements.yaml
kubernetes/multicloud/charts/multicloud-k8s/resources/config/k8sconfig.json
kubernetes/multicloud/charts/multicloud-k8s/values.yaml
kubernetes/sdnc/resources/config/bin/startODL.sh

diff --git a/kubernetes/clamp/values.yaml b/kubernetes/clamp/values.yaml
index f4c579b..dcafe13 100644
@@ -30,7 +30,7 @@ flavor: small
 
 # application image
 repository: nexus3.onap.org:10001
-image: onap/clamp:4.0.1
+image: onap/clamp:4.0.2
 pullPolicy: Always
 
 # flag to enable debugging - application support required
@@ -59,10 +59,10 @@ config:
         "clamp.config.dcae.deployment.url": "https4://deployment-handler.{{ include "common.namespace" . }}:8443",
         "clamp.config.dcae.deployment.userName": "none",
         "clamp.config.dcae.deployment.password": "none",
-        "clamp.config.policy.api.url": "http4://policy-api.{{ include "common.namespace" . }}:6969",
+        "clamp.config.policy.api.url": "https4://policy-api.{{ include "common.namespace" . }}:6969",
         "clamp.config.policy.api.userName": "healthcheck",
         "clamp.config.policy.api.password": "zb!XztG34",
-        "clamp.config.policy.pap.url": "http4://policy-pap.{{ include "common.namespace" . }}:6969",
+        "clamp.config.policy.pap.url": "https4://policy-pap.{{ include "common.namespace" . }}:6969",
         "clamp.config.policy.pap.userName": "healthcheck",
         "clamp.config.policy.pap.password": "zb!XztG34",
         "clamp.config.policy.pdpUrl1": "https://pdp.{{ include "common.namespace" . }}:8081/pdp/ , testpdp, alpha123",

diff --git a/kubernetes/common/etcd/templates/pv.yaml b/kubernetes/common/etcd/templates/pv.yaml
index eeaa645..65993e5 100644
 apiVersion: v1
 kind: PersistentVolume
 metadata:
-  name: {{ $root.Release.Name }}-{{ $root.Values.service.name }}-{{ $i }}
+  name: {{ include "common.fullname" $root }}-data-{{ $i }}
   namespace: {{ $root.Release.Namespace }}
   labels:
     type: {{ $root.Values.persistence.storageType }}
-    app: {{ $root.Values.service.name }}
+    app: {{ include "common.fullname" $root }}
     chart: {{ $root.Chart.Name }}-{{ $root.Chart.Version | replace "+" "_" }}
     release: {{ $root.Release.Name }}
     heritage: {{ $root.Release.Service }}
 spec:
   capacity:
-    storage: {{ $root.Values.persistence.size }}
+    storage: {{ $root.Values.persistence.storage }}
   accessModes:
     - {{ $root.Values.persistence.accessMode }}
+  storageClassName: "{{ include "common.fullname" $root }}-data"
   hostPath:
     path: {{ $root.Values.persistence.mountPath }}/{{ $root.Release.Name }}/{{ $root.Values.persistence.mountSubPath }}-{{ $i }}
   persistentVolumeReclaimPolicy: {{ $root.Values.persistence.volumeReclaimPolicy }}
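
The PV template now derives its name and "app" label from common.fullname instead of the service name, reads the capacity from persistence.storage (the key this chart's values.yaml actually defines, as the values hunk further down shows; the old persistence.size referenced a key that is never set), and pins each volume to a dedicated "<fullname>-data" storage class so the StatefulSet's claims can only bind to these hostPath volumes. An illustrative way to verify the rendered PVs after deployment:

    # Every etcd PV should now carry the generated "<fullname>-data" class.
    kubectl get pv -o custom-columns=NAME:.metadata.name,CLASS:.spec.storageClassName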

diff --git a/kubernetes/common/etcd/templates/statefulset.yaml b/kubernetes/common/etcd/templates/statefulset.yaml
index ccc6b69..8b6a534 100644
 apiVersion: apps/v1beta1
 kind: StatefulSet
 metadata:
-  name: {{ include "common.servicename" .  }}
+  name: {{ include "common.fullname" .  }}
   labels:
     heritage: "{{ .Release.Service }}"
     release: "{{ .Release.Name }}"
     chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
-    app: {{ template "common.name" . }}
+    app: {{ include "common.name" . }}
 spec:
-  serviceName: {{ include "common.servicename" .  }}
+  serviceName: {{ include "common.servicename" .}}
   replicas: {{ .Values.replicaCount }}
   template:
     metadata:
@@ -45,7 +45,7 @@ spec:
 {{ toYaml .Values.tolerations | indent 8 }}
 {{- end }}
       containers:
-      - name: {{ include "common.servicename" .  }}
+      - name: {{ include "common.fullname" .  }}
         image: "{{ .Values.repository }}/{{ .Values.image }}"
         imagePullPolicy: "{{ .Values.pullPolicy }}"
         ports:
@@ -72,6 +72,8 @@ spec:
         - name: INITIAL_CLUSTER_SIZE
           value: {{ .Values.replicaCount | quote }}
         - name: SET_NAME
+          value: {{ include "common.fullname" . }}
+        - name: SERVICE_NAME
           value: {{ include "common.servicename" . }}
 {{- if .Values.extraEnv }}
 {{ toYaml .Values.extraEnv | indent 8 }}
@@ -85,13 +87,13 @@ spec:
                 - |
                   EPS=""
                   for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
-                      EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}:2379"
+                      EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SERVICE_NAME}:2379"
                   done
 
                   HOSTNAME=$(hostname)
 
                   member_hash() {
-                      etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
+                      etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
                   }
 
                   SET_ID=${HOSTNAME##*[^0-9]}
@@ -113,28 +115,28 @@ spec:
             # store member id into PVC for later member replacement
             collect_member() {
                 while ! etcdctl member list &>/dev/null; do sleep 1; done
-                etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1 > /var/run/etcd/member_id
+                etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1 > /var/run/etcd/member_id
                 exit 0
             }
 
             eps() {
                 EPS=""
                 for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
-                    EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}:2379"
+                    EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SERVICE_NAME}:2379"
                 done
                 echo ${EPS}
             }
 
             member_hash() {
-                etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
+                etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
             }
 
             # we should wait for other pods to be up before trying to join
             # otherwise we got "no such host" errors when trying to resolve other members
             for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                 while true; do
-                    echo "Waiting for ${SET_NAME}-${i}.${SET_NAME} to come up"
-                    ping -W 1 -c 1 ${SET_NAME}-${i}.${SET_NAME} > /dev/null && break
+                    echo "Waiting for ${SET_NAME}-${i}.${SERVICE_NAME} to come up"
+                    ping -W 1 -c 1 ${SET_NAME}-${i}.${SERVICE_NAME} > /dev/null && break
                     sleep 1s
                 done
             done
@@ -145,11 +147,11 @@ spec:
                 member_id=$(cat /var/run/etcd/member_id)
 
                 # re-join member
-                ETCDCTL_ENDPOINT=$(eps) etcdctl member update ${member_id} http://${HOSTNAME}.${SET_NAME}:2380 | true
+                ETCDCTL_ENDPOINT=$(eps) etcdctl member update ${member_id} http://${HOSTNAME}.${SERVICE_NAME}:2380 | true
                 exec etcd --name ${HOSTNAME} \
                     --listen-peer-urls http://0.0.0.0:2380 \
                     --listen-client-urls http://0.0.0.0:2379\
-                    --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
+                    --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
                     --data-dir /var/run/etcd/default.etcd
             fi
 
@@ -170,7 +172,7 @@ spec:
                 fi
 
                 echo "Adding new member"
-                etcdctl member add ${HOSTNAME} http://${HOSTNAME}.${SET_NAME}:2380 | grep "^ETCD_" > /var/run/etcd/new_member_envs
+                etcdctl member add ${HOSTNAME} http://${HOSTNAME}.${SERVICE_NAME}:2380 | grep "^ETCD_" > /var/run/etcd/new_member_envs
 
                 if [ $? -ne 0 ]; then
                     echo "Exiting"
@@ -186,37 +188,37 @@ spec:
                 exec etcd --name ${HOSTNAME} \
                     --listen-peer-urls http://0.0.0.0:2380 \
                     --listen-client-urls http://0.0.0.0:2379 \
-                    --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
+                    --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
                     --data-dir /var/run/etcd/default.etcd \
-                    --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
+                    --initial-advertise-peer-urls http://${HOSTNAME}.${SERVICE_NAME}:2380 \
                     --initial-cluster ${ETCD_INITIAL_CLUSTER} \
                     --initial-cluster-state ${ETCD_INITIAL_CLUSTER_STATE}
             fi
 
             PEERS=""
             for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
-                PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SET_NAME}:2380"
+                PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SERVICE_NAME}:2380"
             done
 
             collect_member &
 
             # join member
             exec etcd --name ${HOSTNAME} \
-                --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
+                --initial-advertise-peer-urls http://${HOSTNAME}.${SERVICE_NAME}:2380 \
                 --listen-peer-urls http://0.0.0.0:2380 \
                 --listen-client-urls http://0.0.0.0:2379 \
-                --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
+                --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
                 --initial-cluster-token etcd-cluster-1 \
                 --initial-cluster ${PEERS} \
                 --initial-cluster-state new \
                 --data-dir /var/run/etcd/default.etcd
         volumeMounts:
-        - name: {{ include "common.servicename" . }}-datadir
+        - name: {{ include "common.fullname" . }}-data
           mountPath: /var/run/etcd
   {{- if .Values.persistence.enabled }}
   volumeClaimTemplates:
   - metadata:
-      name: {{ include "common.servicename" . }}-data
+      name: {{ include "common.fullname" . }}-data
     spec:
       accessModes:
         - "{{ .Values.persistence.accessMode }}"
@@ -224,16 +226,10 @@ spec:
         requests:
           # upstream recommended max is 700M
           storage: "{{ .Values.persistence.storage }}"
-    {{- if .Values.persistence.storageClass }}
-    {{- if (eq "-" .Values.persistence.storageClass) }}
-      storageClassName: ""
-    {{- else }}
-      storageClassName: "{{ .Values.persistence.storageClass }}"
-    {{- end }}
-    {{- end }}
+      storageClassName: {{ include "common.fullname" . }}-data
   {{- else }}
       volumes:
-      - name: {{ include "common.servicename" . }}-datadir
+      - name: {{ include "common.fullname" . }}-data
       {{- if .Values.memoryMode }}
         emptyDir:
           medium: Memory
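
The StatefulSet is now named after common.fullname, so the scripts can no longer assume the set name and the headless service name coincide: a StatefulSet pod's hostname comes from the set name (SET_NAME-ordinal), while its DNS record lives under the service domain, giving pod addresses of the form SET_NAME-i.SERVICE_NAME. Splitting the two environment variables keeps every peer and client URL resolvable. A minimal sketch of the naming, with illustrative values taken from the multicloud overrides below:

    # SET_NAME     = StatefulSet name (common.fullname)
    # SERVICE_NAME = headless Service name (common.servicename)
    SET_NAME=multicloud-k8s-etcd
    SERVICE_NAME=multicloud-k8s-etcd
    echo "peer URL for pod 0: http://${SET_NAME}-0.${SERVICE_NAME}:2380"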

diff --git a/kubernetes/common/etcd/values.yaml b/kubernetes/common/etcd/values.yaml
index a999b0c..341e35c 100644
@@ -57,7 +57,7 @@ persistence:
   ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
   ##   GKE, AWS & OpenStack)
   ##
-  storageClass: "-"
+  #storageClass: "-"
   accessMode: "ReadWriteOnce"
   storage: "1Gi"
   mountPath: /dockerdata-nfs
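
With the statefulset template above now hardcoding storageClassName to the generated "<fullname>-data" class, the storageClass knob no longer has any effect, so its default is commented out rather than left looking configurable. To confirm what actually renders (assuming the chart's dependencies are already built locally):

    # The PVC template should show the generated class, never the old "-" default.
    helm template kubernetes/common/etcd | grep -A 1 storageClassName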

diff --git a/kubernetes/multicloud/charts/multicloud-k8s/requirements.yaml b/kubernetes/multicloud/charts/multicloud-k8s/requirements.yaml
index 566af50..7754685 100644
@@ -22,4 +22,6 @@ dependencies:
   - name: mongo
     version: ~4.x-0
     repository: '@local'
-
+  - name: etcd
+    version: ~4.x-0
+    repository: '@local'
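
multicloud-k8s now pulls in the shared etcd chart alongside mongo. After editing requirements.yaml, the dependency list has to be re-resolved against the '@local' repository (the OOM make flow normally handles this; shown explicitly for clarity):

    # Re-fetch chart dependencies declared in requirements.yaml.
    helm dep update kubernetes/multicloud/charts/multicloud-k8s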

diff --git a/kubernetes/multicloud/charts/multicloud-k8s/resources/config/k8sconfig.json b/kubernetes/multicloud/charts/multicloud-k8s/resources/config/k8sconfig.json
index e451421..d6fa40d 100644
@@ -6,5 +6,6 @@
 
     "database-type": "mongo",
     "database-address": "multicloud-k8s-mongo",
+    "etcd-ip": "multicloud-k8s-etcd",
     "plugin-dir": "/opt/multicloud/k8splugin/plugins"
 }
\ No newline at end of file

diff --git a/kubernetes/multicloud/charts/multicloud-k8s/values.yaml b/kubernetes/multicloud/charts/multicloud-k8s/values.yaml
index 15aade5..22ddd17 100644
@@ -75,6 +75,14 @@ mongo:
     enabled: true
   disableNfsProvisioner: true
 
+#etcd chart overrides for k8splugin
+etcd:
+  nameOverride: multicloud-k8s-etcd
+  service:
+    name: multicloud-k8s-etcd
+  persistence:
+    mountSubPath: multicloud-k8s/etcd/data
+    enabled: true
 
 # No persistence right now as we rely on Mongo to handle that
 persistence:
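
The nameOverride and service.name overrides pin both the etcd StatefulSet and its headless Service to multicloud-k8s-etcd, which is exactly the hostname k8splugin expects in the "etcd-ip" field of k8sconfig.json above: despite the field's name, it holds a service name that cluster DNS resolves, not a literal IP. An illustrative reachability check from a pod in the same namespace:

    # 2379 is etcd's client port, as wired up in the statefulset script above.
    nc -z multicloud-k8s-etcd 2379 && echo "etcd reachable"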

diff --git a/kubernetes/sdnc/resources/config/bin/startODL.sh b/kubernetes/sdnc/resources/config/bin/startODL.sh
index 9b96a32..eb49a69 100755
@@ -113,6 +113,7 @@ function enable_odl_cluster(){
 # Install SDN-C platform components if not already installed and start container
 
 ODL_HOME=${ODL_HOME:-/opt/opendaylight/current}
+ODL_ADMIN_USERNAME=${ODL_ADMIN_USERNAME:-admin}
 ODL_ADMIN_PASSWORD=${ODL_ADMIN_PASSWORD:-Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U}
 SDNC_HOME=${SDNC_HOME:-/opt/onap/sdnc}
 SDNC_BIN=${SDNC_BIN:-/opt/onap/sdnc/bin}
@@ -125,6 +126,7 @@ GEO_ENABLED=${GEO_ENABLED:-false}
 DBINIT_DIR=${DBINIT_DIR:-/opt/opendaylight/current/daexim}
 SDNRWT=${SDNRWT:-false}
 SDNRWT_BOOTFEATURES=${SDNRWT_BOOTFEATURES:-sdnr-wt-feature-aggregator}
+export ODL_ADMIN_PASSWORD ODL_ADMIN_USERNAME
 
 echo "Settings:"
 echo "  ENABLE_ODL_CLUSTER=$ENABLE_ODL_CLUSTER"