Merge "[SDNC] Install latest service release (SR3) of OpenDaylight Aluminum"
author    Sylvain Desbureaux <sylvain.desbureaux@orange.com>
          Mon, 17 May 2021 12:44:26 +0000 (12:44 +0000)
committer Gerrit Code Review <gerrit@onap.org>
          Mon, 17 May 2021 12:44:26 +0000 (12:44 +0000)
32 files changed:
kubernetes/appc/resources/config/appc/opt/onap/appc/bin/startODL.sh
kubernetes/cli/values.yaml
kubernetes/common/Makefile
kubernetes/common/music/components/music-cassandra/templates/job.yaml
kubernetes/common/music/components/music-cassandra/templates/statefulset.yaml
kubernetes/common/music/components/music-cassandra/values.yaml
kubernetes/common/roles-wrapper/Chart.yaml [new file with mode: 0644]
kubernetes/common/roles-wrapper/requirements.yaml [new file with mode: 0644]
kubernetes/common/roles-wrapper/templates/role.yaml [new file with mode: 0644]
kubernetes/common/roles-wrapper/values.yaml [new file with mode: 0644]
kubernetes/common/serviceAccount/templates/role-binding.yaml
kubernetes/common/serviceAccount/templates/role.yaml
kubernetes/common/serviceAccount/templates/service-account.yaml
kubernetes/common/serviceAccount/values.yaml
kubernetes/dcaegen2-services/common/dcaegen2-services-common/templates/_deployment.tpl
kubernetes/helm/plugins/deploy/deploy.sh
kubernetes/helm/plugins/undeploy/undeploy.sh
kubernetes/onap/requirements.yaml
kubernetes/onap/values.yaml
kubernetes/portal/components/portal-app/templates/deployment.yaml
kubernetes/sdc/components/sdc-be/templates/deployment.yaml
kubernetes/sdc/components/sdc-be/values.yaml
kubernetes/sdc/components/sdc-fe/templates/deployment.yaml
kubernetes/sdc/components/sdc-fe/values.yaml
kubernetes/sdc/components/sdc-helm-validator/templates/deployment.yaml
kubernetes/sdc/components/sdc-helm-validator/values.yaml
kubernetes/sdc/components/sdc-onboarding-be/templates/deployment.yaml
kubernetes/sdc/components/sdc-onboarding-be/values.yaml
kubernetes/sdc/components/sdc-wfd-be/templates/deployment.yaml
kubernetes/sdc/components/sdc-wfd-be/values.yaml
kubernetes/sdc/components/sdc-wfd-fe/templates/deployment.yaml
kubernetes/sdc/components/sdc-wfd-fe/values.yaml

index 85f5aac..717ea66 100755 (executable)
@@ -44,7 +44,7 @@ enable_odl_cluster () {
   node_index=($(echo ${hm} | awk -F"-" '{print $NF}'))
   node_list="${node}-0.{{ .Values.service.name }}-cluster.{{.Release.Namespace}}";
 
-  for ((i=1;i<${APPC_REPLICAS};i++));
+  for i in $(seq 1 $((${APPC_REPLICAS}-1)));
   do
     node_list="${node_list} ${node}-$i.{{ .Values.service.name }}-cluster.{{.Release.Namespace}}"
   done
index c521fb8..4dcee45 100644 (file)
@@ -35,33 +35,24 @@ certInitializer:
   cadi_latitude: "0.0"
   credsPath: /opt/app/osaaf/local
   aaf_add_config: |
-    echo "*** retrieving password for keystore and trustore"
-    export $(/opt/app/aaf_config/bin/agent.sh local showpass \
-      {{.Values.fqi}} {{ .Values.fqdn }} | grep '^c' | xargs -0)
-    if [ -z "$cadi_keystore_password_p12" ]
-    then
-      echo "  /!\ certificates retrieval failed"
-      exit 1
-    else
-      echo "*** transform AAF certs into pem files"
-      mkdir -p {{ .Values.credsPath }}/certs
-      keytool -exportcert -rfc -file {{ .Values.credsPath }}/certs/cacert.pem \
-        -keystore {{ .Values.credsPath }}/{{ .Values.fqi_namespace }}.trust.jks \
-        -alias ca_local_0 \
-        -storepass $cadi_truststore_password
-      openssl pkcs12 -in {{ .Values.credsPath }}/{{ .Values.fqi_namespace }}.p12 \
-        -nokeys -out {{ .Values.credsPath }}/certs/cert.pem \
-        -passin pass:$cadi_keystore_password_p12 \
-        -passout pass:$cadi_keystore_password_p12
-      echo "*** generating needed file"
-      cat {{ .Values.credsPath }}/{{ .Values.fqi_namespace }}.key \
-          {{ .Values.credsPath }}/certs/cert.pem \
-          {{ .Values.credsPath }}/certs/cacert.pem \
-          > {{ .Values.credsPath }}/certs/fullchain.pem;
-      cat {{ .Values.credsPath }}/certs/fullchain.pem
-      echo "*** change ownership of certificates to targeted user"
-      chown -R 33 {{ .Values.credsPath }}
-    fi
+    echo "*** transform AAF certs into pem files"
+    mkdir -p {{ .Values.credsPath }}/certs
+    keytool -exportcert -rfc -file {{ .Values.credsPath }}/certs/cacert.pem \
+      -keystore {{ .Values.credsPath }}/{{ .Values.fqi_namespace }}.trust.jks \
+      -alias ca_local_0 \
+      -storepass $cadi_truststore_password
+    openssl pkcs12 -in {{ .Values.credsPath }}/{{ .Values.fqi_namespace }}.p12 \
+      -nokeys -out {{ .Values.credsPath }}/certs/cert.pem \
+      -passin pass:$cadi_keystore_password_p12 \
+      -passout pass:$cadi_keystore_password_p12
+    echo "*** generating needed file"
+    cat {{ .Values.credsPath }}/{{ .Values.fqi_namespace }}.key \
+        {{ .Values.credsPath }}/certs/cert.pem \
+        {{ .Values.credsPath }}/certs/cacert.pem \
+        > {{ .Values.credsPath }}/certs/fullchain.pem;
+    cat {{ .Values.credsPath }}/certs/fullchain.pem
+    echo "*** change ownership of certificates to targeted user"
+    chown -R 33 {{ .Values.credsPath }}
 
 
 #################################################################
index c7aba63..6442068 100644 (file)
@@ -21,7 +21,7 @@ COMMON_CHARTS_DIR := common
 
 EXCLUDES :=
 PROCESSED_LAST := cert-wrapper repository-wrapper
-PROCESSED_FIRST := repositoryGenerator readinessCheck certInitializer
+PROCESSED_FIRST := repositoryGenerator readinessCheck serviceAccount certInitializer
 TO_FILTER := $(PROCESSED_FIRST) $(EXCLUDES) $(PROCESSED_LAST)
 
 HELM_BIN := helm
index 3cf1ae3..d3c89d4 100644 (file)
@@ -39,8 +39,6 @@ spec:
         command:
         - /app/ready.py
         args:
-        - --timeout
-        - "{{ .Values.readinessTimeout }}"
         - --container-name
         - music-cassandra
         env:
@@ -87,4 +85,3 @@ spec:
       restartPolicy: Never
       imagePullSecrets:
       - name: "{{ include "common.namespace" . }}-docker-registry-key"
-
index 2a1fb4f..1aabfb6 100644 (file)
@@ -73,6 +73,17 @@ spec:
           timeoutSeconds: {{ .Values.readiness.timeoutSeconds }}
           successThreshold: {{ .Values.readiness.successThreshold }}
           failureThreshold: {{ .Values.readiness.failureThreshold }}
+        startupProbe:
+          exec:
+            command:
+            - /bin/bash
+            - -c
+            - nodetool status | grep $POD_IP | awk '$1!="UN" { exit 1; }'
+          initialDelaySeconds: {{ .Values.startup.initialDelaySeconds }}
+          periodSeconds: {{ .Values.startup.periodSeconds }}
+          timeoutSeconds: {{ .Values.startup.timeoutSeconds }}
+          successThreshold: {{ .Values.startup.successThreshold }}
+          failureThreshold: {{ .Values.startup.failureThreshold }}
         lifecycle:
           preStop:
             exec:
index 8530172..92ed723 100644 (file)
@@ -18,7 +18,7 @@ global:
   nodePortPrefix: 302
   persistence: {}
 
-replicaCount: 3
+replicaCount: 1
 
 # Cassandra Image - This image is modified from the original on
 # Docker Hub where the Security has been turned on.
@@ -72,8 +72,8 @@ cql:
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 120
-  periodSeconds: 20
+  initialDelaySeconds: 1
+  periodSeconds: 10
   timeoutSeconds: 10
   successThreshold: 1
   failureThreshold: 3
@@ -81,15 +81,20 @@ liveness:
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
-readinessTimeout: 240
-
 readiness:
-  initialDelaySeconds: 10
-  periodSeconds: 20
+  initialDelaySeconds: 1
+  periodSeconds: 10
   timeoutSeconds: 10
   successThreshold: 1
   failureThreshold: 3
 
+startup:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  timeoutSeconds: 10
+  successThreshold: 1
+  failureThreshold: 90
+
 podManagementPolicy: OrderedReady
 updateStrategy:
   type: OnDelete
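Taken together, the startup probe added above lets a Cassandra pod take up to failureThreshold × periodSeconds = 90 × 10 s ≈ 15 minutes to report "UN" (Up/Normal) in nodetool status before the kubelet gives up, while the liveness and readiness probes can now kick in almost immediately (1 s delay) once startup has succeeded. With the default values above, the statefulset template should render roughly this probe (a sketch of the expected output, not copied from the chart):

startupProbe:
  exec:
    command:
    - /bin/bash
    - -c
    - nodetool status | grep $POD_IP | awk '$1!="UN" { exit 1; }'
  initialDelaySeconds: 10
  periodSeconds: 10
  timeoutSeconds: 10
  successThreshold: 1
  failureThreshold: 90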
diff --git a/kubernetes/common/roles-wrapper/Chart.yaml b/kubernetes/common/roles-wrapper/Chart.yaml
new file mode 100644 (file)
index 0000000..862773f
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright © 2021 Orange
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+description: Wrapper chart to allow default roles to be shared among ONAP instances
+name: roles-wrapper
+version: 8.0.0
diff --git a/kubernetes/common/roles-wrapper/requirements.yaml b/kubernetes/common/roles-wrapper/requirements.yaml
new file mode 100644 (file)
index 0000000..b2d51ef
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright © 2021 Orange
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+dependencies:
+  - name: common
+    version: ~8.x-0
+    repository: 'file://../common'
diff --git a/kubernetes/common/roles-wrapper/templates/role.yaml b/kubernetes/common/roles-wrapper/templates/role.yaml
new file mode 100644 (file)
index 0000000..e2a84b4
--- /dev/null
@@ -0,0 +1,110 @@
+{{/*
+# Copyright © 2020 Orange
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{- $dot := . -}}
+{{- range $role_type := $dot.Values.roles }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ printf "%s-%s" (include "common.release" $dot) $role_type }}
+  namespace: {{ include "common.namespace" $dot }}
+rules:
+{{-   if eq $role_type "read" }}
+- apiGroups:
+  - "" # "" indicates the core API group
+  - apps
+  - batch
+  - extensions
+  resources:
+  - pods
+  - deployments
+  - jobs
+  - jobs/status
+  - statefulsets
+  - replicasets
+  - replicasets/status
+  - daemonsets
+  verbs:
+  - get
+  - watch
+  - list
+{{-   else  }}
+{{-     if eq $role_type "create" }}
+- apiGroups:
+  - "" # "" indicates the core API group
+  - apps
+  - batch
+  - extensions
+  resources:
+  - pods
+  - deployments
+  - jobs
+  - jobs/status
+  - statefulsets
+  - replicasets
+  - replicasets/status
+  - daemonsets
+  - secrets
+  verbs:
+  - get
+  - watch
+  - list
+- apiGroups:
+  - "" # "" indicates the core API group
+  - apps
+  resources:
+  - statefulsets
+  verbs:
+  - patch
+- apiGroups:
+  - "" # "" indicates the core API group
+  - apps
+  resources:
+  - deployments
+  - secrets
+  verbs:
+  - create
+- apiGroups:
+  - "" # "" indicates the core API group
+  - apps
+  resources:
+  - pods
+  - persistentvolumeclaims
+  - secrets
+  - deployment
+  verbs:
+  - delete
+- apiGroups:
+  - "" # "" indicates the core API group
+  - apps
+  resources:
+  - pods/exec
+  verbs:
+  - create
+{{-     else }}
+# if you don't match read or create, then you're not allowed to use the API
+# except to see basic information about yourself
+- apiGroups:
+  - authorization.k8s.io
+  resources:
+  - selfsubjectaccessreviews
+  - selfsubjectrulesreviews
+  verbs:
+  - create
+{{-     end }}
+{{-   end }}
+{{- end }}
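For a release named onap deployed in the onap namespace (both names are only illustrative), the template above should render the "read" role roughly as follows (a sketch, not the chart's verbatim output):

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: onap-read
  namespace: onap
rules:
- apiGroups:
  - ""
  - apps
  - batch
  - extensions
  resources:
  - pods
  - deployments
  - jobs
  - jobs/status
  - statefulsets
  - replicasets
  - replicasets/status
  - daemonsets
  verbs:
  - get
  - watch
  - list

The "create" role extends this with patch, create and delete rules, and any other role type falls back to the self-inspection-only rules at the end of the template.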
diff --git a/kubernetes/common/roles-wrapper/values.yaml b/kubernetes/common/roles-wrapper/values.yaml
new file mode 100644 (file)
index 0000000..8a53d7d
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright © 2021 Orange
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+roles:
+  - nothing
+  - read
+  - create
index 2082f84..7c272ae 100644 (file)
 
 {{- $dot := . -}}
 {{- range $role_type := $dot.Values.roles }}
+{{/* retrieve the names for generic roles */}}
+{{ $name := printf "%s-%s" (include "common.release" $dot) $role_type }}
+{{- if not (has $role_type $dot.Values.defaultRoles) }}
+{{ $name = include "common.fullname" (dict "suffix" $role_type "dot" $dot ) }}
+{{- end }}
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 # This cluster role binding allows anyone in the "manager" group to read secrets in any namespace.
 kind: RoleBinding
 metadata:
-  name: {{ include "common.fullname" (dict "suffix" $role_type "dot" $dot )}}
+  name: {{ include "common.fullname" (dict "suffix" $role_type "dot" $dot ) }}
   namespace: {{ include "common.namespace" $dot }}
 subjects:
 - kind: ServiceAccount
-  name: {{ include "common.fullname" (dict "suffix" $role_type "dot" $dot )}}
+  name: {{ include "common.fullname" (dict "suffix" $role_type "dot" $dot ) }}
 roleRef:
   kind: Role
-  name: {{ include "common.fullname" (dict "suffix" $role_type "dot" $dot )}}
+  name: {{ $name }}
   apiGroup: rbac.authorization.k8s.io
 {{- end }}
+
index 6d12164..2055885 100644 (file)
 # limitations under the License.
 */}}
 
-{{-   $dot := . -}}
+{{- $dot := . -}}
 {{- range $role_type := $dot.Values.roles }}
+{{/* Default roles are already created, just creating specific ones */}}
+{{-   if not (has $role_type $dot.Values.defaultRoles) }}
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
-  name: {{ include "common.fullname" (dict "suffix" $role_type "dot" $dot )}}
+  name: {{ include "common.fullname" (dict "suffix" $role_type "dot" $dot ) }}
   namespace: {{ include "common.namespace" $dot }}
 rules:
-{{- if eq $role_type "read" }}
-- apiGroups:
-  - "" # "" indicates the core API group
-  - apps
-  - batch
-  - extensions
-  resources:
-  - pods
-  - deployments
-  - jobs
-  - jobs/status
-  - statefulsets
-  - replicasets
-  - replicasets/status
-  - daemonsets
-  verbs:
-  - get
-  - watch
-  - list
-{{- else  }}
-{{-   if eq $role_type "create" }}
-- apiGroups:
-  - "" # "" indicates the core API group
-  - apps
-  - batch
-  - extensions
-  resources:
-  - pods
-  - deployments
-  - jobs
-  - jobs/status
-  - statefulsets
-  - replicasets
-  - replicasets/status
-  - daemonsets
-  - secrets
-  verbs:
-  - get
-  - watch
-  - list
-- apiGroups:
-  - "" # "" indicates the core API group
-  - apps
-  resources:
-  - statefulsets
-  verbs:
-  - patch
-- apiGroups:
-  - "" # "" indicates the core API group
-  - apps
-  resources:
-  - deployments
-  - secrets
-  verbs:
-  - create
-- apiGroups:
-  - "" # "" indicates the core API group
-  - apps
-  resources:
-  - pods
-  - persistentvolumeclaims
-  - secrets
-  - deployment
-  verbs:
-  - delete
+{{-     if hasKey $dot.Values.new_roles_definitions $role_type  }}
+{{ include "common.tplValue" ( dict "value" (index $dot.Values.new_roles_definitions $role_type ) "context" $dot) }}
+{{-     else}}
+# if no rules are provided, you're back to the 'nothing' role
 - apiGroups:
-  - "" # "" indicates the core API group
-  - apps
+  - authorization.k8s.io
   resources:
-  - pods/exec
+  - selfsubjectaccessreviews
+  - selfsubjectrulesreviews
   verbs:
   - create
-{{-   else }}
-{{-     if hasKey $dot.Values.new_roles_definitions $role_type  }}
-{{ include "common.tplValue" ( dict "value" (index $dot.Values.new_roles_definitions $role_type ) "context" $dot) }}
-{{-     else}}
-# if you don't match read or create, then you're not allowed to use API
-- apiGroups: []
-  resources: []
-  verbs: []
 {{-     end }}
 {{-   end }}
 {{- end }}
-{{- end }}
index 449bea6..20bd94f 100644 (file)
@@ -20,5 +20,5 @@
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: {{ include "common.fullname" (dict "suffix" $role_type "dot" $dot )}}
-{{- end }}
+  name: {{ include "common.fullname" (dict "suffix" $role_type "dot" $dot ) }}
+{{- end }}
\ No newline at end of file
index afa8194..22faeb6 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# Default roles will be created by the roles wrapper.
+# This won't work if the roles wrapper is disabled.
 roles:
   - nothing
 # - read
 # - create
 
+defaultRoles:
+  - nothing
+  - read
+  - create
+
 new_roles_definitions: {}
 #  few-read:
 #    - apiGroups:
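As a usage sketch, a component chart could request an extra role on top of the wrapper-provided defaults by overriding these values; the pod-reader name and its rules below are purely hypothetical and not part of this change:

roles:
  - read
  - pod-reader
new_roles_definitions:
  pod-reader:
    - apiGroups:
      - ""
      resources:
      - pods
      verbs:
      - get
      - list
      - watch

With these values, read is listed in defaultRoles and therefore only bound (the roles wrapper already created it), while pod-reader is rendered by this chart's role template from the rules given in new_roles_definitions.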
index 10a63eb..5de5262 100644 (file)
@@ -69,6 +69,83 @@ the the literal string "An example value".
   {{- end }}
 {{- end -}}
 {{/*
+For internal use only!
+
+dcaegen2-services-common._externalVolumes:
+This template generates a list of volumes associated with the pod,
+based on information provided in .Values.externalVolumes.  This
+template works in conjunction with dcaegen2-services-common._externalVolumeMounts
+to give the microservice access to data in volumes created elsewhere.
+This initial implementation supports ConfigMaps only, as this is the only
+external volume mounting required by current microservices.
+
+.Values.externalVolumes is a list of objects.  Each object has 3 required fields and 1 optional field:
+   - name: the name of the resource (in the current implementation, it must be a ConfigMap)
+     that is to be set up as a volume.  The value is a case-sensitive string.  Because the
+     names of resources are sometimes set at deployment time (for instance, to prefix the Helm
+     release to the name), the string can be a Helm template fragment that will be expanded at
+     deployment time.
+   - type: the type of the resource (in the current implementation, only "ConfigMap" is supported).
+     The value is a case-INsensitive string.
+   - mountPath: the path to the mount point for the volume in the container file system.  The
+     value is a case-sensitive string.
+   - readOnly: (Optional) Boolean flag.  Set to true to mount the volume as read-only.
+     Defaults to false.
+
+Here is an example fragment from a values.yaml file for a microservice:
+
+externalVolumes:
+  - name: my-example-configmap
+    type: configmap
+    mountPath: /opt/app/config
+  - name: '{{ include "common.release" . }}-another-example'
+    type: configmap
+    mountPath: /opt/app/otherconfig
+*/}}
+{{- define "dcaegen2-services-common._externalVolumes" -}}
+  {{- $global := . -}}
+  {{- if .Values.externalVolumes }}
+    {{- range $vol := .Values.externalVolumes }}
+      {{- if eq (lower $vol.type) "configmap" }}
+        {{- $vname := (tpl $vol.name $global) }}
+- configMap:
+    defaultMode: 420
+    name: {{ $vname }}
+  name: {{ $vname }}
+      {{- end }}
+    {{- end }}
+  {{- end }}
+{{- end }}
+{{/*
+For internal use only!
+
+dcaegen2-services-common._externalVolumeMounts:
+This template generates a list of volume mounts for the microservice container,
+based on information provided in .Values.externalVolumes.  This
+template works in conjunction with dcaegen2-services-common._externalVolumes
+to give the microservice access to data in volumes created elsewhere.
+This initial implementation supports ConfigMaps only, as this is the only
+external volume mounting required by current microservices.
+
+See the documentation for dcaegen2-services-common._externalVolumes for
+details on how external volumes are specified in the values.yaml file for
+the microservice.
+*/}}
+{{- define "dcaegen2-services-common._externalVolumeMounts" -}}
+  {{- $global := . -}}
+  {{- if .Values.externalVolumes }}
+    {{- range $vol := .Values.externalVolumes }}
+      {{- if eq (lower $vol.type) "configmap" }}
+        {{- $vname := (tpl $vol.name $global) -}}
+        {{- $readOnly := $vol.readOnly | default false }}
+- mountPath: {{ $vol.mountPath }}
+  name: {{ $vname }}
+  readOnly: {{ $readOnly }}
+      {{- end }}
+    {{- end }}
+  {{- end }}
+{{- end }}
+{{/*
 dcaegen2-services-common.microserviceDeployment:
 This template produces a Kubernetes Deployment for a DCAE microservice.
 
@@ -255,6 +332,7 @@ spec:
         - name: policy-shared
           mountPath: /etc/policies
         {{- end }}
+        {{- include "dcaegen2-services-common._externalVolumeMounts" . | nindent 8 }}
       {{- if $logDir }}
       - image: {{ include "repositoryGenerator.image.logging" . }}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
@@ -352,6 +430,7 @@ spec:
       - name: policy-shared
         emptyDir: {}
       {{- end }}
+      {{- include "dcaegen2-services-common._externalVolumes" . | nindent 6 }}
       imagePullSecrets:
       - name: "{{ include "common.namespace" . }}-docker-registry-key"
 {{ end -}}
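For the values.yaml fragment shown in the documentation above, the two helpers should produce roughly the following fragments (a sketch; the release-prefixed ConfigMap name from the second entry depends on the actual release name):

volumeMounts entry added to the microservice container:
- mountPath: /opt/app/config
  name: my-example-configmap
  readOnly: false

volumes entry added to the pod spec:
- configMap:
    defaultMode: 420
    name: my-example-configmap
  name: my-example-configmap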
index 44e8e56..0d434ad 100755 (executable)
@@ -70,7 +70,7 @@ generate_overrides() {
 resolve_deploy_flags() {
   flags=($1)
   n=${#flags[*]}
-  for (( i = 0; i < n; i++ )); do
+  i=0 ; while [ "$i" -lt "$n" ]; do
     PARAM=${flags[i]}
     if [[ $PARAM = "-f" || \
           $PARAM = "--values" || \
@@ -82,6 +82,7 @@ resolve_deploy_flags() {
     else
       DEPLOY_FLAGS="$DEPLOY_FLAGS $PARAM"
     fi
+    i=$((i+1))
   done
   echo "$DEPLOY_FLAGS"
 }
@@ -255,7 +256,7 @@ deploy() {
     else
       array=($(echo "$ALL_HELM_RELEASES" | grep "${RELEASE}-${subchart}"))
       n=${#array[*]}
-      for (( i = n-1; i >= 0; i-- )); do
+      for i in $(seq $(($n-1)) -1 0); do
         if [[ $HELM_VER = "v3."* ]]; then
           helm del "${array[i]}"
         else
index e5c0c12..1689bf1 100755 (executable)
@@ -23,7 +23,7 @@ undeploy() {
 
   array=($(helm ls -q --all | grep $RELEASE))
   n=${#array[*]}
-  for (( i = n-1; i >= 0; i-- ))
+  for i in $(seq $(($n-1)) -1 0)
   do
     helm del "${array[i]}" $FLAGS
   done
index 6034063..fa3efd3 100755 (executable)
@@ -169,3 +169,7 @@ dependencies:
     version: ~8.x-0
     repository: '@local'
     condition: cert-wrapper.enabled
+  - name: roles-wrapper
+    version: ~8.x-0
+    repository: '@local'
+    condition: roles-wrapper.enabled
index ca9ccd4..d91284a 100755 (executable)
@@ -398,3 +398,5 @@ cert-wrapper:
   enabled: true
 repository-wrapper:
   enabled: true
+roles-wrapper:
+  enabled: true
index 71b2aa3..39393ef 100644 (file)
@@ -104,7 +104,7 @@ spec:
               -Djavax.net.ssl.keyStore="{{ .Values.certInitializer.credsPath }}/{{ .Values.certInitializer.keystoreFile }}"
               -Djavax.net.ssl.trustStore="{{ .Values.certInitializer.credsPath }}/{{ .Values.certInitializer.truststoreFile }}"
         {{- else }}
-        args: ["/start-apache-tomcat.sh -i "" -n "" -b {{ .Values.global.env.tomcatDir }}"]
+        args: ["/start-apache-tomcat.sh -i \"\" -n \"\" -b {{ .Values.global.env.tomcatDir }}"]
         {{- end }}
         ports:
         - containerPort: {{ .Values.service.internalPort }}
index 4443986..28e9c13 100644 (file)
@@ -124,6 +124,8 @@ spec:
             initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
             periodSeconds: {{ .Values.liveness.periodSeconds }}
             timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
+            successThreshold: {{ .Values.liveness.successThreshold }}
+            failureThreshold: {{ .Values.liveness.failureThreshold }}
           {{ end }}
           readinessProbe:
             exec:
@@ -132,6 +134,18 @@ spec:
             initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
             periodSeconds: {{ .Values.readiness.periodSeconds }}
             timeoutSeconds: {{ .Values.readiness.timeoutSeconds }}
+            successThreshold: {{ .Values.readiness.successThreshold }}
+            failureThreshold: {{ .Values.readiness.failureThreshold }}
+          resources: {{ include "common.resources" . | nindent 12 }}
+          startupProbe:
+            exec:
+              command:
+              - "/var/lib/jetty/ready-probe.sh"
+            initialDelaySeconds: {{ .Values.startup.initialDelaySeconds }}
+            periodSeconds: {{ .Values.startup.periodSeconds }}
+            timeoutSeconds: {{ .Values.startup.timeoutSeconds }}
+            successThreshold: {{ .Values.startup.successThreshold }}
+            failureThreshold: {{ .Values.startup.failureThreshold }}
           resources: {{ include "common.resources" . | nindent 12 }}
           env:
           - name: ENVNAME
index bdaea44..fc3b53f 100644 (file)
@@ -83,18 +83,29 @@ affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 120
+  initialDelaySeconds: 1
   periodSeconds: 10
   timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 3
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   port: api
   enabled: true
 
 readiness:
-  initialDelaySeconds: 60
+  initialDelaySeconds: 1
   periodSeconds: 10
   timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 3
+
+startup:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 60
 
 service:
   type: NodePort
index 45c7bc8..0a5c0a3 100644 (file)
@@ -117,13 +117,25 @@ spec:
             initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
             periodSeconds: {{ .Values.liveness.periodSeconds }}
             timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
+            successThreshold: {{ .Values.liveness.successThreshold }}
+            failureThreshold: {{ .Values.liveness.failureThreshold }}
           {{ end }}
           readinessProbe:
             tcpSocket:
               port: {{ .Values.service.internalPort2 }}
             initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
             periodSeconds: {{ .Values.readiness.periodSeconds }}
-            timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
+            timeoutSeconds: {{ .Values.readiness.timeoutSeconds }}
+            successThreshold: {{ .Values.readiness.successThreshold }}
+            failureThreshold: {{ .Values.readiness.failureThreshold }}
+          startupProbe:
+            tcpSocket:
+              port: {{ .Values.service.internalPort2 }}
+            initialDelaySeconds: {{ .Values.startup.initialDelaySeconds }}
+            periodSeconds: {{ .Values.startup.periodSeconds }}
+            timeoutSeconds: {{ .Values.startup.timeoutSeconds }}
+            successThreshold: {{ .Values.startup.successThreshold }}
+            failureThreshold: {{ .Values.startup.failureThreshold }}
           resources: {{ include "common.resources" . | nindent 12 }}
           env:
           - name: ENVNAME
index 1e269d0..dde22b5 100644 (file)
@@ -76,17 +76,28 @@ affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 10
-  periodSeconds: 60
+  initialDelaySeconds: 1
+  periodSeconds: 10
   timeoutSeconds: 15
+  successThreshold: 1
+  failureThreshold: 3
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
+  initialDelaySeconds: 1
+  periodSeconds: 10
+  timeoutSeconds: 15
+  successThreshold: 1
+  failureThreshold: 3
+
+startup:
   initialDelaySeconds: 10
-  periodSeconds: 60
+  periodSeconds: 10
   timeoutSeconds: 15
+  successThreshold: 1
+  failureThreshold: 60
 
 service:
   #Example service definition with external, internal and node ports.
index 08228ad..f736a17 100644 (file)
@@ -40,5 +40,15 @@ spec:
               port: {{ .Values.liveness.port }}
             initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
             periodSeconds: {{ .Values.liveness.periodSeconds }}
+            successThreshold: {{ .Values.liveness.successThreshold }}
+            failureThreshold: {{ .Values.liveness.failureThreshold }}
+          startupProbe:
+            httpGet:
+              path: {{ .Values.startup.path }}
+              port: {{ .Values.startup.port }}
+            initialDelaySeconds: {{ .Values.startup.initialDelaySeconds }}
+            periodSeconds: {{ .Values.startup.periodSeconds }}
+            successThreshold: {{ .Values.startup.successThreshold }}
+            failureThreshold: {{ .Values.startup.failureThreshold }}
       imagePullSecrets:
       - name: "{{ include "common.namespace" . }}-docker-registry-key"
index 9c0d906..ede80a6 100644 (file)
@@ -31,14 +31,24 @@ service:
       port: *svc_port
 
 liveness:
-  initialDelaySeconds: 30
-  periodSeconds: 30
+  initialDelaySeconds: 1
+  periodSeconds: 10
   path: /actuator/health
+  successThreshold: 1
+  failureThreshold: 3
   port: *port
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
+startup:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  path: /actuator/health
+  successThreshold: 1
+  failureThreshold: 12
+  port: *port
+
 flavor: small
 resources:
   small:
index af53fd6..7251006 100644 (file)
@@ -128,6 +128,8 @@ spec:
             initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
             periodSeconds: {{ .Values.liveness.periodSeconds }}
             timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
+            successThreshold: {{ .Values.liveness.successThreshold }}
+            failureThreshold: {{ .Values.liveness.failureThreshold }}
           {{ end }}
           readinessProbe:
             exec:
@@ -135,7 +137,18 @@ spec:
               - "/var/lib/jetty/ready-probe.sh"
             initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
             periodSeconds: {{ .Values.readiness.periodSeconds }}
-            timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
+            timeoutSeconds: {{ .Values.readiness.timeoutSeconds }}
+            successThreshold: {{ .Values.readiness.successThreshold }}
+            failureThreshold: {{ .Values.readiness.failureThreshold }}
+          startupProbe:
+            exec:
+              command:
+              - "/var/lib/jetty/ready-probe.sh"
+            initialDelaySeconds: {{ .Values.startup.initialDelaySeconds }}
+            periodSeconds: {{ .Values.startup.periodSeconds }}
+            timeoutSeconds: {{ .Values.startup.timeoutSeconds }}
+            successThreshold: {{ .Values.startup.successThreshold }}
+            failureThreshold: {{ .Values.startup.failureThreshold }}
           resources: {{ include "common.resources" . | nindent 12 }}
           env:
           - name: ENVNAME
index d2dd808..f26a020 100644 (file)
@@ -83,17 +83,28 @@ affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 120
-  periodSeconds: 60
+  initialDelaySeconds: 1
+  periodSeconds: 10
   timeoutSeconds: 15
+  successThreshold: 1
+  failureThreshold: 3
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
-  initialDelaySeconds: 120
-  periodSeconds: 60
+  initialDelaySeconds: 1
+  periodSeconds: 10
   timeoutSeconds: 15
+  successThreshold: 1
+  failureThreshold: 3
+
+startup:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  timeoutSeconds: 15
+  successThreshold: 1
+  failureThreshold: 60
 
 service:
   type: ClusterIP
index 9defb8e..de75092 100644 (file)
@@ -88,12 +88,23 @@ spec:
               port: {{ template "wfd-be.internalPort" . }}
             initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
             periodSeconds: {{ .Values.liveness.periodSeconds }}
+            successThreshold: {{ .Values.liveness.successThreshold }}
+            failureThreshold: {{ .Values.liveness.failureThreshold }}
           {{ end }}
           readinessProbe:
             tcpSocket:
               port: {{ template "wfd-be.internalPort" . }}
             initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
             periodSeconds: {{ .Values.readiness.periodSeconds }}
+            successThreshold: {{ .Values.readiness.successThreshold }}
+            failureThreshold: {{ .Values.readiness.failureThreshold }}
+          startupProbe:
+            tcpSocket:
+              port: {{ template "wfd-be.internalPort" . }}
+            initialDelaySeconds: {{ .Values.startup.initialDelaySeconds }}
+            periodSeconds: {{ .Values.startup.periodSeconds }}
+            successThreshold: {{ .Values.startup.successThreshold }}
+            failureThreshold: {{ .Values.startup.failureThreshold }}
           env:
           - name: JAVA_OPTIONS
             value: {{ .Values.config.javaOptions }}
index dbd6438..d4414f1 100644 (file)
@@ -101,6 +101,28 @@ readiness:
   initialDelaySeconds: 60
   periodSeconds: 10
 
+# probe configuration parameters
+liveness:
+  initialDelaySeconds: 1
+  periodSeconds: 10
+  successThreshold: 1
+  failureThreshold: 3
+  # necessary to disable liveness probe when setting breakpoints
+  # in debugger so K8s doesn't restart unresponsive container
+  enabled: true
+
+readiness:
+  initialDelaySeconds: 1
+  periodSeconds: 10
+  successThreshold: 1
+  failureThreshold: 3
+
+startup:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  successThreshold: 1
+  failureThreshold: 60
+
 service:
   type: NodePort
   portName: sdc-wfd-be
index 7a8cf8f..b8073d7 100644 (file)
@@ -105,12 +105,23 @@ spec:
               port: {{ template "wfd-fe.internalPort" . }}
             initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
             periodSeconds: {{ .Values.liveness.periodSeconds }}
+            successThreshold: {{ .Values.liveness.successThreshold }}
+            failureThreshold: {{ .Values.liveness.failureThreshold }}
           {{ end }}
           readinessProbe:
             tcpSocket:
               port: {{ template "wfd-fe.internalPort" . }}
             initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
             periodSeconds: {{ .Values.readiness.periodSeconds }}
+            successThreshold: {{ .Values.readiness.successThreshold }}
+            failureThreshold: {{ .Values.readiness.failureThreshold }}
+          startupProbe:
+            tcpSocket:
+              port: {{ template "wfd-fe.internalPort" . }}
+            initialDelaySeconds: {{ .Values.startup.initialDelaySeconds }}
+            periodSeconds: {{ .Values.startup.periodSeconds }}
+            successThreshold: {{ .Values.startup.successThreshold }}
+            failureThreshold: {{ .Values.startup.failureThreshold }}
           env:
           - name: ENVNAME
             value: {{ .Values.env.name }}
index e001f2f..3cc9b95 100644 (file)
@@ -77,15 +77,25 @@ affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 60
+  initialDelaySeconds: 1
   periodSeconds: 10
+  successThreshold: 1
+  failureThreshold: 3
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
-  initialDelaySeconds: 60
+  initialDelaySeconds: 1
   periodSeconds: 10
+  successThreshold: 1
+  failureThreshold: 3
+
+startup:
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  successThreshold: 1
+  failureThreshold: 60
 
 service:
   type: NodePort