oom.git: kubernetes/common/etcd/templates/statefulset.yaml
{{/*
# Copyright © 2019 Intel Corporation Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/}}
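{{/*
  This template renders an etcd StatefulSet. Cluster membership is handled by the
  shell scripts embedded below: a startup script that bootstraps, re-joins or adds
  members, and a preStop hook that deregisters a member on scale-down.
*/}}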
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ include "common.fullname" . }}
  labels:
    heritage: "{{ .Release.Service }}"
    release: "{{ include "common.release" . }}"
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    app: {{ include "common.name" . }}
spec:
  serviceName: {{ include "common.servicename" . }}
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app: {{ include "common.name" . }}
  template:
    metadata:
      labels:
        heritage: "{{ .Release.Service }}"
        release: "{{ include "common.release" . }}"
        chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
        app: {{ include "common.name" . }}
    spec:
{{- if .Values.affinity }}
      affinity:
{{ toYaml .Values.affinity | indent 8 }}
{{- end }}
{{- if .Values.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.tolerations }}
      tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
      imagePullSecrets:
      - name: "{{ include "common.namespace" . }}-docker-registry-key"
      containers:
      - name: {{ include "common.name" . }}
        image: {{ include "repositoryGenerator.googleK8sRepository" . }}/{{ .Values.image }}
        imagePullPolicy: "{{ .Values.pullPolicy }}"
        ports:
        - containerPort: {{ .Values.service.peerInternalPort }}
          name: {{ .Values.service.peerPortName }}
        - containerPort: {{ .Values.service.clientInternalPort }}
          name: {{ .Values.service.clientPortName }}
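        # the scripts below assume the internal peer/client ports are 2380/2379;
        # the optional liveness probe only checks that the client port accepts TCP connections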
        {{- if eq .Values.liveness.enabled true }}
        livenessProbe:
          tcpSocket:
            port: {{ .Values.service.clientInternalPort }}
          initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
          periodSeconds: {{ .Values.liveness.periodSeconds }}
          timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
        {{ end -}}
        resources:
{{ include "common.resources" . | indent 10 }}
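        # INITIAL_CLUSTER_SIZE, SET_NAME and SERVICE_NAME are consumed by the
        # preStop and startup scripts below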
        env:
        - name: INITIAL_CLUSTER_SIZE
          value: {{ .Values.replicaCount | quote }}
        - name: SET_NAME
          value: {{ include "common.fullname" . }}
        - name: SERVICE_NAME
          value: {{ include "common.servicename" . }}.{{ include "common.namespace" . }}.svc.{{ .Values.global.clusterName }}
{{- if .Values.extraEnv }}
{{ toYaml .Values.extraEnv | indent 8 }}
{{- end }}
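        # preStop hook: a pod whose ordinal is >= INITIAL_CLUSTER_SIZE (i.e. one being
        # removed by a scale-down) deregisters itself from the cluster and wipes its
        # data dir so a later scale-up can start from a clean state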
        lifecycle:
          preStop:
            exec:
              command:
                - "/bin/sh"
                - "-ec"
                - |
                  EPS=""
                  for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                      EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SERVICE_NAME}:2379"
                  done

                  HOSTNAME=$(hostname)

                  member_hash() {
                      etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
                  }

                  SET_ID=${HOSTNAME##*[^0-9]}

                  if [ "${SET_ID}" -ge ${INITIAL_CLUSTER_SIZE} ]; then
                      echo "Removing ${HOSTNAME} from etcd cluster"
                      ETCDCTL_ENDPOINT=${EPS} etcdctl member remove $(member_hash)
                      if [ $? -eq 0 ]; then
                          # Remove everything, otherwise the cluster will no longer scale up
                          rm -rf /var/run/etcd/*
                      fi
                  fi
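        # startup script: wait until the other initial peers resolve, then either
        # re-join with the member id stored on the data volume, add this pod as a
        # new member (scale-up), or bootstrap a brand new cluster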
        command:
          - "/bin/sh"
          - "-ec"
          - |
            HOSTNAME=$(hostname)

            # store member id into PVC for later member replacement
            collect_member() {
                while ! etcdctl member list > /dev/null 2>&1; do sleep 1; done
                etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1 > /var/run/etcd/member_id
                exit 0
            }

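            # eps() returns the comma-separated list of client endpoints for the initial members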
            eps() {
                EPS=""
                for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                    EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SERVICE_NAME}:2379"
                done
                echo ${EPS}
            }

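            # member_hash() extracts this pod's member id from 'etcdctl member list' output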
            member_hash() {
                etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
            }

            # we should wait for the other pods to be up before trying to join,
            # otherwise we get "no such host" errors when trying to resolve other members
            for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                if [ "${SET_NAME}-${i}" = "${HOSTNAME}" ]; then
                    echo "Skipping self-checking"
                    continue
                fi
                while true; do
                    echo "Waiting for ${SET_NAME}-${i}.${SERVICE_NAME} to come up"
                    ping -W 1 -c 1 ${SET_NAME}-${i}.${SERVICE_NAME} > /dev/null && break
                    sleep 1s
                done
            done

            # re-joining after failure?
            if [ -e /var/run/etcd/default.etcd ] && [ -f /var/run/etcd/member_id ]; then
                echo "Re-joining etcd member"
                member_id=$(cat /var/run/etcd/member_id)

                # re-join member
                ETCDCTL_ENDPOINT=$(eps) etcdctl member update ${member_id} http://${HOSTNAME}.${SERVICE_NAME}:2380 || true
                exec etcd --name ${HOSTNAME} \
                    --listen-peer-urls http://0.0.0.0:2380 \
                    --listen-client-urls http://0.0.0.0:2379 \
                    --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
                    --data-dir /var/run/etcd/default.etcd
            fi

            # extract the numeric ordinal (SET_ID) from the pod hostname, e.g. etcd-3 -> 3
            SET_ID=${HOSTNAME##*[^0-9]}

            # adding a new member to an existing cluster (assuming all initial pods are available)
            if [ "${SET_ID}" -ge ${INITIAL_CLUSTER_SIZE} ]; then
                export ETCDCTL_ENDPOINT=$(eps)

                # member already added?
                MEMBER_HASH=$(member_hash)
                if [ -n "${MEMBER_HASH}" ]; then
                    # the member hash exists but for some reason etcd failed;
                    # as the data dir has not been created, we can remove the member
                    # and retrieve a new hash
                    etcdctl member remove ${MEMBER_HASH}
                fi

                echo "Adding new member"
                etcdctl member add ${HOSTNAME} http://${HOSTNAME}.${SERVICE_NAME}:2380 | grep "^ETCD_" > /var/run/etcd/new_member_envs

                if [ $? -ne 0 ]; then
                    echo "Exiting"
                    rm -f /var/run/etcd/new_member_envs
                    exit 1
                fi

                cat /var/run/etcd/new_member_envs
                . /var/run/etcd/new_member_envs

                collect_member &

                exec etcd --name ${HOSTNAME} \
                    --listen-peer-urls http://0.0.0.0:2380 \
                    --listen-client-urls http://0.0.0.0:2379 \
                    --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
                    --data-dir /var/run/etcd/default.etcd \
                    --initial-advertise-peer-urls http://${HOSTNAME}.${SERVICE_NAME}:2380 \
                    --initial-cluster ${ETCD_INITIAL_CLUSTER} \
                    --initial-cluster-state ${ETCD_INITIAL_CLUSTER_STATE}
            fi

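            # normal path: this pod is one of the initial members, so bootstrap a new
            # cluster listing all INITIAL_CLUSTER_SIZE peers up front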
            PEERS=""
            for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SERVICE_NAME}:2380"
            done

            collect_member &

            # join member
            exec etcd --name ${HOSTNAME} \
                --initial-advertise-peer-urls http://${HOSTNAME}.${SERVICE_NAME}:2380 \
                --listen-peer-urls http://0.0.0.0:2380 \
                --listen-client-urls http://0.0.0.0:2379 \
                --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
                --initial-cluster-token etcd-cluster-1 \
                --initial-cluster ${PEERS} \
                --initial-cluster-state new \
                --data-dir /var/run/etcd/default.etcd
        volumeMounts:
        - name: {{ include "common.fullname" . }}-data
          mountPath: /var/run/etcd
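  {{- /* etcd data lives on a PVC when persistence is enabled; otherwise an
  emptyDir is used (backed by memory when .Values.memoryMode is set) */}}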
  {{- if .Values.persistence.enabled }}
  volumeClaimTemplates:
  - metadata:
      name: {{ include "common.fullname" . }}-data
      labels:
        name: {{ include "common.fullname" . }}
        chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
        release: "{{ include "common.release" . }}"
        heritage: "{{ .Release.Service }}"
    spec:
      accessModes:
      - "{{ .Values.persistence.accessMode }}"
      storageClassName: {{ include "common.storageClass" . }}
      resources:
        requests:
          # upstream recommended max is 700M
          storage: "{{ .Values.persistence.storage }}"
  {{- else }}
      volumes:
      - name: {{ include "common.fullname" . }}-data
      {{- if .Values.memoryMode }}
        emptyDir:
          medium: Memory
      {{- else }}
        emptyDir: {}
      {{- end }}
  {{- end }}