Merge "[COMMON] Make MariaDB compatible with Kubernetes v1.17"
[oom.git] / kubernetes / common / etcd / templates / statefulset.yaml
# Copyright © 2019 Intel Corporation Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# StatefulSet running an etcd cluster of .Values.replicaCount members.
# Members discover each other through the headless service
# (common.servicename); member state lives under /var/run/etcd, backed
# either by a PVC (persistence.enabled) or an emptyDir.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ include "common.fullname" . }}
  labels:
    heritage: "{{ .Release.Service }}"
    release: "{{ include "common.release" . }}"
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    app: {{ include "common.name" . }}
spec:
  serviceName: {{ include "common.servicename" . }}
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app: {{ include "common.name" . }}
  template:
    metadata:
      labels:
        heritage: "{{ .Release.Service }}"
        release: "{{ include "common.release" . }}"
        chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
        app: {{ include "common.name" . }}
    spec:
{{- if .Values.affinity }}
      affinity:
{{ toYaml .Values.affinity | indent 8 }}
{{- end }}
{{- if .Values.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.tolerations }}
      tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
      containers:
      - name: {{ include "common.fullname" . }}
        image: "{{ .Values.repository }}/{{ .Values.image }}"
        imagePullPolicy: "{{ .Values.pullPolicy }}"
        ports:
        - containerPort: {{ .Values.service.peerInternalPort }}
          name: {{ .Values.service.peerPortName }}
        - containerPort: {{ .Values.service.clientInternalPort }}
          name: {{ .Values.service.clientPortName }}
        {{- if eq .Values.liveness.enabled true }}
        livenessProbe:
          tcpSocket:
            port: {{ .Values.service.clientInternalPort }}
          initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
          periodSeconds: {{ .Values.liveness.periodSeconds }}
          timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
        {{- end }}
        resources:
{{ include "common.resources" . | indent 10 }}
        env:
        - name: INITIAL_CLUSTER_SIZE
          value: {{ .Values.replicaCount | quote }}
        - name: SET_NAME
          value: {{ include "common.fullname" . }}
        - name: SERVICE_NAME
          value: {{ include "common.servicename" . }}
{{- if .Values.extraEnv }}
{{ toYaml .Values.extraEnv | indent 8 }}
{{- end }}
        lifecycle:
          preStop:
            exec:
              # On scale-down, deregister this member from the cluster
              # before the pod goes away, and wipe its data dir so a
              # future scale-up can re-add a fresh member.
              command:
                - "/bin/sh"
                - "-ec"
                - |
                  EPS=""
                  for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                      EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SERVICE_NAME}:2379"
                  done

                  HOSTNAME=$(hostname)

                  # Member id of this pod, extracted from `etcdctl member list`
                  # output by matching our peer URL.
                  member_hash() {
                      etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
                  }

                  # Ordinal of this pod within the StatefulSet.
                  SET_ID=${HOSTNAME##*[^0-9]}

                  # Only members beyond the initial cluster size are removed;
                  # the initial members are expected to come back.
                  if [ "${SET_ID}" -ge ${INITIAL_CLUSTER_SIZE} ]; then
                      echo "Removing ${HOSTNAME} from etcd cluster"
                      ETCDCTL_ENDPOINT=${EPS} etcdctl member remove $(member_hash)
                      if [ $? -eq 0 ]; then
                          # Remove everything otherwise the cluster will no longer scale-up
                          rm -rf /var/run/etcd/*
                      fi
                  fi
        command:
          - "/bin/sh"
          - "-ec"
          - |
            HOSTNAME=$(hostname)

            # store member id into PVC for later member replacement
            collect_member() {
                # NOTE: was `&>/dev/null`, a bashism that backgrounds the
                # command under POSIX sh; use explicit redirection instead.
                while ! etcdctl member list > /dev/null 2>&1; do sleep 1; done
                etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1 > /var/run/etcd/member_id
                exit 0
            }

            # Comma-separated client endpoints of the initial members.
            eps() {
                EPS=""
                for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                    EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SERVICE_NAME}:2379"
                done
                echo ${EPS}
            }

            # Member id of this pod, matched by its peer URL.
            member_hash() {
                etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
            }

            # we should wait for other pods to be up before trying to join
            # otherwise we got "no such host" errors when trying to resolve other members
            for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                while true; do
                    echo "Waiting for ${SET_NAME}-${i}.${SERVICE_NAME} to come up"
                    ping -W 1 -c 1 ${SET_NAME}-${i}.${SERVICE_NAME} > /dev/null && break
                    sleep 1s
                done
            done

            # re-joining after failure?
            # NOTE: was `[[ ... && ... ]]`, a bashism; script runs under /bin/sh.
            if [ -e /var/run/etcd/default.etcd ] && [ -f /var/run/etcd/member_id ]; then
                echo "Re-joining etcd member"
                member_id=$(cat /var/run/etcd/member_id)

                # re-join member
                # NOTE(review): `| true` discards output and masks any
                # `member update` failure; presumably intentional best-effort
                # (`|| true` would preserve output) — confirm before changing.
                ETCDCTL_ENDPOINT=$(eps) etcdctl member update ${member_id} http://${HOSTNAME}.${SERVICE_NAME}:2380 | true
                exec etcd --name ${HOSTNAME} \
                    --listen-peer-urls http://0.0.0.0:2380 \
                    --listen-client-urls http://0.0.0.0:2379 \
                    --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
                    --data-dir /var/run/etcd/default.etcd
            fi

            # etcd-SET_ID
            SET_ID=${HOSTNAME##*[^0-9]}

            # adding a new member to existing cluster (assuming all initial pods are available)
            if [ "${SET_ID}" -ge ${INITIAL_CLUSTER_SIZE} ]; then
                export ETCDCTL_ENDPOINT=$(eps)

                # member already added?
                MEMBER_HASH=$(member_hash)
                if [ -n "${MEMBER_HASH}" ]; then
                    # the member hash exists but for some reason etcd failed
                    # as the datadir has not be created, we can remove the member
                    # and retrieve new hash
                    etcdctl member remove ${MEMBER_HASH}
                fi

                echo "Adding new member"
                etcdctl member add ${HOSTNAME} http://${HOSTNAME}.${SERVICE_NAME}:2380 | grep "^ETCD_" > /var/run/etcd/new_member_envs

                if [ $? -ne 0 ]; then
                    echo "Exiting"
                    rm -f /var/run/etcd/new_member_envs
                    exit 1
                fi

                cat /var/run/etcd/new_member_envs
                # NOTE: was `source`, a bashism; `.` is the POSIX equivalent.
                . /var/run/etcd/new_member_envs

                collect_member &

                exec etcd --name ${HOSTNAME} \
                    --listen-peer-urls http://0.0.0.0:2380 \
                    --listen-client-urls http://0.0.0.0:2379 \
                    --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
                    --data-dir /var/run/etcd/default.etcd \
                    --initial-advertise-peer-urls http://${HOSTNAME}.${SERVICE_NAME}:2380 \
                    --initial-cluster ${ETCD_INITIAL_CLUSTER} \
                    --initial-cluster-state ${ETCD_INITIAL_CLUSTER_STATE}
            fi

            PEERS=""
            for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SERVICE_NAME}:2380"
            done

            collect_member &

            # join member
            exec etcd --name ${HOSTNAME} \
                --initial-advertise-peer-urls http://${HOSTNAME}.${SERVICE_NAME}:2380 \
                --listen-peer-urls http://0.0.0.0:2380 \
                --listen-client-urls http://0.0.0.0:2379 \
                --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
                --initial-cluster-token etcd-cluster-1 \
                --initial-cluster ${PEERS} \
                --initial-cluster-state new \
                --data-dir /var/run/etcd/default.etcd
        volumeMounts:
        - name: {{ include "common.fullname" . }}-data
          mountPath: /var/run/etcd
  {{- if .Values.persistence.enabled }}
  volumeClaimTemplates:
  - metadata:
      name: {{ include "common.fullname" . }}-data
      labels:
        name: {{ include "common.fullname" . }}
        chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
        release: "{{ include "common.release" . }}"
        heritage: "{{ .Release.Service }}"
    spec:
      accessModes:
      - "{{ .Values.persistence.accessMode }}"
      storageClassName: {{ include "common.storageClass" . }}
      resources:
        requests:
          # upstream recommended max is 700M
          storage: "{{ .Values.persistence.storage }}"
  {{- else }}
      volumes:
      - name: {{ include "common.fullname" . }}-data
      {{- if .Values.memoryMode }}
        emptyDir:
          medium: Memory
      {{- else }}
        emptyDir: {}
      {{- end }}
  {{- end }}