2 # Copyright © 2019 Intel Corporation Inc
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
19 name: {{ include "common.fullname" . }}
21 heritage: "{{ .Release.Service }}"
22 release: "{{ include "common.release" . }}"
23 chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
24 app: {{ include "common.name" . }}
26 serviceName: {{ include "common.servicename" .}}
27 replicas: {{ .Values.replicaCount }}
30 app: {{ include "common.name" . }}
34 heritage: "{{ .Release.Service }}"
35 release: "{{ include "common.release" . }}"
36 chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
37 app: {{ include "common.name" . }}
39 {{- if .Values.affinity }}
41 {{ toYaml .Values.affinity | indent 8 }}
43 {{- if .Values.nodeSelector }}
45 {{ toYaml .Values.nodeSelector | indent 8 }}
47 {{- if .Values.tolerations }}
49 {{ toYaml .Values.tolerations | indent 8 }}
52 - name: "{{ include "common.namespace" . }}-docker-registry-key"
54 - name: {{ include "common.name" . }}
55 image: {{ include "repositoryGenerator.googleK8sRepository" . }}/{{ .Values.image }}
56 imagePullPolicy: "{{ .Values.pullPolicy }}"
58 - containerPort: {{ .Values.service.peerInternalPort }}
59 name: {{ .Values.service.peerPortName }}
60 - containerPort: {{ .Values.service.clientInternalPort }}
61 name: {{ .Values.service.clientPortName }}
62 {{- if eq .Values.liveness.enabled true }}
65 port: {{ .Values.service.clientInternalPort }}
66 initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
67 periodSeconds: {{ .Values.liveness.periodSeconds }}
68 timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
70 resources: {{ include "common.resources" . | nindent 10 }}
72 - name: INITIAL_CLUSTER_SIZE
73 value: {{ .Values.replicaCount | quote }}
75 value: {{ include "common.fullname" . }}
77 value: {{ include "common.servicename" . }}.{{ include "common.namespace" . }}.svc.{{ .Values.global.clusterName }}
78 {{- if .Values.extraEnv }}
79 {{ toYaml .Values.extraEnv | indent 8 }}
89 for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
90 EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SERVICE_NAME}:2379"
96 etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
99 SET_ID=${HOSTNAME##*[^0-9]}
101 if [ "${SET_ID}" -ge ${INITIAL_CLUSTER_SIZE} ]; then
102 echo "Removing ${HOSTNAME} from etcd cluster"
103 ETCDCTL_ENDPOINT=${EPS} etcdctl member remove $(member_hash)
104 if [ $? -eq 0 ]; then
105 # Remove everything, otherwise the cluster will no longer scale up
106 rm -rf /var/run/etcd/*
115 # store member id into PVC for later member replacement
117 while ! etcdctl member list &>/dev/null; do sleep 1; done
118 etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1 > /var/run/etcd/member_id
124 for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
125 EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SERVICE_NAME}:2379"
131 etcdctl member list | grep http://${HOSTNAME}.${SERVICE_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
134 # we should wait for other pods to be up before trying to join
135 # otherwise we get "no such host" errors when trying to resolve other members
136 for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
137 if [ "${SET_NAME}-${i}" == "${HOSTNAME}" ]; then
138 echo "Skipping self-checking"
142 echo "Waiting for ${SET_NAME}-${i}.${SERVICE_NAME} to come up"
143 ping -W 1 -c 1 ${SET_NAME}-${i}.${SERVICE_NAME} > /dev/null && break
148 # re-joining after failure?
149 if [[ -e /var/run/etcd/default.etcd && -f /var/run/etcd/member_id ]]; then
150 echo "Re-joining etcd member"
151 member_id=$(cat /var/run/etcd/member_id)
154 ETCDCTL_ENDPOINT=$(eps) etcdctl member update ${member_id} http://${HOSTNAME}.${SERVICE_NAME}:2380 | true
155 exec etcd --name ${HOSTNAME} \
156 --listen-peer-urls http://0.0.0.0:2380 \
157 --listen-client-urls http://0.0.0.0:2379\
158 --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
159 --data-dir /var/run/etcd/default.etcd
163 SET_ID=${HOSTNAME##*[^0-9]}
165 # adding a new member to existing cluster (assuming all initial pods are available)
166 if [ "${SET_ID}" -ge ${INITIAL_CLUSTER_SIZE} ]; then
167 export ETCDCTL_ENDPOINT=$(eps)
169 # member already added?
170 MEMBER_HASH=$(member_hash)
171 if [ -n "${MEMBER_HASH}" ]; then
172 # the member hash exists but for some reason etcd failed
173 # as the datadir has not been created, we can remove the member
174 # and retrieve new hash
175 etcdctl member remove ${MEMBER_HASH}
178 echo "Adding new member"
179 etcdctl member add ${HOSTNAME} http://${HOSTNAME}.${SERVICE_NAME}:2380 | grep "^ETCD_" > /var/run/etcd/new_member_envs
181 if [ $? -ne 0 ]; then
183 rm -f /var/run/etcd/new_member_envs
187 cat /var/run/etcd/new_member_envs
188 . /var/run/etcd/new_member_envs
192 exec etcd --name ${HOSTNAME} \
193 --listen-peer-urls http://0.0.0.0:2380 \
194 --listen-client-urls http://0.0.0.0:2379 \
195 --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
196 --data-dir /var/run/etcd/default.etcd \
197 --initial-advertise-peer-urls http://${HOSTNAME}.${SERVICE_NAME}:2380 \
198 --initial-cluster ${ETCD_INITIAL_CLUSTER} \
199 --initial-cluster-state ${ETCD_INITIAL_CLUSTER_STATE}
203 for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
204 PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SERVICE_NAME}:2380"
210 exec etcd --name ${HOSTNAME} \
211 --initial-advertise-peer-urls http://${HOSTNAME}.${SERVICE_NAME}:2380 \
212 --listen-peer-urls http://0.0.0.0:2380 \
213 --listen-client-urls http://0.0.0.0:2379 \
214 --advertise-client-urls http://${HOSTNAME}.${SERVICE_NAME}:2379 \
215 --initial-cluster-token etcd-cluster-1 \
216 --initial-cluster ${PEERS} \
217 --initial-cluster-state new \
218 --data-dir /var/run/etcd/default.etcd
220 - name: {{ include "common.fullname" . }}-data
221 mountPath: /var/run/etcd
222 {{- if .Values.persistence.enabled }}
223 volumeClaimTemplates:
225 name: {{ include "common.fullname" . }}-data
227 name: {{ include "common.fullname" . }}
228 chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
229 release: "{{ include "common.release" . }}"
230 heritage: "{{ .Release.Service }}"
233 - "{{ .Values.persistence.accessMode }}"
234 storageClassName: {{ include "common.storageClass" . }}
237 # upstream recommended max is 700M
238 storage: "{{ .Values.persistence.storage }}"
241 - name: {{ include "common.fullname" . }}-data
242 {{- if .Values.memoryMode }}