Etcd pod fails to start with file not found error.
The error occurs because the pod is stuck in an
infinite restart loop while the readiness probe never succeeds.
Since we already check the pod status elsewhere, the
readiness probe is unnecessary and has been removed.
Bumped the etcd version to 3.2.24.
This fixes a known issue:
https://github.com/etcd-io/etcd/pull/4861
Issue-ID: MULTICLOUD-660
Change-Id: I815766b4a8f187d88bb2fcdb71e9d6e24b277d25
Signed-off-by: Kiran Kamineni <kiran.k.kamineni@intel.com>
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: {{ .Values.service.clientPortName }}
{{- if eq .Values.liveness.enabled true }}
livenessProbe:
name: {{ .Values.service.clientPortName }}
{{- if eq .Values.liveness.enabled true }}
livenessProbe:
- exec:
- command: ["/bin/sh", "-c", "etcdctl cluster-health | grep -w healthy" ]
- initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
- periodSeconds: {{ .Values.liveness.periodSeconds }}
- timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
- {{ end -}}
- readinessProbe:
- exec:
- command: ["/bin/sh", "-c", "etcdctl cluster-health | grep -w healthy" ]
- initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
- periodSeconds: {{ .Values.readiness.periodSeconds }}
+ tcpSocket:
+ port: {{ .Values.service.clientInternalPort }}
+ initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
+ periodSeconds: {{ .Values.liveness.periodSeconds }}
+ timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
+ {{ end -}}
resources:
{{ include "common.resources" . | indent 10 }}
env:
resources:
{{ include "common.resources" . | indent 10 }}
env:
done
# re-joining after failure?
done
# re-joining after failure?
- if [ -e /var/run/etcd/default.etcd ]; then
+ if [[ -e /var/run/etcd/default.etcd && -f /var/run/etcd/member_id ]]; then
echo "Re-joining etcd member"
member_id=$(cat /var/run/etcd/member_id)
echo "Re-joining etcd member"
member_id=$(cat /var/run/etcd/member_id)
#repository: etcd
repository: "k8s.gcr.io"
#repository: etcd
repository: "k8s.gcr.io"
-image: "etcd-amd64:2.2.5"
+image: "etcd-amd64:3.2.24"
pullPolicy: Always
# default number of instances in the StatefulSet
pullPolicy: Always
# default number of instances in the StatefulSet
# probe configuration parameters
liveness:
initialDelaySeconds: 90
# probe configuration parameters
liveness:
initialDelaySeconds: 90
- periodSeconds: 10
- timeoutSeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 5
# necessary to disable liveness probe when setting breakpoints
# in debugger so K8s doesn't restart unresponsive container
enabled: true
# necessary to disable liveness probe when setting breakpoints
# in debugger so K8s doesn't restart unresponsive container
enabled: true
-readiness:
- initialDelaySeconds: 90
- periodSeconds: 10
-
persistence:
enabled: false
## etcd data Persistent Volume Storage Class
persistence:
enabled: false
## etcd data Persistent Volume Storage Class