Fix etcd pod startup by switching to a TCP liveness probe

The exec-based liveness and readiness probes ran "etcdctl
cluster-health", which could fail while the cluster was still
bootstrapping, and their initialDelaySeconds/periodSeconds/
timeoutSeconds fields were mis-nested under exec: instead of under
the probe itself. Replace both probes with a single tcpSocket
liveness probe on the client port, properly scoped by the
liveness.enabled conditional, and only attempt an etcd member
re-join when the saved member_id file exists alongside the data
directory.
diff --git a/kubernetes/common/etcd/templates/statefulset.yaml b/kubernetes/common/etcd/templates/statefulset.yaml
index 8b6a534..7190c5b 100644
--- a/kubernetes/common/etcd/templates/statefulset.yaml
+++ b/kubernetes/common/etcd/templates/statefulset.yaml
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 apiVersion: apps/v1beta1
 kind: StatefulSet
 metadata:
@@ -55,17 +54,12 @@ spec:
           name: {{ .Values.service.clientPortName }}
         {{- if eq .Values.liveness.enabled true }}
         livenessProbe:
-          exec:
-            command: ["/bin/sh", "-c", "etcdctl cluster-health | grep -w healthy" ]
-            initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
-            periodSeconds: {{ .Values.liveness.periodSeconds }}
-            timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
-          {{ end -}}
-        readinessProbe:
-          exec:
-            command: ["/bin/sh", "-c", "etcdctl cluster-health | grep -w healthy" ]
-            initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
-            periodSeconds: {{ .Values.readiness.periodSeconds }}
+          tcpSocket:
+            port: {{ .Values.service.clientInternalPort }}
+          initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
+          periodSeconds: {{ .Values.liveness.periodSeconds }}
+          timeoutSeconds: {{ .Values.liveness.timeoutSeconds }}
+        {{ end -}}
         resources:
 {{ include "common.resources" . | indent 10 }}
         env:
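
For reference, with typical chart values the new probe renders roughly as
shown below. This is a sketch: the concrete port and timing values come
from the chart's values.yaml and are assumed here, not taken from this
commit.

    livenessProbe:
      tcpSocket:
        port: 2379             # .Values.service.clientInternalPort (assumed)
      initialDelaySeconds: 5   # .Values.liveness.initialDelaySeconds (assumed)
      periodSeconds: 10        # .Values.liveness.periodSeconds (assumed)
      timeoutSeconds: 10       # .Values.liveness.timeoutSeconds (assumed)

A tcpSocket probe only checks that the kubelet can open a connection to the
port, so it cannot kill pods the way the exec probe could while the cluster
was still forming and "etcdctl cluster-health" had nothing healthy to report.
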
@@ -142,7 +136,7 @@ spec:
             done
 
             # re-joining after failure?
-            if [ -e /var/run/etcd/default.etcd ]; then
+            if [[ -e /var/run/etcd/default.etcd && -f /var/run/etcd/member_id ]]; then
                 echo "Re-joining etcd member"
                 member_id=$(cat /var/run/etcd/member_id)
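
For context, in the upstream etcd StatefulSet bootstrap script that this
template is based on, the re-join branch continues roughly as sketched
below. Variable names such as SET_NAME and the 2379/2380 ports are
assumptions about the surrounding script, not part of this commit.

    # re-joining after failure? (sketch; surrounding names assumed)
    if [[ -e /var/run/etcd/default.etcd && -f /var/run/etcd/member_id ]]; then
        echo "Re-joining etcd member"
        member_id=$(cat /var/run/etcd/member_id)
        # re-advertise this pod's peer URL under the saved member id,
        # then restart etcd against the existing data directory
        etcdctl member update "${member_id}" \
            "http://${HOSTNAME}.${SET_NAME}:2380"
        exec etcd --name "${HOSTNAME}" \
            --listen-peer-urls http://0.0.0.0:2380 \
            --listen-client-urls http://0.0.0.0:2379 \
            --advertise-client-urls "http://${HOSTNAME}.${SET_NAME}:2379" \
            --data-dir /var/run/etcd/default.etcd
    fi

Guarding on /var/run/etcd/member_id as well as the data directory avoids
running "etcdctl member update" with an empty member id when the data
directory survived a crash but the id file was never written.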