1 # Copyright © 2020 Bitnami, AT&T, Amdocs, Bell Canada, highstreet technologies
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
15 #################################################################
16 # Global configuration defaults.
17 #################################################################
20 mountPath: /dockerdata-nfs
22 mountPath: /dockerdata-nfs/backup
24 clusterName: cluster.local
25 repositoryOverride: docker.io
26 #################################################################
27 # Application configuration defaults.
28 #################################################################
30 ## Elasticsearch curator parameters
35 imageName: bitnami/elasticsearch-curator
36 tag: 5.8.1-debian-9-r74
37 pullPolicy: IfNotPresent
38 ## Optionally specify an array of imagePullSecrets.
39 ## Secrets must be manually created in the namespace.
40 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
43 # - myRegistryKeySecretName
51 failedJobsHistoryLimit: ""
52 successfulJobsHistoryLimit: ""
53 jobRestartPolicy: Never
56 # Specifies whether RBAC should be enabled
59 # Specifies whether a ServiceAccount should be created
61 # The name of the ServiceAccount to use.
62 # If not set and create is true, a name is generated using the fullname template
65 # Specifies whether a podsecuritypolicy should be created
70 # run curator in dry-run mode
75 # Delete indices older than 90 days
80 action: delete_indices
81 description: "Clean up ES by deleting old indices"
# Canonical lowercase booleans per YAML 1.2 (yamllint "truthy" rule);
# avoids YAML 1.1 capitalized-boolean ambiguity across parsers.
84 continue_if_exception: false
86 ignore_empty_list: true
91 timestring: '%Y.%m.%d'
98 # Default config (this value is evaluated as a template)
103 {{ template "common.fullname" . }}.{{ template "common.namespace" . }}.svc.{{ .Values.global.clusterName }}
104 port: {{ .Values.service.port }}
110 # ssl_no_validate: true
118 # blacklist: ['elasticsearch', 'urllib3']
119 ## Curator resources requests and limits
120 ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
123 # We usually recommend not to specify default resources and to leave this as a conscious
124 # choice for the user. This also increases chances charts run on environments with little
125 # resources, such as Minikube. If you do want to specify resources, uncomment the following
126 # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
133 priorityClassName: ""
134 # extraVolumes and extraVolumeMounts allows you to mount other volumes
135 # Example Use Case: mount ssl certificates when elasticsearch has tls enabled
140 # secretName: es-certs
145 ## Add your own init container or uncomment and modify the given example.
147 extraInitContainers: {}
148 ## Don't configure the S3 repository until Elasticsearch is reachable.
149 ## Ensure that it is available at http://elasticsearch:9200
151 # elasticsearch-s3-repository:
152 # image: bitnami/minideb:latest
153 # imagePullPolicy: "IfNotPresent"
159 # ES_HOST=elasticsearch
161 # ES_REPOSITORY=backup
162 # S3_REGION=us-east-1
164 # S3_BASE_PATH=backup
166 # S3_STORAGE_CLASS=standard
167 # install_packages curl && \
168 # ( counter=0; while (( counter++ < 120 )); do curl -s http://${ES_HOST}:${ES_PORT} >/dev/null 2>&1 && break; echo "Waiting for elasticsearch $counter/120"; sleep 1; done ) && \
169 # cat <<EOF | curl -sS -XPUT -H "Content-Type: application/json" -d @- http://${ES_HOST}:${ES_PORT}/_snapshot/${ES_REPOSITORY} \
173 # "bucket": "${S3_BUCKET}",
174 # "base_path": "${S3_BASE_PATH}",
175 # "region": "${S3_REGION}",
176 # "compress": "${S3_COMPRESS}",
177 # "storage_class": "${S3_STORAGE_CLASS}"