X-Git-Url: https://gerrit.onap.org/r/gitweb?a=blobdiff_plain;f=packages%2Fbase%2Fsrc%2Ffiles%2Finstall%2Felk%2Fconfig%2Felasticsearch.yml;h=132f05ec90e3f75bface85dc77eaa96931ef4d5d;hb=ee033842018375e0200a9b8f2e0dc813974440fd;hp=618f2a241008d87ea67cb7dfddda1bf160abd334;hpb=87c95be02a8a4d77e165dede90777e811b59dcae;p=policy%2Fengine.git diff --git a/packages/base/src/files/install/elk/config/elasticsearch.yml b/packages/base/src/files/install/elk/config/elasticsearch.yml index 618f2a241..132f05ec9 100644 --- a/packages/base/src/files/install/elk/config/elasticsearch.yml +++ b/packages/base/src/files/install/elk/config/elasticsearch.yml @@ -1,392 +1,116 @@ -##################### Elasticsearch Configuration Example ##################### - -# This file contains an overview of various configuration settings, -# targeted at operations staff. Application developers should -# consult the guide at . -# -# The installation procedure is covered at -# . -# -# Elasticsearch comes with reasonable defaults for most settings, -# so you can try it out without bothering with configuration. -# -# Most of the time, these defaults are just fine for running a production -# cluster. If you're fine-tuning your cluster, or wondering about the -# effect of certain configuration option, please _do ask_ on the -# mailing list or IRC channel [http://elasticsearch.org/community]. - -# Any element in the configuration can be replaced with environment variables -# by placing them in ${...} notation. For example: -# -#node.rack: ${RACK_ENV_VAR} - -# For information on supported formats and syntax for the config file, see -# - - -################################### Cluster ################################### - -# Cluster name identifies your cluster for auto-discovery. If you're running -# multiple clusters on the same network, make sure you're using unique names. -# -#cluster.name: elasticsearch -cluster.name: ${{FQDN}}-policy-sa - - -#################################### Node ##################################### - -# Node names are generated dynamically on startup, so you're relieved -# from configuring them manually. You can tie this node to a specific name: -# -#node.name: "Franz Kafka" -node.name: "${{FQDN}}" - -# Every node can be configured to allow or deny being eligible as the master, -# and to allow or deny to store the data. # -# Allow this node to be eligible as a master node (enabled by default): +#============LICENSE_START================================================== +# ONAP Policy Engine +#=========================================================================== +# Copyright (C) 2017-2018 AT&T Intellectual Property. All rights reserved. +#=========================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -#node.master: true +# http://www.apache.org/licenses/LICENSE-2.0 # -# Allow this node to store data (enabled by default): +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#============LICENSE_END================================================== # -#node.data: true -# You can exploit these settings to design advanced cluster topologies. -# -# 1. 
You want this node to never become a master node, only to hold data. -# This will be the "workhorse" of your cluster. -# -#node.master: false -#node.data: true -# -# 2. You want this node to only serve as a master: to not store any data and -# to have free resources. This will be the "coordinator" of your cluster. +# ======================== Elasticsearch Configuration ========================= # -#node.master: true -#node.data: false +# NOTE: Elasticsearch comes with reasonable defaults for most settings. +# Before you set out to tweak and tune the configuration, make sure you +# understand what are you trying to accomplish and the consequences. # -# 3. You want this node to be neither master nor data node, but -# to act as a "search load balancer" (fetching data from nodes, -# aggregating results, etc.) +# The primary way of configuring a node is via this file. This template lists +# the most important settings you may want to configure for a production cluster. # -#node.master: false -#node.data: false - -# Use the Cluster Health API [http://localhost:9200/_cluster/health], the -# Node Info API [http://localhost:9200/_nodes] or GUI tools -# such as , -# , -# and -# to inspect the cluster state. - -# A node can have generic attributes associated with it, which can later be used -# for customized shard allocation filtering, or allocation awareness. An attribute -# is a simple key value pair, similar to node.key: value, here is an example: -# -#node.rack: rack314 - -# By default, multiple nodes are allowed to start from the same installation location -# to disable it, set the following: -node.max_local_storage_nodes: 1 - - -#################################### Index #################################### - -# You can set a number of options (such as shard/replica options, mapping -# or analyzer definitions, translog settings, ...) for indices globally, -# in this file. +# Please consult the documentation for further information on configuration options: +# https://www.elastic.co/guide/en/elasticsearch/reference/index.html # -# Note, that it makes more sense to configure index settings specifically for -# a certain index, either when creating it or by using the index templates API. +# ---------------------------------- Cluster ----------------------------------- # -# See and -# -# for more information. - -# Set the number of shards (splits) of an index (5 by default): +# Use a descriptive name for your cluster: # -#index.number_of_shards: 5 - -# Set the number of replicas (additional copies) of an index (1 by default): +#cluster.name: my-application +cluster.name: ${{FQDN}}-policy-sa # -#index.number_of_replicas: 1 - -# Note, that for development on a local machine, with small indices, it usually -# makes sense to "disable" the distributed features: +# ------------------------------------ Node ------------------------------------ # -index.number_of_shards: 1 -index.number_of_replicas: 0 - -# These settings directly affect the performance of index and search operations -# in your cluster. Assuming you have enough machines to hold shards and -# replicas, the rule of thumb is: +# Use a descriptive name for the node: # -# 1. Having more *shards* enhances the _indexing_ performance and allows to -# _distribute_ a big index across machines. -# 2. Having more *replicas* enhances the _search_ performance and improves the -# cluster _availability_. +#node.name: node-1 +node.name: "${{FQDN}}" # -# The "number_of_shards" is a one-time setting for an index. 
+# Add custom attributes to the node: # -# The "number_of_replicas" can be increased or decreased anytime, -# by using the Index Update Settings API. +#node.attr.rack: r1 # -# Elasticsearch takes care about load balancing, relocating, gathering the -# results from nodes, etc. Experiment with different settings to fine-tune -# your setup. - -# Use the Index Status API () to inspect -# the index status. - - -#################################### Paths #################################### - -# Path to directory containing configuration (this file and logging.yml): +# ----------------------------------- Paths ------------------------------------ # -#path.conf: /path/to/conf - -# Path to directory where to store index data allocated for this node. +# Path to directory where to store the data (separate multiple locations by comma): # #path.data: /path/to/data # -# Can optionally include more than one location, causing data to be striped across -# the locations (a la RAID 0) on a file level, favouring locations with most free -# space on creation. For example: -# -#path.data: /path/to/data1,/path/to/data2 - -# Path to temporary files: -# -#path.work: /path/to/work - # Path to log files: # -#path.logs: /path/to/logs -path.logs: ${{POLICY_HOME}}/logs - -# Path to where plugins are installed: +path.logs: ${{POLICY_LOGS}} # -#path.plugins: /path/to/plugins - - -#################################### Plugin ################################### - -# If a plugin listed here is not installed for current node, the node will not start. +# ----------------------------------- Memory ----------------------------------- # -#plugin.mandatory: mapper-attachments,lang-groovy - - -################################### Memory #################################### - -# Elasticsearch performs poorly when JVM starts swapping: you should ensure that -# it _never_ swaps. +# Lock the memory on startup: # -# Set this property to true to lock the memory: +#bootstrap.memory_lock: true # -#bootstrap.mlockall: true - -# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set -# to the same value, and that the machine has enough memory to allocate -# for Elasticsearch, leaving enough memory for the operating system itself. +# Make sure that the heap size is set to about half the memory available +# on the system and that the owner of the process is allowed to use this +# limit. # -# You should also make sure that the Elasticsearch process is allowed to lock -# the memory, eg. by using `ulimit -l unlimited`. - - -############################## Network And HTTP ############################### - -# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens -# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node -# communication. (the range means that if the port is busy, it will automatically -# try the next port). - -# Set the bind address specifically (IPv4 or IPv6): +# Elasticsearch performs poorly when the system is swapping the memory. # -#network.bind_host: 10.00.10.00 - -# Set the address other nodes will use to communicate with this node. If not -# set, it is automatically derived. It must point to an actual IP address. 
+# ---------------------------------- Network ----------------------------------- # -#network.publish_host: 10.00.10.00 - -# Set both 'bind_host' and 'publish_host': +# Set the bind address to a specific IP (IPv4 or IPv6): # -#network.host: 10.00.10.00 - -# Set a custom port for the node to node communication (9300 by default): +# Only allow to run on localhost so it can't be queried from outside +network.bind_host: ["_local_"] # -#transport.tcp.port: 9300 - -# Enable compression for all communication between nodes (disabled by default): -# -#transport.tcp.compress: true - -# Set a custom port to listen for HTTP traffic: +# Set a custom port for HTTP: # #http.port: 9200 - -# Set a custom allowed content length: # -#http.max_content_length: 100mb - -# Disable HTTP completely: +# For more information, consult the network module documentation. # -#http.enabled: false - - -################################### Gateway ################################### - -# The gateway allows for persisting the cluster state between full cluster -# restarts. Every change to the state (such as adding an index) will be stored -# in the gateway, and when the cluster starts up for the first time, -# it will read its state from the gateway. - -# There are several types of gateway implementations. For more information, see -# . - -# The default gateway type is the "local" gateway (recommended): +# --------------------------------- Discovery ---------------------------------- # -#gateway.type: local - -# Settings below control how and when to start the initial recovery process on -# a full cluster restart (to reuse as much local data as possible when using shared -# gateway). - -# Allow recovery process after N nodes in a cluster are up: +# Pass an initial list of hosts to perform discovery when new node is started: +# The default list of hosts is ["127.0.0.1", "[::1]"] # -#gateway.recover_after_nodes: 1 - -# Set the timeout to initiate the recovery process, once the N nodes -# from previous setting are up (accepts time value): +#discovery.zen.ping.unicast.hosts: ["host1", "host2"] # -#gateway.recover_after_time: 5m - -# Set how many nodes are expected in this cluster. Once these N nodes -# are up (and recover_after_nodes is met), begin recovery process immediately -# (without waiting for recover_after_time to expire): +# Prevent the "split brain" by configuring the majority of nodes (total number of master-eligible nodes / 2 + 1): # -#gateway.expected_nodes: 2 - - -############################# Recovery Throttling ############################# - -# These settings allow to control the process of shards allocation between -# nodes during initial recovery, replica allocation, rebalancing, -# or when adding and removing nodes. - -# Set the number of concurrent recoveries happening on a node: +#discovery.zen.minimum_master_nodes: 3 # -# 1. During the initial recovery +# For more information, consult the zen discovery module documentation. # -#cluster.routing.allocation.node_initial_primaries_recoveries: 4 +# ---------------------------------- Gateway ----------------------------------- # -# 2. During adding/removing nodes, rebalancing, etc +# Block initial recovery after a full cluster restart until N nodes are started: # -#cluster.routing.allocation.node_concurrent_recoveries: 2 - -# Set to throttle throughput when recovering (eg. 
100mb, by default 20mb): +#gateway.recover_after_nodes: 3 # -#indices.recovery.max_bytes_per_sec: 20mb - -# Set to limit the number of open concurrent streams when -# recovering a shard from a peer: +# For more information, consult the gateway module documentation. # -#indices.recovery.concurrent_streams: 5 - - -################################## Discovery ################################## - -# Discovery infrastructure ensures nodes can be found within a cluster -# and master node is elected. Multicast discovery is the default. - -# Set to ensure a node sees N other master eligible nodes to be considered -# operational within the cluster. This should be set to a quorum/majority of -# the master-eligible nodes in the cluster. +# ---------------------------------- Various ----------------------------------- # -#discovery.zen.minimum_master_nodes: 1 - -# Set the time to wait for ping responses from other nodes when discovering. -# Set this option to a higher value on a slow or congested network -# to minimize discovery failures: +# Require explicit names when deleting indices: # -#discovery.zen.ping.timeout: 3s - -# For more information, see -# - -# Unicast discovery allows to explicitly control which nodes will be used -# to discover the cluster. It can be used when multicast is not present, -# or to restrict the cluster communication-wise. -# -# 1. Disable multicast discovery (enabled by default): -# -#discovery.zen.ping.multicast.enabled: false -# -# 2. Configure an initial list of master nodes in the cluster -# to perform discovery when new nodes (master or data) are started: -# -#discovery.zen.ping.unicast.hosts: ["host1", "host2:port"] - -# EC2 discovery allows to use AWS EC2 API in order to perform discovery. -# -# You have to install the cloud-aws plugin for enabling the EC2 discovery. -# -# For more information, see -# -# -# See -# for a step-by-step tutorial. - -# GCE discovery allows to use Google Compute Engine API in order to perform discovery. -# -# You have to install the cloud-gce plugin for enabling the GCE discovery. -# -# For more information, see . - -# Azure discovery allows to use Azure API in order to perform discovery. -# -# You have to install the cloud-azure plugin for enabling the Azure discovery. -# -# For more information, see . - -################################## Slow Log ################################## - -# Shard level query and fetch threshold logging. - -#index.search.slowlog.threshold.query.warn: 10s -#index.search.slowlog.threshold.query.info: 5s -#index.search.slowlog.threshold.query.debug: 2s -#index.search.slowlog.threshold.query.trace: 500ms - -#index.search.slowlog.threshold.fetch.warn: 1s -#index.search.slowlog.threshold.fetch.info: 800ms -#index.search.slowlog.threshold.fetch.debug: 500ms -#index.search.slowlog.threshold.fetch.trace: 200ms - -#index.indexing.slowlog.threshold.index.warn: 10s -#index.indexing.slowlog.threshold.index.info: 5s -#index.indexing.slowlog.threshold.index.debug: 2s -#index.indexing.slowlog.threshold.index.trace: 500ms - -################################## GC Logging ################################ - -#monitor.jvm.gc.young.warn: 1000ms -#monitor.jvm.gc.young.info: 700ms -#monitor.jvm.gc.young.debug: 400ms - -#monitor.jvm.gc.old.warn: 10s -#monitor.jvm.gc.old.info: 5s -#monitor.jvm.gc.old.debug: 2s - -################################## Security ################################ - -# Uncomment if you want to enable JSONP as a valid return transport on the -# http server. 
-# With this enabled, it may pose a security risk, so disabling
-# it unless you need it is recommended (it is disabled by default).
-#
-#http.jsonp.enable: true
-
-discovery.zen.ping.multicast.enabled: false
-node.local: true
-action.auto_create_index: false
+#action.destructive_requires_name: true
+#--------------------------------- Scripting -----------------------------------
+# These will be removed in v6.0
+script.inline: false
+script.stored: false
+script.file: false
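
For reference, the ${{FQDN}} and ${{POLICY_LOGS}} tokens in the new file are template placeholders that appear to be substituted at install time. Below is a minimal sketch of how the resulting elasticsearch.yml might look once they are filled in; the hostname and log path are hypothetical example values, not taken from this change:

# Hypothetical rendered configuration, assuming the installer substitutes
# FQDN=policy.example.org and POLICY_LOGS=/var/log/onap/policy (example values only).
cluster.name: policy.example.org-policy-sa
node.name: "policy.example.org"
path.logs: /var/log/onap/policy
# Bind only to loopback so the node cannot be queried from outside the host.
network.bind_host: ["_local_"]
# Dynamic scripting stays disabled, matching the lines added above.
script.inline: false
script.stored: false
script.file: false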
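
The discovery comment kept from the upstream template gives the quorum rule for avoiding split brain: total number of master-eligible nodes / 2 + 1. As a worked example under an assumed three-node, all-master-eligible deployment (not part of this change), integer division gives 3 / 2 + 1 = 2, so the settings would be:

# Example only, assuming three master-eligible nodes on hypothetical hosts node-1..node-3.
discovery.zen.ping.unicast.hosts: ["node-1.example.org", "node-2.example.org", "node-3.example.org"]
discovery.zen.minimum_master_nodes: 2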