{{/*
# LICENSE_START=======================================================
# org.onap.dmaap
# ================================================================================
# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# Modifications Copyright © 2021-2022 Nordix Foundation
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
#
###############################################################################
###############################################################################
*/}}
##
## Kafka Connection
##
## Items below are passed through to Kafka's producer and consumer
## configurations (after removing the "kafka." prefix).
## If you want to change request.required.acks, it can take this one value:
#kafka.request.required.acks=-1
kafka.metadata.broker.list={{ include "common.release" . }}-strimzi-kafka-bootstrap:9092
config.zk.servers=127.0.0.1:{{ .Values.global.zkTunnelService.internalPort }}
consumer.timeout.ms=100
zookeeper.connection.timeout.ms=6000
zookeeper.session.timeout.ms=20000
zookeeper.sync.time.ms=2000
auto.commit.interval.ms=1000
fetch.message.max.bytes=1000000
auto.commit.enable=false
#(backoff*retries must exceed zksessiontimeout; here 10000 * 6 = 60000 > 20000)
kafka.rebalance.backoff.ms=10000
kafka.rebalance.max.retries=6

###############################################################################
##
## Secured Config
##
## Some data stored in the config system is sensitive -- API keys and secrets,
## for example. To protect it, we use an encryption layer for this section
## of the config.
##
## The key is a base64-encoded AES key. This must be created/configured for
## each installation.
#cambria.secureConfig.key=
##
## The initialization vector is a 16-byte value specific to the secured store.
## This must be created/configured for each installation.
#cambria.secureConfig.iv=

## Southfield Sandbox
cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
authentication.adminSecret=fe3cCompound
#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==

###############################################################################
##
## Consumer Caching
##
## Kafka expects live connections from the consumer to the broker, which
## obviously doesn't work over connectionless HTTP requests. The Cambria
## server proxies HTTP requests into Kafka consumer sessions that are kept
## around for later re-use. Not doing so would incur the setup cost on every
## request, which would substantially impact a high-volume consumer's
## performance.
##
## This complicates Cambria server failover, because we often need server
## A to close its connection before server B brings up the replacement.
##
## The consumer cache is normally enabled.
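##
## Illustrative note (an assumption based on the defaults shown below, not a
## guarantee of this deployment): with cambria.consumer.cache.sweepFreqSeconds=15
## and cambria.consumer.cache.touchFreqMs=120000, a proxied Kafka consumer that
## has served no HTTP request for about two minutes is closed on the next
## 15-second sweep; the next poll from that client then re-creates the consumer
## session, paying the setup cost described above once.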
#cambria.consumer.cache.enabled=true

## Cached consumers are cleaned up after a period of disuse. The server inspects
## consumers every sweepFreqSeconds and will clean up any connections that are
## dormant for touchFreqMs.
#cambria.consumer.cache.sweepFreqSeconds=15
cambria.consumer.cache.touchFreqMs=120000
##stickforallconsumerrequests=false

## The cache is managed through ZK. The default value for the ZK connection
## string is the same as config.zk.servers.
#cambria.consumer.cache.zkConnect=${config.zk.servers}

##
## Shared cache information is associated with this node's name. The default
## name is the hostname plus the HTTP service port this host runs on. (The
## hostname is determined via InetAddress.getLocalHost().getCanonicalHostName(),
## which is not always adequate.) You can set this value explicitly here.
##
#cambria.api.node.identifier=

#cambria.rateLimit.maxEmptyPollsPerMinute=30
#cambria.rateLimitActual.delay.ms=10

###############################################################################
##
## Metrics Reporting
##
## This server can report its metrics periodically on a topic.
##
#metrics.send.cambria.enabled=true
#metrics.send.cambria.topic=cambria.apinode.metrics    #msgrtr.apinode.metrics.dmaap
#metrics.send.cambria.sendEverySeconds=60

cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
consumer.timeout=17
default.partitions=3
default.replicas=3

##############################################################################
#100mb
maxcontentlength=10000

##############################################################################
#AAF Properties
msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
enforced.topic.name.AAF=org.onap.dmaap.mr
forceAAF=false
useCustomAcls=false
transidUEBtopicreqd=false
defaultNSforUEB=org.onap.dmaap.mr

##############################################################################
#Mirror Maker Agent

msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
msgRtr.mirrormaker.timeout=15000
msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
msgRtr.mirrormaker.consumergroup=mmagentserver
msgRtr.mirrormaker.consumerid=1

kafka.max.poll.interval.ms=300000
kafka.heartbeat.interval.ms=60000
kafka.session.timeout.ms=240000
kafka.max.poll.records=1000
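
## The kafka.* values above are passed through to the Kafka client as
## max.poll.interval.ms, heartbeat.interval.ms, session.timeout.ms and
## max.poll.records once the "kafka." prefix is stripped (see the pass-through
## note in the Kafka Connection section). As a sanity check based on Kafka's
## general guidance -- not a rule imposed by this chart -- heartbeat.interval.ms
## should stay well below session.timeout.ms, which these values satisfy
## (60000 < 240000).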