1 ###############################################################################
2 # ============LICENSE_START=======================================================
4 # ================================================================================
5 # Copyright © 2017 AT&T Intellectual Property. All rights reserved.
6 # ================================================================================
7 # Licensed under the Apache License, Version 2.0 (the "License");
8 # you may not use this file except in compliance with the License.
9 # You may obtain a copy of the License at
10 # http://www.apache.org/licenses/LICENSE-2.0
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 # ============LICENSE_END=========================================================
19 # ECOMP is a trademark and service mark of AT&T Intellectual Property.
21 ###############################################################################
22 ###############################################################################
24 ## Cambria API Server config
26 ## - Default values are shown as commented settings.
29 ###############################################################################
33 ## - 3904 is standard as of 7/29/14.
35 ## Zookeeper Connection
37 ## Both Cambria and Kafka make use of Zookeeper.
39 #config.zk.servers=172.18.1.1
40 config.zk.servers=<zookeeper_host>
41 #config.zk.root=/fe3c/cambria/config
44 ###############################################################################
48 ## Items below are passed through to Kafka's producer and consumer
49 ## configurations (after removing "kafka.")
50 ## if you want to change request.required.acks it can take this one value
51 #kafka.metadata.broker.list=localhost:9092,localhost:9093
52 kafka.metadata.broker.list=<kafka_host>:<kafka_port>
53 ##kafka.request.required.acks=-1
54 #kafka.client.zookeeper=${config.zk.servers}
55 consumer.timeout.ms=100
56 zookeeper.connection.timeout.ms=6000
57 zookeeper.session.timeout.ms=6000
58 zookeeper.sync.time.ms=2000
59 auto.commit.interval.ms=1000
fetch.message.max.bytes=1000000
61 auto.commit.enable=false
64 ###############################################################################
## Some data stored in the config system is sensitive -- API keys and secrets,
## for example. To protect it, we use an encryption layer for this section.
## The key is a base64-encoded AES key. This must be created/configured for
## each installation.
74 #cambria.secureConfig.key=
76 ## The initialization vector is a 16 byte value specific to the secured store.
77 ## This must be created/configured for each installation.
78 #cambria.secureConfig.iv=
## NOTE(review): encryption key, IV, and admin secret are hard-coded here and
## checked into source control — these should be generated per installation
## and supplied via deployment configuration, not committed. Verify before use.
cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
authentication.adminSecret=fe3cCompound
84 #cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
85 #cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
88 ###############################################################################
92 ## Kafka expects live connections from the consumer to the broker, which
93 ## obviously doesn't work over connectionless HTTP requests. The Cambria
94 ## server proxies HTTP requests into Kafka consumer sessions that are kept
95 ## around for later re-use. Not doing so is costly for setup per request,
96 ## which would substantially impact a high volume consumer's performance.
98 ## This complicates Cambria server failover, because we often need server
99 ## A to close its connection before server B brings up the replacement.
102 ## The consumer cache is normally enabled.
103 #cambria.consumer.cache.enabled=true
105 ## Cached consumers are cleaned up after a period of disuse. The server inspects
106 ## consumers every sweepFreqSeconds and will clean up any connections that are
107 ## dormant for touchFreqMs.
108 #cambria.consumer.cache.sweepFreqSeconds=15
109 #cambria.consumer.cache.touchFreqMs=120000
111 ## The cache is managed through ZK. The default value for the ZK connection
112 ## string is the same as config.zk.servers.
113 #cambria.consumer.cache.zkConnect=${config.zk.servers}
116 ## Shared cache information is associated with this node's name. The default
117 ## name is the hostname plus the HTTP service port this host runs on. (The
## hostname is determined via InetAddress.getLocalHost().getCanonicalHostName(),
119 ## which is not always adequate.) You can set this value explicitly here.
121 #cambria.api.node.identifier=<use-something-unique-to-this-instance>
123 ###############################################################################
127 ## This server can report its metrics periodically on a topic.
129 #metrics.send.cambria.enabled=true
130 #metrics.send.cambria.topic=cambria.apinode.metrics #msgrtr.apinode.metrics.dmaap
131 #metrics.send.cambria.sendEverySeconds=60
133 cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
135 ##############################################################################
137 maxcontentlength=10000
140 ##############################################################################
142 msgRtr.namespace.aaf=com.onap.dmaap.mr.topic
143 msgRtr.topicfactory.aaf=org.openecomp.dmaapBC.topicFactory|:org.openecomp.dmaapBC.topic:
144 enforced.topic.name.AAF=com.onap
146 transidUEBtopicreqd=false
147 defaultNSforUEB=com.onap.dmaap.mr.ueb
148 ##############################################################################
150 msgRtr.mirrormakeradmin.aaf=com.onap.dmaap.mr.dev.mirrormaker|*|admin
151 msgRtr.mirrormakeruser.aaf=com.onap.dmaap.mr.dev.mirrormaker|*|user
152 msgRtr.mirrormakeruser.aaf.create=com.onap.dmaap.mr.dev.topicFactory|:com.onap.dmaap.mr.dev.topic:
153 msgRtr.mirrormaker.timeout=15000
154 msgRtr.mirrormaker.topic=com.onap.dmaap.mr.prod.mm.agent
155 msgRtr.mirrormaker.consumergroup=mmagentserver
156 msgRtr.mirrormaker.consumerid=1