src/main/java/org/onap/dmaap/dmf/mr/beans/DMaaPKafkaConsumerFactory.java
/*******************************************************************************
 *  ============LICENSE_START=======================================================
 *  org.onap.dmaap
 *  ================================================================================
 *  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
 *  ================================================================================
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *  ============LICENSE_END=========================================================
 *
 *  ECOMP is a trademark and service mark of AT&T Intellectual Property.
 *
 *******************************************************************************/
package org.onap.dmaap.dmf.mr.beans;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.springframework.beans.factory.annotation.Qualifier;

import com.att.ajsc.filemonitor.AJSCPropertiesMap;
import org.onap.dmaap.dmf.mr.CambriaApiException;
import org.onap.dmaap.dmf.mr.backends.Consumer;
import org.onap.dmaap.dmf.mr.backends.ConsumerFactory;
import org.onap.dmaap.dmf.mr.backends.MetricsSet;
import org.onap.dmaap.dmf.mr.backends.kafka.Kafka011Consumer;
import org.onap.dmaap.dmf.mr.backends.kafka.Kafka011ConsumerUtil;
import org.onap.dmaap.dmf.mr.backends.kafka.KafkaConsumerCache;
import org.onap.dmaap.dmf.mr.backends.kafka.KafkaConsumerCache.KafkaConsumerCacheException;
import org.onap.dmaap.dmf.mr.backends.kafka.KafkaLiveLockAvoider2;
import org.onap.dmaap.dmf.mr.backends.kafka.LiveLockAvoidance;
import org.onap.dmaap.dmf.mr.constants.CambriaConstants;
import org.onap.dmaap.dmf.mr.utils.ConfigurationReader;
import org.onap.dmaap.dmf.mr.utils.Utils;
import com.att.eelf.configuration.EELFLogger;
import com.att.eelf.configuration.EELFManager;
import com.att.nsa.drumlin.till.nv.rrNvReadable.missingReqdSetting;

/**
 * @author nilanjana.maity
 *
 */
public class DMaaPKafkaConsumerFactory implements ConsumerFactory {

    private static final EELFLogger log = EELFManager.getInstance().getLogger(DMaaPKafkaConsumerFactory.class);

    /**
     * Constructor. Initializes the consumer cache and, when available, the
     * live-lock avoider for this API node.
     *
     * @param metrics
     * @param curator
     * @param kafkaLiveLockAvoider
     * @throws missingReqdSetting
     * @throws KafkaConsumerCacheException
     * @throws UnknownHostException
     */
    public DMaaPKafkaConsumerFactory(@Qualifier("dMaaPMetricsSet") MetricsSet metrics,
            @Qualifier("curator") CuratorFramework curator,
            @Qualifier("kafkalockavoid") KafkaLiveLockAvoider2 kafkaLiveLockAvoider)
            throws missingReqdSetting, KafkaConsumerCacheException, UnknownHostException {

        String apiNodeId = AJSCPropertiesMap.getProperty(CambriaConstants.msgRtr_prop,
                CambriaConstants.kSetting_ApiNodeIdentifier);
        if (apiNodeId == null) {
            apiNodeId = InetAddress.getLocalHost().getCanonicalHostName() + ":" + CambriaConstants.kDefault_Port;
        }

        log.info("This Cambria API Node identifies itself as [" + apiNodeId + "].");
        final String mode = CambriaConstants.DMAAP;

        fkafkaBrokers = AJSCPropertiesMap.getProperty(CambriaConstants.msgRtr_prop, "kafka.metadata.broker.list");
        if (null == fkafkaBrokers) {
            fkafkaBrokers = "localhost:9092";
        }

        boolean kSetting_EnableCache = kDefault_IsCacheEnabled;
        String strkSetting_EnableCache = AJSCPropertiesMap.getProperty(CambriaConstants.msgRtr_prop,
                "cambria.consumer.cache.enabled");
        if (null != strkSetting_EnableCache) {
            kSetting_EnableCache = Boolean.parseBoolean(strkSetting_EnableCache);
        }

        final boolean isCacheEnabled = kSetting_EnableCache;

        fCache = isCacheEnabled ? KafkaConsumerCache.getInstance() : null;
        if (fCache != null) {
            fCache.setfMetrics(metrics);
            fCache.setfApiId(apiNodeId);
            fCache.startCache(mode, curator);
            if (kafkaLiveLockAvoider != null) {
                kafkaLiveLockAvoider.startNewWatcherForServer(apiNodeId, makeAvoidanceCallback(apiNodeId));
                fkafkaLiveLockAvoider = kafkaLiveLockAvoider;
            }
        }
    }
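
    /*
     * Illustrative only, not part of the original source: the constructor
     * above consults these keys in the CambriaConstants.msgRtr_prop
     * properties file. The values shown are examples; when a key is absent,
     * the fallbacks coded above apply (localhost:9092 for the broker list,
     * kDefault_IsCacheEnabled for the cache flag).
     *
     *   kafka.metadata.broker.list=localhost:9092
     *   cambria.consumer.cache.enabled=true
     */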

    /*
     * getConsumerFor
     *
     * @see org.onap.dmaap.dmf.mr.backends.ConsumerFactory#getConsumerFor(java.lang.String,
     * java.lang.String, java.lang.String, int, java.lang.String)
     *
     * Used by EventServiceImpl.getEvents() to obtain a Kafka consumer, either
     * from the Kafka consumer cache or by creating a new connection. It also
     * makes the list of other consumer objects in the same consumer group
     * available to the KafkaConsumer object; that list may be used when
     * handling poll-rebalancing issues.
     */
    @Override
    public Consumer getConsumerFor(String topic, String consumerGroupName, String consumerId, int timeoutMs,
            String remotehost) throws UnavailableException, CambriaApiException {
        Kafka011Consumer kc;

        // Synchronize on a per-(topic, consumer group) object; computeIfAbsent
        // makes the lookup-or-create of that lock object atomic.
        final Object syncObject = synchash.computeIfAbsent(topic + consumerGroupName, k -> new Object());

        synchronized (syncObject) {
            try {
                kc = (fCache != null) ? fCache.getConsumerFor(topic, consumerGroupName, consumerId) : null;
            } catch (KafkaConsumerCacheException e) {
                log.error("Error occurred in Kafka caching: " + e + "  " + topic + "::" + consumerGroupName
                        + "::" + consumerId);
                throw new UnavailableException(e);
            }

            // Ideally, if the consumer is cached, the flow below is skipped. If it
            // is not cached yet, create it for the first time on this node.
            if (kc == null) {

                log.info("Kafka consumer cache miss for " + topic + "::" + consumerGroupName + "::" + consumerId);

                final InterProcessMutex ipLock = new InterProcessMutex(ConfigurationReader.getCurator(),
                        "/consumerFactory/" + topic + "/" + consumerGroupName + "/" + consumerId);
                boolean locked = false;

                try {
                    locked = ipLock.acquire(30, TimeUnit.SECONDS);
                    if (!locked) {
                        log.info("Could not acquire lock in order to create (topic, group, consumer) = " + "(" + topic
                                + ", " + consumerGroupName + ", " + consumerId + ") from " + remotehost);
                        throw new UnavailableException(
                                "Could not acquire lock in order to create (topic, group, consumer) = " + "(" + topic
                                        + ", " + consumerGroupName + ", " + consumerId + ") " + remotehost);
                    }

                    log.info("Creating Kafka consumer for group [" + consumerGroupName + "], consumer [" + consumerId
                            + "], on topic [" + topic + "].");

                    if (fCache != null) {
                        fCache.signalOwnership(topic, consumerGroupName, consumerId);
                    }

                    final Properties props = createConsumerConfig(topic, consumerGroupName, consumerId);
                    final long createTimeMs = System.currentTimeMillis();
                    KafkaConsumer<String, String> cc = new KafkaConsumer<>(props);
                    kc = new Kafka011Consumer(topic, consumerGroupName, consumerId, cc, fkafkaLiveLockAvoider);
                    log.info("Kafka stream created in " + (System.currentTimeMillis() - createTimeMs) + " ms");

                    if (fCache != null) {
                        fCache.putConsumerFor(topic, consumerGroupName, consumerId, kc);
                    }

                } catch (org.I0Itec.zkclient.exception.ZkTimeoutException x) {
                    log.info("Kafka consumer couldn't connect to ZK. " + x + " " + consumerGroupName + "/" + consumerId);
                    throw new UnavailableException("Couldn't connect to ZK.");
                } catch (KafkaConsumerCacheException e) {
                    log.info("Failed to cache consumer (this may have performance implications): " + e.getMessage()
                            + " " + consumerGroupName + "/" + consumerId);
                } catch (UnavailableException u) {
                    log.info("Failed while acquiring the consumer factory lock: " + u.getMessage() + " "
                            + consumerGroupName + "/" + consumerId);
                    throw new UnavailableException("Error while acquiring consumer factory lock " + u.getMessage(), u);
                } catch (Exception e) {
                    log.error("Failed to create Kafka consumer: " + e.getMessage() + " " + consumerGroupName + "/"
                            + consumerId);
                } finally {
                    if (locked) {
                        try {
                            ipLock.release();
                        } catch (Exception e) {
                            throw new UnavailableException("Error while releasing consumer factory lock" + e, e);
                        }
                    }
                }
            }
        }
        return kc;
    }
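
    /*
     * Illustrative only, not part of the original source: a minimal sketch of
     * how a caller such as EventServiceImpl.getEvents() might use the factory
     * method above. The topic, group, and client names are made up, and the
     * nextMessage() loop assumes the org.onap.dmaap.dmf.mr.backends.Consumer
     * API.
     *
     *   Consumer consumer = factory.getConsumerFor("SOME.TOPIC", "cg1", "client-0", 15000, remoteHost);
     *   Consumer.Message msg;
     *   while ((msg = consumer.nextMessage()) != null) {
     *       handle(msg.getMessage());  // hypothetical handler
     *   }
     */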

    @Override
    public synchronized void destroyConsumer(String topic, String consumerGroup, String clientId) {
        if (fCache != null) {
            fCache.dropConsumer(topic, consumerGroup, clientId);
        }
    }

    @Override
    public synchronized Collection<? extends Consumer> getConsumers() {
        // Guard against a disabled cache, as destroyConsumer() does.
        return (fCache != null) ? fCache.getConsumers() : Collections.emptyList();
    }

    @Override
    public synchronized void dropCache() {
        if (fCache != null) {
            fCache.dropAllConsumers();
        }
    }

    private KafkaConsumerCache fCache;
    private KafkaLiveLockAvoider2 fkafkaLiveLockAvoider;
    private String fkafkaBrokers;

    private static String makeLongKey(String key, String prefix) {
        return prefix + "." + key;
    }

    private void transferSettingIfProvided(Properties target, String key, String prefix) {
        String keyVal = AJSCPropertiesMap.getProperty(CambriaConstants.msgRtr_prop, makeLongKey(key, prefix));

        if (null != keyVal) {
            log.info("Setting [" + key + "] to " + keyVal + ".");
            target.put(key, keyVal);
        }
    }
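
    /*
     * Illustrative only, not part of the original source: a configured
     * override such as
     *
     *   kafka.session.timeout.ms=30000
     *
     * in the CambriaConstants.msgRtr_prop properties file is looked up under
     * the long key built by makeLongKey() and copied into the target
     * Properties as "session.timeout.ms". The value shown is an example, not
     * a default.
     */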

    /**
     * Name: createConsumerConfig
     *
     * @param topic
     * @param groupId
     * @param consumerId
     * @return Properties
     *
     * Creates the Properties required to open a Kafka connection. The group
     * name is rewritten in the form groupId--topic so that the same group id
     * used across multiple topics stays unique; sharing one group id across
     * topics can trigger frequent consumer rebalancing on all of them.
     */
    private Properties createConsumerConfig(String topic, String groupId, String consumerId) {
        final Properties props = new Properties();
        // fakeGroupName avoids one consumer group spanning multiple topics. Do not change this logic.
        // Fix for CPFMF-644:
        final String fakeGroupName = groupId + "--" + topic;
        props.put("group.id", fakeGroupName);
        props.put("enable.auto.commit", "false"); // 0.11
        props.put("bootstrap.servers", fkafkaBrokers);
        if (Utils.isCadiEnabled()) {
            props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='"
                    + Utils.getKafkaproperty() + "';");
            props.put("security.protocol", "SASL_PLAINTEXT");
            props.put("sasl.mechanism", "PLAIN");
        }
        props.put("client.id", consumerId);

        // additional settings: start with our defaults, then pull in configured overrides
        populateKafkaInternalDefaultsMap();
        for (String key : KafkaConsumerKeys) {
            transferSettingIfProvided(props, key, "kafka");
        }

        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        return props;
    }
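
    /*
     * Illustrative only, not part of the original source: for groupId "cg1"
     * and topic "TOPIC.A" (made-up names), the method above yields group.id
     * "cg1--TOPIC.A", and the resulting Properties can be handed straight to
     * the Kafka client:
     *
     *   Properties p = createConsumerConfig("TOPIC.A", "cg1", "client-0");
     *   KafkaConsumer<String, String> consumer = new KafkaConsumer<>(p);
     *   consumer.subscribe(java.util.Collections.singletonList("TOPIC.A"));
     */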

    // Deduplicated; the invalid "fetch.max.wait.bytes" entry is corrected to
    // the real Kafka consumer setting "fetch.max.wait.ms".
    private static final String[] KafkaConsumerKeys = { "bootstrap.servers", "heartbeat.interval.ms",
            "auto.offset.reset", "exclude.internal.topics", "session.timeout.ms", "fetch.max.bytes",
            "auto.commit.interval.ms", "connections.max.idle.ms", "fetch.min.bytes", "isolation.level",
            "request.timeout.ms", "fetch.max.wait.ms", "reconnect.backoff.max.ms", "max.partition.fetch.bytes",
            "reconnect.backoff.ms", "retry.backoff.ms", "max.poll.interval.ms", "max.poll.records",
            "receive.buffer.bytes", "metadata.max.age.ms" };

    /**
     * Placeholder for populating internal Kafka defaults (e.g. consumer
     * timeout, ZooKeeper timeout); currently a no-op.
     */
    private static void populateKafkaInternalDefaultsMap() { }

    /*
     * Builds the live-lock avoidance callback for this API node. (Note kept
     * from the original: the starterIncrement value was just to emulate
     * calling certain consumers; in that test app all the consumers were
     * local.)
     */
    private LiveLockAvoidance makeAvoidanceCallback(final String appId) {

        return new LiveLockAvoidance() {

            @Override
            public String getAppId() {
                return appId;
            }

            @Override
            public void handleRebalanceUnlock(String groupName) {
                log.info("FORCE A POLL NOW FOR appId: [{}] group: [{}]", getAppId(), groupName);
                Kafka011ConsumerUtil.forcePollOnConsumer(groupName + "::");
            }

        };
    }
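
    /*
     * Illustrative only, not part of the original source: when
     * KafkaLiveLockAvoider2 fires the callback built above, every cached
     * consumer whose key begins with the given group name is forced to poll.
     * The app id and group name below are made up:
     *
     *   LiveLockAvoidance cb = makeAvoidanceCallback("myhost:3904");
     *   cb.handleRebalanceUnlock("cg1--TOPIC.A");
     *   // delegates to Kafka011ConsumerUtil.forcePollOnConsumer("cg1--TOPIC.A::")
     */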

    @SuppressWarnings("rawtypes")
    @Override
    public HashMap getConsumerForKafka011(String topic, String consumerGroupName, String consumerId, int timeoutMs,
            String remotehost) throws UnavailableException, CambriaApiException {
        // Not implemented; retained to satisfy the ConsumerFactory interface.
        return null;
    }

    // Per-(topic + consumer group) lock objects; a ConcurrentHashMap so that
    // computeIfAbsent in getConsumerFor() is atomic.
    private final ConcurrentHashMap<String, Object> synchash = new ConcurrentHashMap<>();

}