/*******************************************************************************
 * ============LICENSE_START=======================================================
 * ================================================================================
 * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
 * ================================================================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * ============LICENSE_END=========================================================
 *
 * ECOMP is a trademark and service mark of AT&T Intellectual Property.
 *
 *******************************************************************************/
package org.onap.dmaap.dmf.mr.beans;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ExecutionException;

import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.exception.ZkNoNodeException;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.KafkaFuture;
import org.json.JSONArray;
import org.json.JSONObject;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.util.StringUtils;

import org.onap.dmaap.dmf.mr.CambriaApiException;
import org.onap.dmaap.dmf.mr.constants.CambriaConstants;
import org.onap.dmaap.dmf.mr.metabroker.Broker1;
import org.onap.dmaap.dmf.mr.metabroker.Topic;
import org.onap.dmaap.dmf.mr.utils.ConfigurationReader;
import org.onap.dmaap.dmf.mr.utils.Utils;

import com.att.eelf.configuration.EELFLogger;
import com.att.eelf.configuration.EELFManager;
import com.att.nsa.configs.ConfigDb;
import com.att.nsa.configs.ConfigDbException;
import com.att.nsa.configs.ConfigPath;
import com.att.nsa.drumlin.service.standards.HttpStatusCodes;
import com.att.nsa.drumlin.till.nv.rrNvReadable;
import com.att.nsa.security.NsaAcl;
import com.att.nsa.security.NsaAclUtils;
import com.att.nsa.security.NsaApiKey;
import com.att.nsa.security.ReadWriteSecuredResource.AccessDeniedException;

/**
 * Class performing all topic operations.
 *
 * @author anowarul.islam
 */
public class DMaaPKafkaMetaBroker implements Broker1 {

    public DMaaPKafkaMetaBroker() {
        fZk = null;
        fCambriaConfig = null;
        fBaseTopicData = null;
        final Properties props = new Properties();
        String fkafkaBrokers = com.att.ajsc.filemonitor.AJSCPropertiesMap.getProperty(CambriaConstants.msgRtr_prop,
                "kafka.metadata.broker.list");
        if (StringUtils.isEmpty(fkafkaBrokers)) {
            fkafkaBrokers = "localhost:9092";
        }

        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, fkafkaBrokers);
        if (Utils.isCadiEnabled()) {
            // CADI is enabled: authenticate to the brokers over SASL_PLAINTEXT
            props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='"
                    + Utils.getKafkaproperty() + "';");
            props.put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
            props.put("sasl.mechanism", "PLAIN");
        }
        fKafkaAdminClient = AdminClient.create(props);
    }
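
    // Logger shared with ConfigurationReader; fKafkaAdminClient is the Kafka
    // admin client used for all topic create/delete calls against the cluster.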
    private static final EELFLogger log = EELFManager.getInstance().getLogger(ConfigurationReader.class);
    private final AdminClient fKafkaAdminClient;

    /**
     * DMaaPKafkaMetaBroker constructor initializing the broker from the
     * Spring-injected property reader, ZooKeeper client, and config DB.
     *
     * @param settings
     * @param zk
     * @param configDb
     */
    public DMaaPKafkaMetaBroker(@Qualifier("propertyReader") rrNvReadable settings,
            @Qualifier("dMaaPZkClient") ZkClient zk, @Qualifier("dMaaPZkConfigDb") ConfigDb configDb) {
        fZk = zk;
        fCambriaConfig = configDb;
        fBaseTopicData = configDb.parse("/topics");
        final Properties props = new Properties();
        String fkafkaBrokers = com.att.ajsc.filemonitor.AJSCPropertiesMap.getProperty(CambriaConstants.msgRtr_prop,
                "kafka.metadata.broker.list");
        if (StringUtils.isEmpty(fkafkaBrokers)) {
            fkafkaBrokers = "localhost:9092";
        }

        if (Utils.isCadiEnabled()) {
            props.put("sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username='admin' password='"
                    + Utils.getKafkaproperty() + "';");
            props.put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
            props.put("sasl.mechanism", "PLAIN");
        }
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, fkafkaBrokers);
        fKafkaAdminClient = AdminClient.create(props);
    }

    public DMaaPKafkaMetaBroker(rrNvReadable settings,
            ZkClient zk, ConfigDb configDb, AdminClient client) {
        fZk = zk;
        fCambriaConfig = configDb;
        fBaseTopicData = configDb.parse("/topics");
        fKafkaAdminClient = client;
    }
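
    // This variant takes a pre-built AdminClient, e.g. so tests can inject a
    // mock instead of connecting to a real cluster.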

    @Override
    public List<Topic> getAllTopics() throws ConfigDbException {
        log.info("Retrieving list of all the topics.");
        final LinkedList<Topic> result = new LinkedList<>();
        try {
            log.info("Retrieving all topics from root: " + zkTopicsRoot);
            final List<String> topics = fZk.getChildren(zkTopicsRoot);
            for (String topic : topics) {
                result.add(new KafkaTopic(topic, fCambriaConfig, fBaseTopicData));
            }
            JSONObject dataObj = new JSONObject();
            dataObj.put("topics", new JSONObject());
            for (String topic : topics) {
                dataObj.getJSONObject("topics").put(topic, new JSONObject());
            }
        } catch (ZkNoNodeException excp) {
            // a very fresh Kafka installation doesn't have any topics or a topics node
            log.error("ZK doesn't have a Kafka topics node at " + zkTopicsRoot, excp);
        }
        return result;
    }

    @Override
    public Topic getTopic(String topic) throws ConfigDbException {
        if (fZk.exists(zkTopicsRoot + "/" + topic)) {
            return getKafkaTopicConfig(fCambriaConfig, fBaseTopicData, topic);
        }
        // else: no such topic in kafka
        return null;
    }

    /**
     * Static method to get a KafkaTopic object for an existing topic.
     *
     * @param db
     * @param base
     * @param topic
     * @return the KafkaTopic backed by the config DB record
     * @throws ConfigDbException
     */
    public static KafkaTopic getKafkaTopicConfig(ConfigDb db, ConfigPath base, String topic) throws ConfigDbException {
        return new KafkaTopic(topic, db, base);
    }

    /**
     * Creating a topic in Kafka and recording it in the config DB.
     */
    @Override
    public Topic createTopic(String topic, String desc, String ownerApiKey, int partitions, int replicas,
            boolean transactionEnabled) throws TopicExistsException, CambriaApiException, ConfigDbException {
        log.info("Creating topic: " + topic);
        try {
            log.info("Check if topic [" + topic + "] exists.");
            // first check for existence "our way"
            final Topic t = getTopic(topic);
            if (t != null) {
                log.info("Could not create topic [" + topic + "]. Topic already exists.");
                throw new TopicExistsException("Could not create topic [" + topic + "]. Topic already exists.");
            }
        } catch (ConfigDbException e1) {
            log.error("Topic [" + topic + "] could not be created. Couldn't check topic data in config db.", e1);
            throw new CambriaApiException(HttpStatusCodes.k503_serviceUnavailable,
                    "Couldn't check topic data in config db.");
        }

        // we only allow up to 3 replicas. (If we don't test this, we get weird
        // results from the cluster, so explicit test and fail.)
        if (replicas < 1 || replicas > 3) {
            log.info("Topic [" + topic + "] could not be created. The replica count must be between 1 and 3.");
            throw new CambriaApiException(HttpStatusCodes.k400_badRequest,
                    "The replica count must be between 1 and 3.");
        }
        if (partitions < 1) {
            log.info("Topic [" + topic + "] could not be created. The partition count must be at least 1.");
            throw new CambriaApiException(HttpStatusCodes.k400_badRequest, "The partition count must be at least 1.");
        }

        // create the topic in Kafka itself
        try {
            final NewTopic topicRequest = new NewTopic(topic, partitions, (short) replicas);
            final CreateTopicsResult ctr = fKafkaAdminClient.createTopics(Arrays.asList(topicRequest));
            final KafkaFuture<Void> ctrResult = ctr.all();
            ctrResult.get();
            // underlying Kafka topic created. now setup our API info
            return createTopicEntry(topic, desc, ownerApiKey, transactionEnabled);
        } catch (InterruptedException e) {
            log.warn("Execution of createTopics was interrupted.");
            Thread.currentThread().interrupt(); // restore the interrupt flag
            throw new ConfigDbException(e);
        } catch (ExecutionException e) {
            log.warn("Execution of createTopics failed: " + e.getCause().getMessage(), e);
            throw new ConfigDbException(e.getCause());
        }
    }
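
    // Illustrative usage (the names below are made up): a caller creates a
    // transactional 3-partition, 3-replica topic like so:
    //   metaBroker.createTopic("org.onap.example.myTopic", "example topic",
    //       "exampleApiKey", 3, 3, true);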

    @Override
    public void deleteTopic(String topic) throws CambriaApiException, TopicExistsException, ConfigDbException {
        log.info("Deleting topic: " + topic);
        try {
            log.info("Deleting topic [" + topic + "] via the Kafka admin client.");
            fKafkaAdminClient.deleteTopics(Arrays.asList(topic));
            log.info("Topic [" + topic + "] deleted successfully.");
        } catch (Exception e) {
            log.error("Failed to delete topic [" + topic + "]. " + e.getMessage(), e);
            throw new ConfigDbException(e);
        }
    }

    private final ZkClient fZk;
    private final ConfigDb fCambriaConfig;
    private final ConfigPath fBaseTopicData;

    private static final String zkTopicsRoot = "/brokers/topics";
    private static final JSONObject kEmptyAcl = new JSONObject();
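
    // Note: kEmptyAcl stands in for owned topics that have no explicit
    // "readers"/"writers" entry in their config record; see the ACL handling
    // in the KafkaTopic constructor below.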

    /**
     * Method providing a KafkaTopic object associated with an owner, a
     * description, and whether transactions are enabled.
     *
     * @param name
     * @param desc
     * @param owner
     * @param transactionEnabled
     * @return the created KafkaTopic entry
     * @throws ConfigDbException
     */
    public KafkaTopic createTopicEntry(String name, String desc, String owner, boolean transactionEnabled)
            throws ConfigDbException {
        return createTopicEntry(fCambriaConfig, fBaseTopicData, name, desc, owner, transactionEnabled);
    }

    /**
     * Static method creating a Kafka topic entry in the config DB.
     *
     * @param db
     * @param basePath
     * @param name
     * @param desc
     * @param owner
     * @param transactionEnabled
     * @return the stored KafkaTopic
     * @throws ConfigDbException
     */
    public static KafkaTopic createTopicEntry(ConfigDb db, ConfigPath basePath, String name, String desc, String owner,
            boolean transactionEnabled) throws ConfigDbException {
        final JSONObject o = new JSONObject();
        o.put("owner", owner);
        o.put("description", desc);
        o.put("txenabled", transactionEnabled);
        db.store(basePath.getChild(name), o.toString());
        return new KafkaTopic(name, db, basePath);
    }
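
    // The record persisted under the topic's config path has this shape:
    //   { "owner": "<apiKey>", "description": "<desc>", "txenabled": true|false }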

    /**
     * Class performing all user operations, such as checking whether a user is
     * eligible to read or write, and permitting a user to read and write.
     *
     * @author anowarul.islam
     */
    public static class KafkaTopic implements Topic {
        /**
         * Constructor that initializes the topic from its config DB record.
         *
         * @param name
         * @param configdb
         * @param baseTopic
         * @throws ConfigDbException
         */
        public KafkaTopic(String name, ConfigDb configdb, ConfigPath baseTopic) throws ConfigDbException {
            fName = name;
            fConfigDb = configdb;
            fBaseTopicData = baseTopic;

            String data = fConfigDb.load(fBaseTopicData.getChild(fName));
            if (data == null) {
                data = "{}";
            }

            final JSONObject o = new JSONObject(data);
            fOwner = o.optString("owner", "");
            fDesc = o.optString("description", "");
            fTransactionEnabled = o.optBoolean("txenabled", false); // default value is false

            // if this topic has an owner, it needs both read/write ACLs. If there's no
            // owner (or it's empty), null is okay -- this is for existing or implicitly
            // created topics.
            JSONObject readers = o.optJSONObject("readers");
            if (readers == null && fOwner.length() > 0) {
                readers = kEmptyAcl;
            }
            fReaders = fromJson(readers);

            JSONObject writers = o.optJSONObject("writers");
            if (writers == null && fOwner.length() > 0) {
                writers = kEmptyAcl;
            }
            fWriters = fromJson(writers);
        }

        private NsaAcl fromJson(JSONObject o) {
            NsaAcl acl = new NsaAcl();
            if (o != null) {
                JSONArray a = o.optJSONArray("allowed");
                if (a != null) {
                    for (int i = 0; i < a.length(); ++i) {
                        String user = a.getString(i);
                        acl.add(user);
                    }
                }
            }
            return acl;
        }
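
        // For reference, the ACL JSON parsed above has the shape:
        //   { "allowed": [ "user1", "user2", ... ] }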

        @Override
        public String getName() {
            return fName;
        }

        @Override
        public String getOwner() {
            return fOwner;
        }

        @Override
        public String getDescription() {
            return fDesc;
        }

        @Override
        public NsaAcl getReaderAcl() {
            return fReaders;
        }

        @Override
        public NsaAcl getWriterAcl() {
            return fWriters;
        }

        @Override
        public void checkUserRead(NsaApiKey user) throws AccessDeniedException {
            NsaAclUtils.checkUserAccess(fOwner, getReaderAcl(), user);
        }

        @Override
        public void checkUserWrite(NsaApiKey user) throws AccessDeniedException {
            NsaAclUtils.checkUserAccess(fOwner, getWriterAcl(), user);
        }

        @Override
        public void permitWritesFromUser(String pubId, NsaApiKey asUser)
                throws ConfigDbException, AccessDeniedException {
            updateAcl(asUser, false, true, pubId);
        }

        @Override
        public void denyWritesFromUser(String pubId, NsaApiKey asUser) throws ConfigDbException, AccessDeniedException {
            updateAcl(asUser, false, false, pubId);
        }

        @Override
        public void permitReadsByUser(String consumerId, NsaApiKey asUser)
                throws ConfigDbException, AccessDeniedException {
            updateAcl(asUser, true, true, consumerId);
        }

        @Override
        public void denyReadsByUser(String consumerId, NsaApiKey asUser)
                throws ConfigDbException, AccessDeniedException {
            updateAcl(asUser, true, false, consumerId);
        }

        private void updateAcl(NsaApiKey asUser, boolean reader, boolean add, String key)
                throws ConfigDbException, AccessDeniedException {
            try {
                final NsaAcl acl = NsaAclUtils.updateAcl(this, asUser, key, reader, add);

                // we have to assume we have current data, or load it again. for the expected use
                // case, assuming we can overwrite the data is fine.
                final JSONObject o = new JSONObject();
                o.put("owner", fOwner);
                o.put("readers", safeSerialize(reader ? acl : fReaders));
                o.put("writers", safeSerialize(reader ? fWriters : acl));
                fConfigDb.store(fBaseTopicData.getChild(fName), o.toString());

                log.info("ACL_UPDATE: " + asUser.getKey() + " " + (add ? "added " : "removed ")
                        + (reader ? "subscriber" : "publisher") + " " + key + " on " + fName);
            } catch (ConfigDbException | AccessDeniedException x) {
                throw x;
            }
        }

        private JSONObject safeSerialize(NsaAcl acl) {
            return acl == null ? null : acl.serialize();
        }

        private final String fName;
        private final ConfigDb fConfigDb;
        private final ConfigPath fBaseTopicData;
        private final String fOwner;
        private final String fDesc;
        private final NsaAcl fReaders;
        private final NsaAcl fWriters;
        private boolean fTransactionEnabled;

        public boolean isTransactionEnabled() {
            return fTransactionEnabled;
        }

        @Override
        public Set<String> getOwners() {
            final TreeSet<String> owners = new TreeSet<>();
            owners.add(fOwner);
            return owners;
        }
    }
}