1 /*******************************************************************************
\r
2 * ============LICENSE_START====================================================
\r
4 * * ===========================================================================
\r
5 * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
\r
6 * * ===========================================================================
\r
7 * * Licensed under the Apache License, Version 2.0 (the "License");
\r
8 * * you may not use this file except in compliance with the License.
\r
9 * * You may obtain a copy of the License at
\r
11 * * http://www.apache.org/licenses/LICENSE-2.0
\r
13 * * Unless required by applicable law or agreed to in writing, software
\r
14 * * distributed under the License is distributed on an "AS IS" BASIS,
\r
15 * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
\r
16 * * See the License for the specific language governing permissions and
\r
17 * * limitations under the License.
\r
18 * * ============LICENSE_END====================================================
\r
20 * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
\r
22 ******************************************************************************/
\r
23 package org.onap.aaf.dao;
\r
25 import java.io.ByteArrayInputStream;
\r
26 import java.io.DataInputStream;
\r
27 import java.lang.reflect.Field;
\r
28 import java.nio.ByteBuffer;
\r
29 import java.util.List;
\r
31 import org.onap.aaf.authz.env.AuthzTrans;
\r
32 import org.onap.aaf.authz.layer.Result;
\r
33 import org.onap.aaf.dao.aaf.cass.Status;
\r
35 import org.onap.aaf.inno.env.TransStore;
\r
36 import com.datastax.driver.core.Cluster;
\r
37 import com.datastax.driver.core.ConsistencyLevel;
\r
38 import com.datastax.driver.core.ResultSet;
\r
39 import com.datastax.driver.core.ResultSetFuture;
\r
44 * Deal with the essentials of Interaction with Cassandra DataStore for all Cassandra DAOs
\r
49 public class CassDAOImpl<TRANS extends TransStore,DATA> extends AbsCassDAO<TRANS, DATA> implements DAO<TRANS,DATA> {
\r
// Marker key under which the acting user's name can be passed along with a transaction.
50 public static final String USER_NAME = "__USER_NAME__";
// CQL verb prefixes for each CRUD operation.
51 protected static final String CREATE_SP = "CREATE ";
52 protected static final String UPDATE_SP = "UPDATE ";
53 protected static final String DELETE_SP = "DELETE ";
54 protected static final String SELECT_SP = "SELECT ";
// Per-DAO labels ("<SimpleName> CREATE/READ/UPDATE/DELETE") used to tag each CRUD call
// when executing prepared statements (see create/read/update/delete below).
56 protected final String C_TEXT = getClass().getSimpleName() + " CREATE";
57 protected final String R_TEXT = getClass().getSimpleName() + " READ";
58 protected final String U_TEXT = getClass().getSimpleName() + " UPDATE";
59 protected final String D_TEXT = getClass().getSimpleName() + " DELETE";
// Cassandra table this DAO targets; used to build CQL in setCRUD and reported by table().
60 private String table;
// Consistency levels applied to reads vs. writes; final, fixed by the constructors.
62 protected final ConsistencyLevel readConsistency,writeConsistency;
64 // Settable only by CachedDAO
65 protected Cached<?, ?> cache;
/*
68 * A Constructor from the originating Cluster. This DAO will open the Session at need,
69 * and shutdown the session when "close()" is called.
 *
 * NOTE(review): the original line numbering skips 70-74 and 77 in this extract — at
 * least one statement (77, presumably "this.table = table;") appears to have been
 * dropped; verify against version control before editing.
 */
75 public CassDAOImpl(TRANS trans, String name, Cluster cluster, String keyspace, Class<DATA> dataClass, String table, ConsistencyLevel read, ConsistencyLevel write) {
76 super(trans, name, cluster,keyspace,dataClass);
78 readConsistency = read;
79 writeConsistency = write;
/*
83 * A Constructor to share Session with other DAOs.
85 * This method gets the Session and Cluster information from the calling DAO, and won't
86 * touch the Session on closure.
 *
 * NOTE(review): original lines 87-90 and 93 are absent from this extract (93 likely
 * assigned "this.table = table;"); verify against version control.
 */
91 public CassDAOImpl(TRANS trans, String name, AbsCassDAO<TRANS,?> aDao, Class<DATA> dataClass, String table, ConsistencyLevel read, ConsistencyLevel write) {
92 super(trans, name, aDao,dataClass);
94 readConsistency = read;
95 writeConsistency = write;
// Prepared-statement holders for each CRUD operation. A null entry means that
// operation is disabled (see disable()); replace() swaps in custom statements.
98 protected PSInfo createPS;
99 protected PSInfo readPS;
100 protected PSInfo updatePS;
101 protected PSInfo deletePS;
// When true, write operations are issued via execAsync rather than awaited.
102 private boolean async=false;

// Toggle asynchronous execution for create/update/delete.
// NOTE(review): the method body (original lines 105-107) is missing from this
// extract — presumably just "async = bool;"; confirm against version control.
104 public void async(boolean bool) {
// Convenience overload of setCRUD: use all declared fields of dc (max = -1 means no cap).
108 public final String[] setCRUD(TRANS trans, String table, Class<?> dc,Loader<DATA> loader) {
109 return setCRUD(trans, table, dc, loader, -1);
/*
 * Builds CQL text for INSERT/SELECT/UPDATE/DELETE over the first `end` declared fields
 * of dc (the first loader.keylimit() fields form the primary key / WHERE clause) and
 * compiles them into createPS/readPS/updatePS/deletePS.
 * Returns the assembled fragments: {field list, bind markers, SET clause, WHERE clause}.
 */
112 public final String[] setCRUD(TRANS trans, String table, Class<?> dc,Loader<DATA> loader, int max) {
113 Field[] fields = dc.getDeclaredFields();
// NOTE(review): bitwise '&' where logical '&&' was presumably intended — the result is
// identical for two booleans, but '&&' is the idiomatic, short-circuiting form.
114 int end = max>=0 & max<fields.length?max:fields.length;
115 // get keylimit from a non-null Loader
116 int keylimit = loader.keylimit();
// sbfc = column list, sbq = '?' bind markers, sbwc = WHERE clause (key columns),
// sbup = SET clause (non-key columns).
118 StringBuilder sbfc = new StringBuilder();
119 StringBuilder sbq = new StringBuilder();
120 StringBuilder sbwc = new StringBuilder();
121 StringBuilder sbup = new StringBuilder();

// NOTE(review): many original lines (125-128, 130-131, 133-137, 139-141, 143-146) are
// missing from this extract — the separator/branching logic inside this loop (comma
// handling, "=?" suffixes, key-vs-non-key split at keylimit) is not visible here;
// verify against version control before editing.
124 for(int i=0;i<end;++i) {
129 sbwc.append(" AND ");
132 sbfc.append(fields[i].getName());
138 sbup.append(fields[i].getName());
142 sbwc.append(fields[i].getName());

147 createPS = new PSInfo(trans, "INSERT INTO " + table + " ("+ sbfc +") VALUES ("+ sbq +");",loader,writeConsistency);

149 readPS = new PSInfo(trans, "SELECT " + sbfc + " FROM " + table + " WHERE " + sbwc + ';',loader,readConsistency);

151 // Note: UPDATES can't compile if there are no fields besides keys... Use "Insert"
152 if(sbup.length()==0) {
153 updatePS = createPS; // the same as an insert
155 updatePS = new PSInfo(trans, "UPDATE " + table + " SET " + sbup + " WHERE " + sbwc + ';',loader,writeConsistency);

158 deletePS = new PSInfo(trans, "DELETE FROM " + table + " WHERE " + sbwc + ';',loader,writeConsistency);

160 return new String[] {sbfc.toString(), sbq.toString(), sbup.toString(), sbwc.toString()};
// Swap in a custom prepared statement for the given CRUD operation (used by subclasses
// needing non-default CQL).
// NOTE(review): the "switch(crud) {" header (original line 164) is missing from this extract.
163 public void replace(CRUD crud, PSInfo psInfo) {
165 case create: createPS = psInfo; break;
166 case read: readPS = psInfo; break;
167 case update: updatePS = psInfo; break;
168 case delete: deletePS = psInfo; break;
// Disable one CRUD operation by nulling its prepared statement; the corresponding
// create/read/update/delete method then reports ERR_NotImplemented.
// NOTE(review): the "switch(crud) {" header (original line 173) is missing from this extract.
172 public void disable(CRUD crud) {
174 case create: createPS = null; break;
175 case read: readPS = null; break;
176 case update: updatePS = null; break;
177 case delete: deletePS = null; break;
/*
183 * Given a DATA object, extract the individual elements from the Data into an Object Array for the
 * prepared-statement bind, execute the INSERT (async or sync per the async flag),
 * notify any Cached<> layer via wasModified, and return the created DATA on success.
 * NOTE(review): several original lines (189, 192, 194-195, 197, 199-200) are missing
 * from this extract — including the rs.notOK()-style guards before each Result.err
 * and closing braces; verify against version control.
 */
186 public Result<DATA> create(TRANS trans, DATA data) {
187 if(createPS==null) {
// NOTE(review): as extracted, this Result.err(...) value is discarded — the original
// almost certainly read "return Result.err(...)"; confirm the 'return' was lost.
188 Result.err(Result.ERR_NotImplemented,"Create is disabled for %s",getClass().getSimpleName());
190 if(async) /*ResultSetFuture */ {
191 Result<ResultSetFuture> rs = createPS.execAsync(trans, C_TEXT, data);
193 return Result.err(rs);
196 Result<ResultSet> rs = createPS.exec(trans, C_TEXT, data);
198 return Result.err(rs);
// Let the cache layer know this row changed.
201 wasModified(trans, CRUD.create, data);
202 return Result.ok(data);
/*
206 * Read the Unique Row associated with Full Keys
 * NOTE(review): lines 207, 209 and 211 are missing from this extract (the readPS==null
 * guard's braces/return); verify against version control.
 */
208 public Result<List<DATA>> read(TRANS trans, DATA data) {
// NOTE(review): as extracted, this Result.err(...) value is discarded — original
// likely read "return Result.err(...)".
210 Result.err(Result.ERR_NotImplemented,"Read is disabled for %s",getClass().getSimpleName());
212 return readPS.read(trans, R_TEXT, data);
// Read rows by explicit key values (in primary-key order) instead of a populated DATA.
// NOTE(review): lines 216 and 218 are missing from this extract (guard braces);
// and as extracted the Result.err(...) below is discarded — original likely
// read "return Result.err(...)"; verify against version control.
215 public Result<List<DATA>> read(TRANS trans, Object ... key) {
217 Result.err(Result.ERR_NotImplemented,"Read is disabled for %s",getClass().getSimpleName());
219 return readPS.read(trans, R_TEXT, key);
// Execute the UPDATE for data (async or sync per the async flag), then notify the
// cache via wasModified and return Result.ok().
// NOTE(review): lines 225, 228, 230-231, 233, 235-237 are missing from this extract —
// including the rs.notOK()-style guards before each Result.err; verify against VCS.
222 public Result<Void> update(TRANS trans, DATA data) {
223 if(updatePS==null) {
// NOTE(review): as extracted, this Result.err(...) value is discarded — original
// likely read "return Result.err(...)".
224 Result.err(Result.ERR_NotImplemented,"Update is disabled for %s",getClass().getSimpleName());
226 if(async)/* ResultSet rs =*/ {
227 Result<ResultSetFuture> rs = updatePS.execAsync(trans, U_TEXT, data);
229 return Result.err(rs);
232 Result<ResultSet> rs = updatePS.exec(trans, U_TEXT, data);
234 return Result.err(rs);
238 wasModified(trans, CRUD.update, data);
239 return Result.ok();
242 // This method Sig for Cached...
/*
 * Delete the row(s) matching data's keys. When reread is true, first SELECT the full
 * row(s) so the complete data can be handed to wasModified (for possible
 * re-constitution), then delete each; otherwise delete directly by key.
 * NOTE(review): many original lines are absent from this extract (the deletePS==null
 * 'return', the if(reread)/else split, the async guards inside the loop, the
 * rs.notOK()-style conditions before each Result.err, and closing braces) — verify
 * against version control before editing.
 */
243 public Result<Void> delete(TRANS trans, DATA data, boolean reread) {
244 if(deletePS==null) {
// NOTE(review): as extracted, this Result.err(...) value is discarded — original
// likely read "return Result.err(...)".
245 Result.err(Result.ERR_NotImplemented,"Delete is disabled for %s",getClass().getSimpleName());
247 // Since Deleting will be stored off, for possible re-constitution, need the whole thing
249 Result<List<DATA>> rd = read(trans,data);
251 return Result.err(rd);
254 return Result.err(Status.ERR_NotFound,"Not Found");
// Delete (and report to the cache) each row that the reread found.
256 for(DATA d : rd.value) {
258 Result<ResultSetFuture> rs = deletePS.execAsync(trans, D_TEXT, d);
260 return Result.err(rs);
263 Result<ResultSet> rs = deletePS.exec(trans, D_TEXT, d);
265 return Result.err(rs);
268 wasModified(trans, CRUD.delete, d);
// Non-reread path: delete directly using the keys present in data.
271 if(async)/* ResultSet rs =*/ {
272 Result<ResultSetFuture> rs = deletePS.execAsync(trans, D_TEXT, data);
274 return Result.err(rs);
277 Result<ResultSet> rs = deletePS.exec(trans, D_TEXT, data);
279 return Result.err(rs);
282 wasModified(trans, CRUD.delete, data);
284 return Result.ok();
// Extract the primary-key Object[] from DATA via the create statement's Loader.
// NOTE(review): throws NullPointerException if create has been disable()d — assumes
// createPS is non-null; confirm callers guarantee this.
287 public final Object[] keyFrom(DATA data) {
288 return createPS.keyFrom(data);
// Accessor for the Cassandra table name this DAO was configured with.
// NOTE(review): the method body (original lines 293-294, presumably "return table;")
// is missing from this extract; verify against version control.
292 public String table() {
// Property keys for configuring per-table (suffix ".<table>") or global consistency.
296 public static final String CASS_READ_CONSISTENCY="cassandra.readConsistency";
297 public static final String CASS_WRITE_CONSISTENCY="cassandra.writeConsistency";
// Resolve the read ConsistencyLevel for a table: table-specific property first, then
// the global property, else Cassandra's default of ONE.
// NOTE(review): the null-check lines (original 300, 302, 304-305) are missing from
// this extract; also valueOf throws IllegalArgumentException on a bad property value.
298 protected static ConsistencyLevel readConsistency(AuthzTrans trans, String table) {
299 String prop = trans.getProperty(CASS_READ_CONSISTENCY+'.'+table);
301 prop = trans.getProperty(CASS_READ_CONSISTENCY);
303 return ConsistencyLevel.ONE; // this is Cassandra Default
306 return ConsistencyLevel.valueOf(prop);
// Resolve the write ConsistencyLevel for a table: table-specific property first, then
// the global property, else Cassandra's default of ONE. Mirrors readConsistency above.
// NOTE(review): the null-check lines (original 311, 313, 315-316) are missing from
// this extract; also valueOf throws IllegalArgumentException on a bad property value.
309 protected static ConsistencyLevel writeConsistency(AuthzTrans trans, String table) {
310 String prop = trans.getProperty(CASS_WRITE_CONSISTENCY+'.'+table);
312 prop = trans.getProperty(CASS_WRITE_CONSISTENCY);
314 return ConsistencyLevel.ONE; // this is Cassandra Default
317 return ConsistencyLevel.valueOf(prop);
320 public static DataInputStream toDIS(ByteBuffer bb) {
\r
321 byte[] b = bb.array();
\r
322 return new DataInputStream(
\r
323 new ByteArrayInputStream(b,bb.position(),bb.limit())
\r