return;
}
if (isProxyServer()) {
- try {
- super.doGet(req, resp);
- } catch (IOException ioe) {
- eventlogger.error("IOException" + ioe.getMessage());
- }
+ super.doGet(req, resp);
return;
}
String bhdr = req.getHeader(BEHALF_HEADER);
return;
}
if (isProxyServer()) {
- try {
- super.doPost(req, resp);
- } catch (IOException ioe) {
- eventlogger.error("IOException" + ioe.getMessage());
- }
+ super.doPost(req, resp);
return;
}
String bhdr = req.getHeader(BEHALF_HEADER);
return;
}
if (isProxyServer()) {
- try {
- super.doDelete(req, resp);
- } catch (IOException ioe) {
- eventlogger.error("IOException" + ioe.getMessage());
- }
+ super.doDelete(req, resp);
return;
}
String bhdr = req.getHeader(BEHALF_HEADER);
return;
}
if (isProxyServer()) {
- try {
- super.doGet(req, resp);
- } catch (IOException ioe) {
- eventlogger.error("IOException" + ioe.getMessage());
- }
+ super.doGet(req, resp);
return;
}
String bhdr = req.getHeader(BEHALF_HEADER);
return;
}
if (isProxyServer()) {
- try {
- super.doPut(req, resp);
- } catch (IOException ioe) {
- eventlogger.error("IOException" + ioe.getMessage());
- }
+ super.doPut(req, resp);
return;
}
String bhdr = req.getHeader(BEHALF_HEADER);
return;
}
if (isProxyServer()) {
- try {
- super.doGet(req, resp);
- } catch (IOException ioe) {
- eventlogger.error("IOException" + ioe.getMessage());
- }
+ super.doGet(req, resp);
return;
}
String bhdr = req.getHeader(BEHALF_HEADER);
return;
}
if (isProxyServer()) {
- try {
- super.doPut(req, resp);
- } catch (IOException ioe) {
- eventlogger.error("IOException" + ioe.getMessage());
- }
+ super.doPut(req, resp);
return;
}
String bhdr = req.getHeader(BEHALF_HEADER);
return;
}
if (isProxyServer()) {
- try {
- super.doPost(req, resp);
- } catch (IOException ioe) {
- eventlogger.error("IOException" + ioe.getMessage());
- }
+ super.doPost(req, resp);
return;
}
String bhdr = req.getHeader(BEHALF_HEADER);
String path = req.getPathInfo();
if (path.startsWith("/api/")) {
if (isProxyOK(req) && isProxyServer()) {
- try {
- super.doDelete(req, resp);
- } catch (IOException ioe) {
- intlogger.error("IOException" + ioe.getMessage());
- }
+ super.doDelete(req, resp);
return;
}
String key = path.substring(5);
}
if (path.equals("/prov")) {
if (isProxyOK(req) && isProxyServer()) {
- try {
- if (super.doGetWithFallback(req, resp)) {
- return;
- }
- } catch (IOException ioe) {
- intlogger.error("IOException" + ioe.getMessage());
+ if (super.doGetWithFallback(req, resp)) {
+ return;
}
// fall back to returning the local data if the remote is unreachable
intlogger.info("Active server unavailable; falling back to local copy.");
}
if (path.startsWith("/api/")) {
if (isProxyOK(req) && isProxyServer()) {
- try {
- super.doGet(req, resp);
- } catch (IOException ioe) {
- intlogger.error("IOException" + ioe.getMessage());
- }
+ super.doGet(req, resp);
return;
}
String key = path.substring(5);
String path = req.getPathInfo();
if (path.startsWith("/api/")) {
if (isProxyOK(req) && isProxyServer()) {
- try {
- super.doPut(req, resp);
- } catch (IOException ioe) {
- intlogger.error("IOException" + ioe.getMessage());
- }
+ super.doPut(req, resp);
return;
}
String key = path.substring(5);
String path = req.getPathInfo();
if (path.startsWith("/api/")) {
if (isProxyOK(req) && isProxyServer()) {
- try {
- super.doPost(req, resp);
- } catch (IOException ioe) {
- intlogger.error("IOException" + ioe.getMessage());
- }
+ super.doPost(req, resp);
return;
}
String key = path.substring(5);
public static final String DEFAULT_TRUSTSTORE = "/opt/java/jdk/jdk180/jre/lib/security/cacerts";
public static final String KEYSTORE_TYPE_PROPERTY = "org.onap.dmaap.datarouter.provserver.keystore.type";
public static final String KEYSTORE_PATH_PROPERTY = "org.onap.dmaap.datarouter.provserver.keystore.path";
- public static final String KEYSTORE_PASSWORD_PROPERTY = "org.onap.dmaap.datarouter.provserver.keystore.password";
+ public static final String KEYSTORE_PASS_PROPERTY = "org.onap.dmaap.datarouter.provserver.keystore.password";
public static final String TRUSTSTORE_PATH_PROPERTY = "org.onap.dmaap.datarouter.provserver.truststore.path";
- public static final String TRUSTSTORE_PASSWORD_PROPERTY = "org.onap.dmaap.datarouter.provserver.truststore.password";
+ public static final String TRUSTSTORE_PASS_PROPERTY = "org.onap.dmaap.datarouter.provserver.truststore.password";
/**
* The one and only {@link Server} instance in this JVM
Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");
// Check DB is accessible and contains the expected tables
- if (!checkDatabase(logger)) {
+ if (!checkDatabase()) {
System.exit(1);
}
// HTTPS connector
SslContextFactory sslContextFactory = new SslContextFactory();
sslContextFactory.setKeyStorePath(p.getProperty(KEYSTORE_PATH_PROPERTY));
- sslContextFactory.setKeyStorePassword(p.getProperty(KEYSTORE_PASSWORD_PROPERTY));
+ sslContextFactory.setKeyStorePassword(p.getProperty(KEYSTORE_PASS_PROPERTY));
sslContextFactory
.setKeyManagerPassword(p.getProperty("org.onap.dmaap.datarouter.provserver.keymanager.password"));
// SSL stuff
sslContextFactory.setKeyStoreType(p.getProperty(KEYSTORE_TYPE_PROPERTY, "jks"));
sslContextFactory.setKeyStorePath(p.getProperty(KEYSTORE_PATH_PROPERTY));
- sslContextFactory.setKeyStorePassword(p.getProperty(KEYSTORE_PASSWORD_PROPERTY));
+ sslContextFactory.setKeyStorePassword(p.getProperty(KEYSTORE_PASS_PROPERTY));
sslContextFactory
.setKeyManagerPassword(p.getProperty("org.onap.dmaap.datarouter.provserver.keymanager.password"));
String ts = p.getProperty(TRUSTSTORE_PATH_PROPERTY);
if (ts != null && ts.length() > 0) {
- System.out.println("@@ TS -> " + ts);
+ logger.info("@@ TS -> " + ts);
sslContextFactory.setTrustStorePath(ts);
- sslContextFactory.setTrustStorePassword(p.getProperty(TRUSTSTORE_PASSWORD_PROPERTY));
+ sslContextFactory.setTrustStorePassword(p.getProperty(TRUSTSTORE_PASS_PROPERTY));
} else {
sslContextFactory.setTrustStorePath(DEFAULT_TRUSTSTORE);
sslContextFactory.setTrustStorePassword("changeit");
logger.info("PROV0001 **** AT&T Data Router Provisioning Server halted.");
}
- private static boolean checkDatabase(Logger logger) {
+ private static boolean checkDatabase() {
DB db = new DB();
return db.runRetroFits();
}
import org.onap.dmaap.datarouter.provisioning.utils.DB;
import org.onap.dmaap.datarouter.provisioning.utils.URLUtilities;
+import static org.onap.dmaap.datarouter.provisioning.utils.HttpServletUtils.sendResponseError;
+
/**
* This class is the base class for those servlets that need to proxy their requests from the standby to active server.
* Its methods perform the proxy function to the active server. If the active server is not reachable, a 503
Properties props = (new DB()).getProperties();
String type = props.getProperty(Main.KEYSTORE_TYPE_PROPERTY, "jks");
String store = props.getProperty(Main.KEYSTORE_PATH_PROPERTY);
- String pass = props.getProperty(Main.KEYSTORE_PASSWORD_PROPERTY);
+ String pass = props.getProperty(Main.KEYSTORE_PASS_PROPERTY);
KeyStore keyStore = readStore(store, pass, type);
store = props.getProperty(Main.TRUSTSTORE_PATH_PROPERTY);
- pass = props.getProperty(Main.TRUSTSTORE_PASSWORD_PROPERTY);
+ pass = props.getProperty(Main.TRUSTSTORE_PASS_PROPERTY);
if (store == null || store.length() == 0) {
store = Main.DEFAULT_TRUSTSTORE;
pass = "changeit";
// We are connecting with the node name, but the certificate will have the CNAME
// So we need to accept a non-matching certificate name
SSLSocketFactory socketFactory = new SSLSocketFactory(keyStore,
- props.getProperty(Main.KEYSTORE_PASSWORD_PROPERTY), trustStore);
+ props.getProperty(Main.KEYSTORE_PASS_PROPERTY), trustStore);
socketFactory.setHostnameVerifier(SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
sch = new Scheme("https", 443, socketFactory);
inited = true;
} catch (Exception e) {
- e.printStackTrace();
+ intlogger.error("ProxyServlet: " + e.getMessage());
}
intlogger.info("ProxyServlet: inited = " + inited);
}
} catch (FileNotFoundException fileNotFoundException) {
intlogger.error("ProxyServlet: " + fileNotFoundException.getMessage());
} catch (Exception x) {
- System.err.println("READING TRUSTSTORE: " + x);
+ intlogger.error("READING TRUSTSTORE: " + x);
}
return ks;
}
* Issue a proxy DELETE to the active provisioning server.
*/
@Override
- public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ public void doDelete(HttpServletRequest req, HttpServletResponse resp) {
doProxy(req, resp, "DELETE");
}
* Issue a proxy GET to the active provisioning server.
*/
@Override
- public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ public void doGet(HttpServletRequest req, HttpServletResponse resp) {
doProxy(req, resp, "GET");
}
* Issue a proxy PUT to the active provisioning server.
*/
@Override
- public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ public void doPut(HttpServletRequest req, HttpServletResponse resp) {
doProxy(req, resp, "PUT");
}
* Issue a proxy POST to the active provisioning server.
*/
@Override
- public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ public void doPost(HttpServletRequest req, HttpServletResponse resp) {
doProxy(req, resp, "POST");
}
*
* @return true if the proxy succeeded
*/
- public boolean doGetWithFallback(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ public boolean doGetWithFallback(HttpServletRequest req, HttpServletResponse resp) {
boolean rv = false;
if (inited) {
String url = buildUrl(req);
copyRequestHeaders(req, proxy);
// Execute the request
- HttpResponse pxy_response = httpclient.execute(proxy);
+ HttpResponse pxyResponse = httpclient.execute(proxy);
// Get response headers and body
- int code = pxy_response.getStatusLine().getStatusCode();
+ int code = pxyResponse.getStatusLine().getStatusCode();
resp.setStatus(code);
- copyResponseHeaders(pxy_response, resp);
-
- HttpEntity entity = pxy_response.getEntity();
- if (entity != null) {
- InputStream in = entity.getContent();
- IOUtils.copy(in, resp.getOutputStream());
- in.close();
- }
+ copyResponseHeaders(pxyResponse, resp);
+ copyEntityContent(pxyResponse, resp);
rv = true;
} catch (IOException e) {
- System.err.println("ProxyServlet: " + e);
- e.printStackTrace();
+ intlogger.error("ProxyServlet: " + e.getMessage());
} finally {
proxy.releaseConnection();
httpclient.getConnectionManager().shutdown();
return rv;
}
- private void doProxy(HttpServletRequest req, HttpServletResponse resp, final String method) throws IOException {
+ private void doProxy(HttpServletRequest req, HttpServletResponse resp, final String method) {
if (inited && isProxyServer()) {
String url = buildUrl(req);
intlogger.info("ProxyServlet: proxying " + method + " " + url);
}
// Execute the request
- HttpResponse pxy_response = httpclient.execute(proxy);
+ HttpResponse pxyResponse = httpclient.execute(proxy);
// Get response headers and body
- int code = pxy_response.getStatusLine().getStatusCode();
+ int code = pxyResponse.getStatusLine().getStatusCode();
resp.setStatus(code);
- copyResponseHeaders(pxy_response, resp);
-
- HttpEntity entity = pxy_response.getEntity();
- if (entity != null) {
- InputStream in = entity.getContent();
- IOUtils.copy(in, resp.getOutputStream());
- in.close();
- }
+ copyResponseHeaders(pxyResponse, resp);
+ copyEntityContent(pxyResponse, resp);
} catch (IOException e) {
intlogger.warn("ProxyServlet: " + e);
- resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
- e.printStackTrace();
+ sendResponseError(resp, HttpServletResponse.SC_SERVICE_UNAVAILABLE, "", intlogger);
} finally {
proxy.releaseConnection();
httpclient.getConnectionManager().shutdown();
}
} else {
intlogger.warn("ProxyServlet: proxy disabled");
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ sendResponseError(resp, HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG, intlogger);
}
}
}
}
+ private void copyEntityContent(HttpResponse pxyResponse, HttpServletResponse resp) {
+ HttpEntity entity = pxyResponse.getEntity();
+ if (entity != null) {
+ try (InputStream in = entity.getContent()) {
+ IOUtils.copy(in, resp.getOutputStream());
+ } catch (Exception e) {
+ intlogger.error("Exception: " + e.getMessage());
+ }
+ }
+ }
+
public class ProxyHttpRequest extends HttpEntityEnclosingRequestBase {
private final String method;
package org.onap.dmaap.datarouter.provisioning;
import java.io.IOException;
-import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
-import java.util.Properties;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
setIpAndFqdnForEelf("doPost");
eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));
redirect(req, resp);
+
}
- private void redirect(HttpServletRequest req, HttpServletResponse resp) throws IOException {
- String[] nodes = getNodes();
- if (nodes == null || nodes.length == 0) {
- resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE, "There are no nodes defined in the DR network.");
- } else {
- EventLogRecord elr = new EventLogRecord(req);
- int feedid = checkPath(req);
- if (feedid < 0) {
- String message = (feedid == -1)
- ? "Invalid request - Missing or bad feed number."
- : "Invalid request - Missing file ID.";
- elr.setMessage(message);
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);
- eventlogger.info(elr);
-
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);
+ private void redirect(HttpServletRequest req, HttpServletResponse resp) {
+ try {
+ String[] nodes = getNodes();
+ if (nodes == null || nodes.length == 0) {
+ resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE, "There are no nodes defined in the DR network.");
} else {
- // Generate new URL
- String nextnode = getRedirectNode(feedid, req);
- nextnode = nextnode+":"+DB.HTTPS_PORT;
- String newurl = "https://" + nextnode + "/publish" + req.getPathInfo();
- String qs = req.getQueryString();
- if (qs != null)
- newurl += "?" + qs;
-
- // Log redirect in event log
- String message = "Redirected to: "+newurl;
- elr.setMessage(message);
- elr.setResult(HttpServletResponse.SC_MOVED_PERMANENTLY);
- eventlogger.info(elr);
-
- resp.setStatus(HttpServletResponse.SC_MOVED_PERMANENTLY);
- resp.setHeader("Location", newurl);
+ EventLogRecord elr = new EventLogRecord(req);
+ int feedid = checkPath(req);
+ if (feedid < 0) {
+ String message = (feedid == -1)
+ ? "Invalid request - Missing or bad feed number."
+ : "Invalid request - Missing file ID.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);
+ eventlogger.info(elr);
+
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);
+ } else {
+ // Generate new URL
+ String nextnode = getRedirectNode(feedid, req);
+ nextnode = nextnode + ":" + DB.HTTPS_PORT;
+ String newurl = "https://" + nextnode + "/publish" + req.getPathInfo();
+ String qs = req.getQueryString();
+ if (qs != null)
+ newurl += "?" + qs;
+
+ // Log redirect in event log
+ String message = "Redirected to: " + newurl;
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_MOVED_PERMANENTLY);
+ eventlogger.info(elr);
+
+ resp.setStatus(HttpServletResponse.SC_MOVED_PERMANENTLY);
+ resp.setHeader("Location", newurl);
+ }
}
+ } catch (IOException ioe) {
+ intlogger.error("IOException: " + ioe.getMessage());
+
}
}
private String getRedirectNode(int feedid, HttpServletRequest req) {
return;\r
}\r
if (isProxyOK(req) && isProxyServer()) {\r
- try {\r
- super.doDelete(req, resp);\r
- } catch (IOException ioe) {\r
- eventlogger.error("IOException" + ioe.getMessage());\r
- }\r
+ super.doDelete(req, resp);\r
return;\r
}\r
\r
return;\r
}\r
if (isProxyOK(req) && isProxyServer()) {\r
- try {\r
- super.doGet(req, resp);\r
- } catch (IOException ioe) {\r
- eventlogger.error("IOException" + ioe.getMessage());\r
- }\r
+ super.doGet(req, resp);\r
return;\r
}\r
\r
return;\r
}\r
if (isProxyOK(req) && isProxyServer()) {\r
- try {\r
- super.doPost(req, resp);\r
- } catch (IOException ioe) {\r
- intlogger.error("IOException" + ioe.getMessage());\r
- }\r
+ super.doPost(req, resp);\r
return;\r
}\r
String path = req.getPathInfo();\r
return;
}
if (isProxyServer()) {
- try {
- super.doGet(req, resp);
- } catch (IOException ioe) {
- eventlogger.error("IOException: " + ioe.getMessage());
- }
+ super.doGet(req, resp);
return;
}
String bhdr = req.getHeader(BEHALF_HEADER);
* section in the <b>Provisioning API</b> document for details on how this method should be invoked.
*/
@Override
- public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ public void doPost(HttpServletRequest req, HttpServletResponse resp) {
setIpAndFqdnForEelf("doPost");
eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));
EventLogRecord elr = new EventLogRecord(req);
return;
}
if (isProxyServer()) {
- try {
- super.doPost(req, resp);
- } catch (IOException ioe) {
- eventlogger.error("IOException: " + ioe.getMessage());
- }
+ super.doPost(req, resp);
return;
}
String bhdr = req.getHeader(BEHALF_HEADER);
return;\r
}\r
if (isProxyServer()) {\r
- try {\r
- super.doDelete(req, resp);\r
- } catch (IOException ioe) {\r
- eventlogger.error("IOException: " + ioe.getMessage());\r
- }\r
+ super.doDelete(req, resp);\r
return;\r
}\r
String bhdr = req.getHeader(BEHALF_HEADER);\r
return;\r
}\r
if (isProxyServer()) {\r
- try {\r
- super.doGet(req, resp);\r
- } catch (IOException ioe) {\r
- eventlogger.error("IOException: " + ioe.getMessage());\r
- }\r
+ super.doGet(req, resp);\r
return;\r
}\r
String bhdr = req.getHeader(BEHALF_HEADER);\r
return;\r
}\r
if (isProxyServer()) {\r
- try {\r
- super.doPut(req, resp);\r
- } catch (IOException ioe) {\r
- eventlogger.error("IOException: " + ioe.getMessage());\r
- }\r
+ super.doPut(req, resp);\r
return;\r
}\r
String bhdr = req.getHeader(BEHALF_HEADER);\r
return;\r
}\r
if (isProxyServer()) {\r
- try {\r
- super.doPost(req, resp);\r
- } catch (IOException ioe) {\r
- eventlogger.error("IOException: " + ioe.getMessage());\r
- }\r
+ super.doPost(req, resp);\r
return;\r
}\r
String bhdr = req.getHeader(BEHALF_HEADER);\r
Properties props = (new DB()).getProperties();
String type = props.getProperty(Main.KEYSTORE_TYPE_PROPERTY, "jks");
String store = props.getProperty(Main.KEYSTORE_PATH_PROPERTY);
- String pass = props.getProperty(Main.KEYSTORE_PASSWORD_PROPERTY);
+ String pass = props.getProperty(Main.KEYSTORE_PASS_PROPERTY);
KeyStore keyStore = KeyStore.getInstance(type);
try(FileInputStream instream = new FileInputStream(new File(store))) {
keyStore.load(instream, pass.toCharArray());
}
store = props.getProperty(Main.TRUSTSTORE_PATH_PROPERTY);
- pass = props.getProperty(Main.TRUSTSTORE_PASSWORD_PROPERTY);
+ pass = props.getProperty(Main.TRUSTSTORE_PASS_PROPERTY);
KeyStore trustStore = null;
if (store != null && store.length() > 0) {
trustStore = KeyStore.getInstance(KeyStore.getDefaultType());
// We are connecting with the node name, but the certificate will have the CNAME
// So we need to accept a non-matching certificate name
String keystorepass = props.getProperty(
- Main.KEYSTORE_PASSWORD_PROPERTY); //itrack.web.att.com/browse/DATARTR-6 for changing hard coded passphase ref
+ Main.KEYSTORE_PASS_PROPERTY); //itrack.web.att.com/browse/DATARTR-6 for changing hard coded passphase ref
try(AbstractHttpClient hc = new DefaultHttpClient()) {
SSLSocketFactory socketFactory =
(trustStore == null)
}
} catch (Exception e) {
logger.warn("PROV0020: Caught exception in SynchronizerTask: " + e);
- e.printStackTrace();
}
}
* Synchronize the Feeds in the JSONArray, with the Feeds in the DB.
*/
private void syncFeeds(JSONArray ja) {
- Collection<Syncable> coll = new ArrayList<Syncable>();
+ Collection<Syncable> coll = new ArrayList<>();
for (int n = 0; n < ja.length(); n++) {
try {
Feed f = new Feed(ja.getJSONObject(n));
* Synchronize the Subscriptions in the JSONArray, with the Subscriptions in the DB.
*/
private void syncSubs(JSONArray ja) {
- Collection<Syncable> coll = new ArrayList<Syncable>();
+ Collection<Syncable> coll = new ArrayList<>();
for (int n = 0; n < ja.length(); n++) {
try {
//Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.
* Rally:US708115 - Synchronize the Groups in the JSONArray, with the Groups in the DB.
*/
private void syncGroups(JSONArray ja) {
- Collection<Syncable> coll = new ArrayList<Syncable>();
+ Collection<Syncable> coll = new ArrayList<>();
for (int n = 0; n < ja.length(); n++) {
try {
Group g = new Group(ja.getJSONObject(n));
* Synchronize the Parameters in the JSONObject, with the Parameters in the DB.
*/
private void syncParams(JSONObject jo) {
- Collection<Syncable> coll = new ArrayList<Syncable>();
+ Collection<Syncable> coll = new ArrayList<>();
for (String k : jo.keySet()) {
String v = "";
try {
}
private void syncIngressRoutes(JSONArray ja) {
- Collection<Syncable> coll = new ArrayList<Syncable>();
+ Collection<Syncable> coll = new ArrayList<>();
for (int n = 0; n < ja.length(); n++) {
try {
IngressRoute in = new IngressRoute(ja.getJSONObject(n));
}
private void syncEgressRoutes(JSONObject jo) {
- Collection<Syncable> coll = new ArrayList<Syncable>();
+ Collection<Syncable> coll = new ArrayList<>();
for (String key : jo.keySet()) {
try {
int sub = Integer.parseInt(key);
}
private void syncNetworkRoutes(JSONArray ja) {
- Collection<Syncable> coll = new ArrayList<Syncable>();
+ Collection<Syncable> coll = new ArrayList<>();
for (int n = 0; n < ja.length(); n++) {
try {
NetworkRoute nr = new NetworkRoute(ja.getJSONObject(n));
try {
Map<String, Syncable> newmap = getMap(newc);
Map<String, Syncable> oldmap = getMap(oldc);
- Set<String> union = new TreeSet<String>(newmap.keySet());
+ Set<String> union = new TreeSet<>(newmap.keySet());
union.addAll(oldmap.keySet());
DB db = new DB();
@SuppressWarnings("resource")
db.release(conn);
} catch (SQLException e) {
logger.warn("PROV5009: problem during sync, exception: " + e);
- e.printStackTrace();
}
return changes;
}
private Map<String, Syncable> getMap(Collection<? extends Syncable> c) {
- Map<String, Syncable> map = new HashMap<String, Syncable>();
+ Map<String, Syncable> map = new HashMap<>();
for (Syncable v : c) {
map.put(v.getKey(), v);
}
\r
db.release(conn);\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
return set;\r
}\r
ps.close();\r
db.release(conn);\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
} finally {\r
try {\r
if(ps!=null) {\r
ps.close();\r
}\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
}\r
return v;\r
} catch (SQLException e) {\r
rv = false;\r
intlogger.warn("PROV0007 doDelete: " + e.getMessage());\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
} finally {\r
try {\r
if(ps!=null) {\r
ps.close();\r
}\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
}\r
return rv;\r
rv = true;\r
} catch (SQLException e) {\r
intlogger.warn("PROV0005 doInsert: " + e.getMessage());\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
} finally {\r
try {\r
if(ps!=null) {\r
ps.close();\r
}\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
}\r
return rv;\r
} catch (SQLException e) {\r
rv = false;\r
intlogger.warn("PROV0006 doUpdate: " + e.getMessage());\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
} finally {\r
try {\r
if(ps!=null) {\r
ps.close();\r
}\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
}\r
return rv;\r
}\r
db.release(conn);\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
return list;\r
}\r
db.release(conn);\r
} catch (SQLException e) {\r
intlogger.info("getMaxSubID: " + e.getMessage());\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
return max;\r
}\r
}\r
db.release(conn);\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
return list;\r
}\r
db.release(conn);\r
} catch (SQLException e) {\r
intlogger.warn("PROV0008 countActiveSubscriptions: " + e.getMessage());\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
return count;\r
}\r
} catch (SQLException e) {\r
rv = false;\r
intlogger.warn("PROV0005 doInsert: " + e.getMessage());\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
} finally {\r
try {\r
if(ps!=null) {\r
ps.close();\r
}\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
}\r
return rv;\r
} catch (SQLException e) {\r
rv = false;\r
intlogger.warn("PROV0006 doUpdate: " + e.getMessage());\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
} finally {\r
try {\r
if(ps!=null) {\r
ps.close();\r
}\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
}\r
return rv;\r
} catch (SQLException e) {\r
rv = false;\r
intlogger.warn("PROV0007 doDelete: " + e.getMessage());\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
} finally {\r
try {\r
if(ps!=null) {\r
ps.close();\r
}\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
}\r
return rv;\r
}\r
db.release(conn);\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
return set;\r
}\r
}\r
db.release(conn);\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
return rv;\r
}\r
ps.close();\r
db.release(conn);\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
} finally {\r
try {\r
if(ps!=null) {\r
ps.close();\r
}\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
}\r
return v;\r
}\r
db.release(conn);\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
return rv;\r
}\r
}\r
db.release(conn);\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
return set;\r
}\r
} catch (SQLException e) {\r
rv = false;\r
intlogger.warn("PROV0007 doDelete: " + e.getMessage());\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
} finally {\r
try {\r
if(ps!=null) {\r
ps.close();\r
}\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
}\r
return rv;\r
rv = true;\r
} catch (SQLException e) {\r
intlogger.warn("PROV0005 doInsert: " + e.getMessage());\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
} finally {\r
try {\r
if(ps!=null) {\r
ps.close();\r
}\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
}\r
return rv;\r
}\r
db.release(conn);\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
return set;\r
}\r
} catch (SQLException e) {\r
rv = false;\r
intlogger.warn("PROV0007 doDelete: " + e.getMessage());\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
} finally {\r
try {\r
if(ps!=null) {\r
ps.close();\r
}\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
}\r
return rv;\r
rv = true;\r
} catch (SQLException e) {\r
intlogger.warn("PROV0005 doInsert: " + e.getMessage());\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
} finally {\r
try {\r
if(ps!=null) {\r
ps.close();\r
}\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
}\r
}\r
} catch (SQLException e) {\r
rv = false;\r
intlogger.warn("PROV0006 doUpdate: " + e.getMessage());\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
} finally {\r
try {\r
if(ps!=null) {\r
ps.close();\r
}\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ intlogger.error("SQLException " + e.getMessage());\r
}\r
}\r
return rv;\r
}\r
} catch (Exception e) {\r
logger.warn("PROV0020: Caught exception in LogfileLoader: " + e);\r
- e.printStackTrace();\r
}\r
}\r
}\r
}\r
} catch (SQLException e) {\r
System.err.println(e);\r
- e.printStackTrace();\r
+ logger.error(e);\r
} finally {\r
db.release(conn);\r
}\r
}\r
} catch (SQLException e) {\r
System.err.println(e);\r
- e.printStackTrace();\r
+ logger.error(e);\r
} finally {\r
db.release(conn);\r
}\r
}\r
} catch (SQLException e) {\r
System.err.println(e);\r
- e.printStackTrace();\r
+ logger.error(e);\r
} finally {\r
db.release(conn);\r
}\r
logger.debug(String.format("initializeNextid, next ID is %d (%x)", nextid, nextid));\r
} catch (SQLException e) {\r
System.err.println(e);\r
- e.printStackTrace();\r
+ logger.error(e);\r
} finally {\r
db.release(conn);\r
}\r
} catch (SQLException e) {\r
logger.warn("PROV8003 Invalid value in record: " + line);\r
logger.debug(e);\r
- e.printStackTrace();\r
} catch (NumberFormatException e) {\r
logger.warn("PROV8004 Invalid number in record: " + line);\r
logger.debug(e);\r
- e.printStackTrace();\r
} catch (ParseException e) {\r
logger.warn("PROV8005 Invalid date in record: " + line);\r
logger.debug(e);\r
- e.printStackTrace();\r
} catch (Exception e) {\r
logger.warn("PROV8006 Invalid pattern in record: " + line);\r
logger.debug(e);\r
- e.printStackTrace();\r
}\r
total++;\r
}\r
import java.io.File;\r
import java.util.Properties;\r
import java.util.TimerTask;\r
+import org.apache.log4j.Logger;\r
\r
/**\r
- * This class provides a {@link TimerTask} that purges old logfiles\r
- * (older than the number of days specified by the org.onap.dmaap.datarouter.provserver.logretention property).\r
+ * This class provides a {@link TimerTask} that purges old logfiles (older than the number of days specified by the\r
+ * org.onap.dmaap.datarouter.provserver.logretention property).\r
*\r
* @author Robert Eby\r
* @version $Id: PurgeLogDirTask.java,v 1.2 2013/07/05 13:48:05 eby Exp $\r
*/\r
public class PurgeLogDirTask extends TimerTask {\r
+\r
private static final long ONEDAY = 86400000L;\r
\r
private final String logdir;\r
private final long interval;\r
+ private Logger utilsLogger;\r
\r
public PurgeLogDirTask() {\r
Properties p = (new DB()).getProperties();\r
logdir = p.getProperty("org.onap.dmaap.datarouter.provserver.accesslog.dir");\r
String s = p.getProperty("org.onap.dmaap.datarouter.provserver.logretention", "30");\r
+\r
+ this.utilsLogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.utils");\r
+\r
long n = 30;\r
try {\r
n = Long.parseLong(s);\r
if (dir.exists()) {\r
long exptime = System.currentTimeMillis() - interval;\r
for (File logfile : dir.listFiles()) {\r
- if (logfile.lastModified() < exptime)\r
+ if (logfile.lastModified() < exptime) {\r
logfile.delete();\r
+ }\r
}\r
}\r
} catch (Exception e) {\r
- e.printStackTrace();\r
+ utilsLogger.error("Exception: " + e.getMessage());\r
}\r
}\r
}\r
import java.util.List;\r
import java.util.Map;\r
import java.util.TreeSet;\r
-\r
import org.onap.dmaap.datarouter.provisioning.utils.DB;\r
\r
/**\r
* @version $Id: DailyLatencyReport.java,v 1.2 2013/11/06 16:23:54 eby Exp $\r
*/\r
public class DailyLatencyReport extends ReportBase {\r
+\r
private static final String SELECT_SQL =\r
"select EVENT_TIME, TYPE, PUBLISH_ID, FEED_FILEID, FEEDID, CONTENT_LENGTH from LOG_RECORDS" +\r
- " where EVENT_TIME >= ? and EVENT_TIME <= ?";\r
+ " where EVENT_TIME >= ? and EVENT_TIME <= ?";\r
\r
private class Job {\r
+\r
public long pubtime = 0;\r
public long clen = 0;\r
public List<Long> deltime = new ArrayList<Long>();\r
+\r
public long minLatency() {\r
long n = deltime.isEmpty() ? 0 : Long.MAX_VALUE;\r
- for (Long l : deltime)\r
- n = Math.min(n, l-pubtime);\r
+ for (Long l : deltime) {\r
+ n = Math.min(n, l - pubtime);\r
+ }\r
return n;\r
}\r
+\r
public long maxLatency() {\r
long n = 0;\r
- for (Long l : deltime)\r
- n = Math.max(n, l-pubtime);\r
+ for (Long l : deltime) {\r
+ n = Math.max(n, l - pubtime);\r
+ }\r
return n;\r
}\r
+\r
public long totalLatency() {\r
long n = 0;\r
- for (Long l : deltime)\r
- n += (l-pubtime);\r
+ for (Long l : deltime) {\r
+ n += (l - pubtime);\r
+ }\r
return n;\r
}\r
}\r
+\r
private class Counters {\r
+\r
public final String date;\r
public final int feedid;\r
public final Map<String, Job> jobs;\r
+\r
public Counters(String d, int fid) {\r
date = d;\r
feedid = fid;\r
- jobs = new HashMap<String, Job>();\r
+ jobs = new HashMap<>();\r
}\r
+\r
public void addEvent(long etime, String type, String id, String fid, long clen) {\r
Job j = jobs.get(id);\r
if (j == null) {\r
j.deltime.add(etime);\r
}\r
}\r
+\r
@Override\r
public String toString() {\r
long minsize = Long.MAX_VALUE, maxsize = 0, avgsize = 0;\r
- long minl = Long.MAX_VALUE, maxl = 0;\r
- long fanout = 0, totall = 0, totaln = 0;\r
+ long minl = Long.MAX_VALUE, maxl = 0;\r
+ long fanout = 0, totall = 0, totaln = 0;\r
for (Job j : jobs.values()) {\r
minsize = Math.min(minsize, j.clen);\r
maxsize = Math.max(maxsize, j.clen);\r
avgsize += j.clen;\r
- minl = Math.min(minl, j.minLatency());\r
- maxl = Math.max(maxl, j.maxLatency());\r
- totall += j.totalLatency();\r
- totaln += j.deltime.size();\r
- fanout += j.deltime.size();\r
+ minl = Math.min(minl, j.minLatency());\r
+ maxl = Math.max(maxl, j.maxLatency());\r
+ totall += j.totalLatency();\r
+ totaln += j.deltime.size();\r
+ fanout += j.deltime.size();\r
}\r
if (jobs.size() > 0) {\r
avgsize /= jobs.size();\r
- fanout /= jobs.size();\r
+ fanout /= jobs.size();\r
}\r
long avgl = (totaln > 0) ? (totall / totaln) : 0;\r
- return date + "," + feedid + "," + minsize + "," + maxsize + "," + avgsize + "," + minl + "," + maxl + "," + avgl + "," + fanout;\r
+ return date + "," + feedid + "," + minsize + "," + maxsize + "," + avgsize + "," + minl + "," + maxl + ","\r
+ + avgl + "," + fanout;\r
}\r
}\r
+\r
private long getPstart(String t) {\r
- if (t.indexOf('.') > 0)\r
+ if (t.indexOf('.') >= 0) {\r
t = t.substring(0, t.indexOf('.'));\r
+ }\r
return Long.parseLong(t);\r
}\r
\r
@Override\r
public void run() {\r
- Map<String, Counters> map = new HashMap<String, Counters>();\r
+ Map<String, Counters> map = new HashMap<>();\r
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");\r
long start = System.currentTimeMillis();\r
try {\r
DB db = new DB();\r
@SuppressWarnings("resource")\r
Connection conn = db.getConnection();\r
- try(PreparedStatement ps = conn.prepareStatement(SELECT_SQL)) {\r
+ try (PreparedStatement ps = conn.prepareStatement(SELECT_SQL)) {\r
ps.setLong(1, from);\r
ps.setLong(2, to);\r
- try(ResultSet rs = ps.executeQuery()) {\r
+ try (ResultSet rs = ps.executeQuery()) {\r
while (rs.next()) {\r
String id = rs.getString("PUBLISH_ID");\r
int feed = rs.getInt("FEEDID");\r
db.release(conn);\r
}\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ logger.error("SQLException: " + e.getMessage());\r
}\r
- logger.debug("Query time: " + (System.currentTimeMillis()-start) + " ms");\r
- try (PrintWriter os = new PrintWriter(outfile)){\r
+ logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");\r
+ try (PrintWriter os = new PrintWriter(outfile)) {\r
os.println("date,feedid,minsize,maxsize,avgsize,minlat,maxlat,avglat,fanout");\r
- for (String key : new TreeSet<String>(map.keySet())) {\r
+ for (String key : new TreeSet<>(map.keySet())) {\r
Counters c = map.get(key);\r
os.println(c.toString());\r
}\r
} catch (FileNotFoundException e) {\r
- System.err.println("File cannot be written: "+outfile);\r
+ System.err.println("File cannot be written: " + outfile);\r
+ logger.error("FileNotFoundException: " + e.getMessage());\r
}\r
}\r
}\r
}\r
db.release(conn);\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ logger.error(e);\r
}\r
logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");\r
try (PrintWriter os = new PrintWriter(outfile)) {\r
os.print(sb.toString());\r
} catch (FileNotFoundException e) {\r
System.err.println("File cannot be written: " + outfile);\r
+ logger.error(e);\r
}\r
}\r
\r
feedmap.put("pubcount", n + 1);\r
} catch (JSONException e) {\r
feedmap.put("pubcount", 1);\r
+ logger.error(e);\r
}\r
} else if (type.equals("del")) {\r
String subid = "" + rs.getInt("DELIVERY_SUBID");\r
feedmap.put(subid, n + 1);\r
} catch (JSONException e) {\r
feedmap.put(subid, 1);\r
+ logger.error(e);\r
}\r
}\r
}\r
}\r
db.release(conn);\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ logger.error(e);\r
}\r
logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");\r
try {\r
os.close();\r
} catch (FileNotFoundException e) {\r
System.err.println("File cannot be written: " + outfile);\r
+ logger.error(e);\r
}\r
}\r
\r
*\r
* @param args\r
*/\r
- public static void main(String[] args) {\r
+ public void main(String[] args) {\r
int rtype = 0; // 0 -> day, 1 -> week, 2 -> month, 3 -> year\r
String infile = null;\r
String outfile = null;\r
feedmap.put("pubcount", n + count);\r
} catch (JSONException e) {\r
feedmap.put("pubcount", count);\r
+ logger.error(e);\r
}\r
} else if (type.equals("del")) {\r
String subid = tt[3];\r
feedmap.put(subid, n + count);\r
} catch (JSONException e) {\r
feedmap.put(subid, count);\r
+ logger.error(e);\r
}\r
}\r
}\r
System.out.println(t);\r
} catch (Exception e) {\r
System.err.println(e);\r
- e.printStackTrace();\r
+ logger.error(e);\r
}\r
}\r
}\r
} catch (FileNotFoundException e) {\r
System.err.println("File cannot be written: " + outfile);\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ logger.error("SQLException: " + e.getMessage());\r
}\r
logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");\r
}\r
\r
db.release(conn);\r
} catch (SQLException e) {\r
- e.printStackTrace();\r
+ logger.error("SQLException: " + e.getMessage());\r
}\r
logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");\r
try (PrintWriter os = new PrintWriter(outfile)){\r
}\r
} catch (FileNotFoundException e) {\r
System.err.println("File cannot be written: " + outfile);\r
+ logger.error("FileNotFoundException: " + e.getMessage());\r
}\r
}\r
}\r
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * *\r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * *\r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.reports;\r
-\r
-import java.io.FileNotFoundException;\r
-import java.io.PrintWriter;\r
-import java.sql.Connection;\r
-import java.sql.PreparedStatement;\r
-import java.sql.ResultSet;\r
-import java.sql.SQLException;\r
-import java.text.SimpleDateFormat;\r
-import java.util.Date;\r
-import java.util.HashMap;\r
-import java.util.Map;\r
-import java.util.TreeSet;\r
-\r
-import org.apache.log4j.Logger;\r
-import org.onap.dmaap.datarouter.provisioning.utils.DB;\r
-\r
-/**\r
- * Generate a traffic volume report. The report is a .csv file containing the following columns:\r
- * <table>\r
- * <tr><td>date</td><td>the date for this record</td></tr>\r
- * <tr><td>feedid</td><td>the Feed ID for this record</td></tr>\r
- * <tr><td>filespublished</td><td>the number of files published on this feed and date</td></tr>\r
- * <tr><td>bytespublished</td><td>the number of bytes published on this feed and date</td></tr>\r
- * <tr><td>filesdelivered</td><td>the number of files delivered on this feed and date</td></tr>\r
- * <tr><td>bytesdelivered</td><td>the number of bytes delivered on this feed and date</td></tr>\r
- * <tr><td>filesexpired</td><td>the number of files expired on this feed and date</td></tr>\r
- * <tr><td>bytesexpired</td><td>the number of bytes expired on this feed and date</td></tr>\r
- * </table>\r
- *\r
- * @author Robert P. Eby\r
- * @version $Id: VolumeReport.java,v 1.3 2014/02/28 15:11:13 eby Exp $\r
- */\r
-public class VolumeReport extends ReportBase {\r
- private static final String SELECT_SQL = "select EVENT_TIME, TYPE, FEEDID, CONTENT_LENGTH, RESULT" +\r
- " from LOG_RECORDS where EVENT_TIME >= ? and EVENT_TIME <= ? LIMIT ?, ?";\r
- private Logger loggerVolumeReport=Logger.getLogger("org.onap.dmaap.datarouter.reports");\r
- private class Counters {\r
- public int filespublished, filesdelivered, filesexpired;\r
- public long bytespublished, bytesdelivered, bytesexpired;\r
-\r
- @Override\r
- public String toString() {\r
- return String.format("%d,%d,%d,%d,%d,%d",\r
- filespublished, bytespublished, filesdelivered,\r
- bytesdelivered, filesexpired, bytesexpired);\r
- }\r
- }\r
-\r
- @Override\r
- public void run() {\r
- Map<String, Counters> map = new HashMap<String, Counters>();\r
- SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");\r
- long start = System.currentTimeMillis();\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- // We need to run this SELECT in stages, because otherwise we run out of memory!\r
- final long stepsize = 6000000L;\r
- boolean go_again = true;\r
- for (long i = 0; go_again; i += stepsize) {\r
- try (PreparedStatement ps = conn.prepareStatement(SELECT_SQL)) {\r
- ps.setLong(1, from);\r
- ps.setLong(2, to);\r
- ps.setLong(3, i);\r
- ps.setLong(4, stepsize);\r
- try(ResultSet rs = ps.executeQuery()) {\r
- go_again = false;\r
- while (rs.next()) {\r
- go_again = true;\r
- long etime = rs.getLong("EVENT_TIME");\r
- String type = rs.getString("TYPE");\r
- int feed = rs.getInt("FEEDID");\r
- long clen = rs.getLong("CONTENT_LENGTH");\r
- String key = sdf.format(new Date(etime)) + ":" + feed;\r
- Counters c = map.get(key);\r
- if (c == null) {\r
- c = new Counters();\r
- map.put(key, c);\r
- }\r
- if (type.equalsIgnoreCase("pub")) {\r
- c.filespublished++;\r
- c.bytespublished += clen;\r
- } else if (type.equalsIgnoreCase("del")) {\r
- // Only count successful deliveries\r
- int statusCode = rs.getInt("RESULT");\r
- if (statusCode >= 200 && statusCode < 300) {\r
- c.filesdelivered++;\r
- c.bytesdelivered += clen;\r
- }\r
- } else if (type.equalsIgnoreCase("exp")) {\r
- c.filesexpired++;\r
- c.bytesexpired += clen;\r
- }\r
- }\r
- }\r
-\r
- }\r
- catch (SQLException sqlException)\r
- {\r
- loggerVolumeReport.error("SqlException",sqlException);\r
- }\r
- }\r
-\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");\r
- try (PrintWriter os = new PrintWriter(outfile)) {\r
- os.println("date,feedid,filespublished,bytespublished,filesdelivered,bytesdelivered,filesexpired,bytesexpired");\r
- for(String key :new TreeSet<String>(map.keySet()))\r
- {\r
- Counters c = map.get(key);\r
- String[] p = key.split(":");\r
- os.println(String.format("%s,%s,%s", p[0], p[1], c.toString()));\r
- }\r
- }\r
- catch (FileNotFoundException e) {\r
- System.err.println("File cannot be written: " + outfile);\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.reports;
+
+import java.io.FileNotFoundException;
+import java.io.PrintWriter;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.TreeSet;
+
+import org.apache.log4j.Logger;
+import org.onap.dmaap.datarouter.provisioning.utils.DB;
+
+/**
+ * Generate a traffic volume report. The report is a .csv file containing the following columns:
+ * <table>
+ * <tr><td>date</td><td>the date for this record</td></tr>
+ * <tr><td>feedid</td><td>the Feed ID for this record</td></tr>
+ * <tr><td>filespublished</td><td>the number of files published on this feed and date</td></tr>
+ * <tr><td>bytespublished</td><td>the number of bytes published on this feed and date</td></tr>
+ * <tr><td>filesdelivered</td><td>the number of files delivered on this feed and date</td></tr>
+ * <tr><td>bytesdelivered</td><td>the number of bytes delivered on this feed and date</td></tr>
+ * <tr><td>filesexpired</td><td>the number of files expired on this feed and date</td></tr>
+ * <tr><td>bytesexpired</td><td>the number of bytes expired on this feed and date</td></tr>
+ * </table>
+ *
+ * @author Robert P. Eby
+ * @version $Id: VolumeReport.java,v 1.3 2014/02/28 15:11:13 eby Exp $
+ */
+public class VolumeReport extends ReportBase {
+
+    private static final String SELECT_SQL = "select EVENT_TIME, TYPE, FEEDID, CONTENT_LENGTH, RESULT" +
+        " from LOG_RECORDS where EVENT_TIME >= ? and EVENT_TIME <= ? LIMIT ?, ?";
+    private Logger loggerVolumeReport = Logger.getLogger("org.onap.dmaap.datarouter.reports");
+
+    /**
+     * Per-day, per-feed traffic counters accumulated while scanning LOG_RECORDS.
+     */
+    private class Counters {
+        int filespublished, filesdelivered, filesexpired;
+        long bytespublished, bytesdelivered, bytesexpired;
+
+        @Override
+        public String toString() {
+            return String.format("%d,%d,%d,%d,%d,%d",
+                filespublished, bytespublished, filesdelivered,
+                bytesdelivered, filesexpired, bytesexpired);
+        }
+    }
+
+    /**
+     * Scan LOG_RECORDS between {@code from} and {@code to} in pages, accumulate per-day/per-feed
+     * volume counters, and write them as CSV to {@code outfile}.
+     */
+    @Override
+    public void run() {
+        Map<String, Counters> map = new HashMap<>();
+        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
+        long start = System.currentTimeMillis();
+        try {
+            DB db = new DB();
+            @SuppressWarnings("resource")
+            Connection conn = db.getConnection();
+            // We need to run this SELECT in stages, because otherwise we run out of memory!
+            // NOTE(review): LIMIT paging without an ORDER BY has no guaranteed row order, so rows
+            // could be skipped or double-counted if the table changes between pages -- confirm.
+            final long stepsize = 6000000L;
+            boolean goAgain = true;
+            for (long i = 0; goAgain; i += stepsize) {
+                try (PreparedStatement ps = conn.prepareStatement(SELECT_SQL)) {
+                    ps.setLong(1, from);
+                    ps.setLong(2, to);
+                    ps.setLong(3, i);
+                    ps.setLong(4, stepsize);
+                    try (ResultSet rs = ps.executeQuery()) {
+                        // An empty page means we have read every row in the interval.
+                        goAgain = false;
+                        while (rs.next()) {
+                            goAgain = true;
+                            long etime = rs.getLong("EVENT_TIME");
+                            String type = rs.getString("TYPE");
+                            int feed = rs.getInt("FEEDID");
+                            long clen = rs.getLong("CONTENT_LENGTH");
+                            // Counters are keyed by "yyyy-MM-dd:feedid"
+                            String key = sdf.format(new Date(etime)) + ":" + feed;
+                            Counters c = map.get(key);
+                            if (c == null) {
+                                c = new Counters();
+                                map.put(key, c);
+                            }
+                            if (type.equalsIgnoreCase("pub")) {
+                                c.filespublished++;
+                                c.bytespublished += clen;
+                            } else if (type.equalsIgnoreCase("del")) {
+                                // Only count successful deliveries
+                                int statusCode = rs.getInt("RESULT");
+                                if (statusCode >= 200 && statusCode < 300) {
+                                    c.filesdelivered++;
+                                    c.bytesdelivered += clen;
+                                }
+                            } else if (type.equalsIgnoreCase("exp")) {
+                                c.filesexpired++;
+                                c.bytesexpired += clen;
+                            }
+                        }
+                    }
+                } catch (SQLException sqlException) {
+                    loggerVolumeReport.error("SqlException", sqlException);
+                }
+            }
+            db.release(conn);
+        } catch (SQLException e) {
+            // Log the throwable itself, not just the message, so the stack trace is preserved.
+            loggerVolumeReport.error("SQLException: " + e.getMessage(), e);
+        }
+        logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");
+        try (PrintWriter os = new PrintWriter(outfile)) {
+            os.println("date,feedid,filespublished,bytespublished,filesdelivered,bytesdelivered,filesexpired,bytesexpired");
+            for (String key : new TreeSet<>(map.keySet())) {
+                Counters c = map.get(key);
+                String[] p = key.split(":");
+                os.println(String.format("%s,%s,%s", p[0], p[1], c.toString()));
+            }
+        } catch (FileNotFoundException e) {
+            // Do not swallow the failure: mirror the logging added in the rest of this change set.
+            System.err.println("File cannot be written: " + outfile);
+            loggerVolumeReport.error("FileNotFoundException: " + e.getMessage(), e);
+        }
+    }
+}