/*******************************************************************************
 * ============LICENSE_START==================================================
 * * org.onap.dmaap
 * * ===========================================================================
 * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
 * * ===========================================================================
 * * Licensed under the Apache License, Version 2.0 (the "License");
 * * you may not use this file except in compliance with the License.
 * * You may obtain a copy of the License at
 * *
 * *      http://www.apache.org/licenses/LICENSE-2.0
 * *
 * * Unless required by applicable law or agreed to in writing, software
 * * distributed under the License is distributed on an "AS IS" BASIS,
 * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * * See the License for the specific language governing permissions and
 * * limitations under the License.
 * * ============LICENSE_END====================================================
 * *
 * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
 * *
 ******************************************************************************/

package org.onap.dmaap.datarouter.reports;

import java.io.FileNotFoundException;
import java.io.PrintWriter;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeSet;

import org.apache.log4j.Logger;
import org.onap.dmaap.datarouter.provisioning.utils.DB;

/**
 * Generate a traffic volume report. The report is a .csv file containing the following columns:
 * <table>
 * <tr><td>date</td><td>the date for this record</td></tr>
 * <tr><td>feedid</td><td>the Feed ID for this record</td></tr>
 * <tr><td>filespublished</td><td>the number of files published on this feed and date</td></tr>
 * <tr><td>bytespublished</td><td>the number of bytes published on this feed and date</td></tr>
 * <tr><td>filesdelivered</td><td>the number of files delivered on this feed and date</td></tr>
 * <tr><td>bytesdelivered</td><td>the number of bytes delivered on this feed and date</td></tr>
 * <tr><td>filesexpired</td><td>the number of files expired on this feed and date</td></tr>
 * <tr><td>bytesexpired</td><td>the number of bytes expired on this feed and date</td></tr>
 * </table>
 *
 * @author Robert P. Eby
 * @version $Id: VolumeReport.java,v 1.3 2014/02/28 15:11:13 eby Exp $
 */
public class VolumeReport extends ReportBase {
    private static final String SELECT_SQL = "select EVENT_TIME, TYPE, FEEDID, CONTENT_LENGTH, RESULT" +
        " from LOG_RECORDS where EVENT_TIME >= ? and EVENT_TIME <= ? LIMIT ?, ?";
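    // Note: "LIMIT ?, ?" is the MySQL/MariaDB (offset, row count) form; the two
    // placeholders are bound below to the current page offset and the page size,
    // so LOG_RECORDS is scanned in fixed-size pages rather than all at once.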
    private Logger loggerVolumeReport = Logger.getLogger("org.onap.dmaap.datarouter.reports");
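
    // Tallies for a single (date, feedid) pair; toString() renders the six count
    // columns in the same order as the CSV header written in run() below.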
    private class Counters {
        public int filespublished, filesdelivered, filesexpired;
        public long bytespublished, bytesdelivered, bytesexpired;

        @Override
        public String toString() {
            return String.format("%d,%d,%d,%d,%d,%d",
                filespublished, bytespublished, filesdelivered,
                bytesdelivered, filesexpired, bytesexpired);
        }
    }

    @Override
    public void run() {
        Map<String, Counters> map = new HashMap<String, Counters>();
        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
        long start = System.currentTimeMillis();
        try {
            DB db = new DB();
            @SuppressWarnings("resource")
            Connection conn = db.getConnection();
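            // The connection is handed back via db.release(conn) after the paging
            // loop; @SuppressWarnings("resource") covers the fact that it is not
            // managed by try-with-resources here.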
            // We need to run this SELECT in stages, because otherwise we run out of memory!
            final long stepsize = 6000000L;
            boolean go_again = true;
            for (long i = 0; go_again; i += stepsize) {
                try (PreparedStatement ps = conn.prepareStatement(SELECT_SQL)) {
                    ps.setLong(1, from);
                    ps.setLong(2, to);
                    ps.setLong(3, i);
                    ps.setLong(4, stepsize);
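                    // Each pass fetches one page of rows; the loop stops after the
                    // first pass that returns no rows (go_again stays false below).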
                    try (ResultSet rs = ps.executeQuery()) {
                        go_again = false;
                        while (rs.next()) {
                            go_again = true;
                            long etime = rs.getLong("EVENT_TIME");
                            String type = rs.getString("TYPE");
                            int feed = rs.getInt("FEEDID");
                            long clen = rs.getLong("CONTENT_LENGTH");
                            String key = sdf.format(new Date(etime)) + ":" + feed;
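                            // Keys look like "yyyy-MM-dd:feedid"; split(":") in the
                            // output loop recovers the date and feedid CSV columns.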
                            Counters c = map.get(key);
                            if (c == null) {
                                c = new Counters();
                                map.put(key, c);
                            }
                            if (type.equalsIgnoreCase("pub")) {
                                c.filespublished++;
                                c.bytespublished += clen;
                            } else if (type.equalsIgnoreCase("del")) {
                                // Only count successful deliveries
                                int statusCode = rs.getInt("RESULT");
                                if (statusCode >= 200 && statusCode < 300) {
                                    c.filesdelivered++;
                                    c.bytesdelivered += clen;
                                }
                            } else if (type.equalsIgnoreCase("exp")) {
                                c.filesexpired++;
                                c.bytesexpired += clen;
                            }
                        }
                    }
                } catch (SQLException sqlException) {
                    loggerVolumeReport.error("SqlException", sqlException);
                }
            }
            db.release(conn);
        } catch (SQLException e) {
            loggerVolumeReport.error("SQLException during report generation", e);
        }
        logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");
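        // Emit the CSV: header row first, then one line per (date, feedid) key;
        // TreeSet sorts the keys lexically, so rows come out grouped by date.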
        try (PrintWriter os = new PrintWriter(outfile)) {
            os.println("date,feedid,filespublished,bytespublished,filesdelivered,bytesdelivered,filesexpired,bytesexpired");
            for (String key : new TreeSet<String>(map.keySet())) {
                Counters c = map.get(key);
                String[] p = key.split(":");
                os.println(String.format("%s,%s,%s", p[0], p[1], c.toString()));
            }
        } catch (FileNotFoundException e) {
            System.err.println("File cannot be written: " + outfile);
        }
    }
}