/*******************************************************************************
 * ============LICENSE_START==================================================
 * ===========================================================================
 * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
 * ===========================================================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *      http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * ============LICENSE_END====================================================
 * ECOMP is a trademark and service mark of AT&T Intellectual Property.
 ******************************************************************************/
25 package org.onap.dmaap.datarouter.reports;
import com.att.eelf.configuration.EELFLogger;
import com.att.eelf.configuration.EELFManager;

import java.io.FileNotFoundException;
import java.io.PrintWriter;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeSet;

import org.onap.dmaap.datarouter.provisioning.utils.ProvDbUtils;
/**
 * Generate a traffic volume report. The report is a .csv file containing the following columns:
 * <table>
 * <tr><td>date</td><td>the date for this record</td></tr>
 * <tr><td>feedid</td><td>the Feed ID for this record</td></tr>
 * <tr><td>filespublished</td><td>the number of files published on this feed and date</td></tr>
 * <tr><td>bytespublished</td><td>the number of bytes published on this feed and date</td></tr>
 * <tr><td>filesdelivered</td><td>the number of files delivered on this feed and date</td></tr>
 * <tr><td>bytesdelivered</td><td>the number of bytes delivered on this feed and date</td></tr>
 * <tr><td>filesexpired</td><td>the number of files expired on this feed and date</td></tr>
 * <tr><td>bytesexpired</td><td>the number of bytes expired on this feed and date</td></tr>
 * </table>
 *
 * @author Robert P. Eby
 * @version $Id: VolumeReport.java,v 1.3 2014/02/28 15:11:13 eby Exp $
 */
58 public class VolumeReport extends ReportBase {
59 private EELFLogger loggerVolumeReport= EELFManager.getInstance().getLogger("ReportLog");
60 private class Counters {
61 int filespublished, filesdelivered, filesexpired;
62 long bytespublished, bytesdelivered, bytesexpired;
65 public String toString() {
66 return String.format("%d,%d,%d,%d,%d,%d",
67 filespublished, bytespublished, filesdelivered,
68 bytesdelivered, filesexpired, bytesexpired);
74 Map<String, Counters> map = new HashMap<>();
75 SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
76 long start = System.currentTimeMillis();
77 try (Connection conn = ProvDbUtils.getInstance().getConnection()) {
78 // We need to run this SELECT in stages, because otherwise we run out of memory!
79 final long stepsize = 6000000L;
80 boolean goAgain = true;
81 for (long i = 0; goAgain; i += stepsize) {
82 try (PreparedStatement ps = conn.prepareStatement(
83 "select EVENT_TIME, TYPE, FEEDID, CONTENT_LENGTH, RESULT from LOG_RECORDS "
84 + "where EVENT_TIME >= ? and EVENT_TIME <= ? LIMIT ?, ?")) {
88 ps.setLong(4, stepsize);
89 try (ResultSet rs = ps.executeQuery()) {
93 long etime = rs.getLong("EVENT_TIME");
94 String type = rs.getString("TYPE");
95 int feed = rs.getInt("FEEDID");
96 long clen = rs.getLong("CONTENT_LENGTH");
97 String key = sdf.format(new Date(etime)) + ":" + feed;
98 Counters c = map.get(key);
103 if (type.equalsIgnoreCase("pub")) {
105 c.bytespublished += clen;
106 } else if (type.equalsIgnoreCase("del")) {
107 // Only count successful deliveries
108 int statusCode = rs.getInt("RESULT");
109 if (statusCode >= 200 && statusCode < 300) {
111 c.bytesdelivered += clen;
113 } else if (type.equalsIgnoreCase("exp")) {
115 c.bytesexpired += clen;
119 } catch (SQLException sqlException) {
120 loggerVolumeReport.error("SqlException", sqlException);
123 } catch (SQLException e) {
124 loggerVolumeReport.error("SQLException: " + e.getMessage());
126 logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");
127 try (PrintWriter os = new PrintWriter(outfile)) {
128 os.println("date,feedid,filespublished,bytespublished,filesdelivered,bytesdelivered,filesexpired,bytesexpired");
129 for(String key :new TreeSet<String>(map.keySet()))
131 Counters c = map.get(key);
132 String[] p = key.split(":");
133 os.println(String.format("%s,%s,%s", p[0], p[1], c.toString()));
136 catch (FileNotFoundException e) {
137 System.err.println("File cannot be written: " + outfile);