Refactor Prov DB handling
[dmaap/datarouter.git] / datarouter-prov / src / main / java / org / onap / dmaap / datarouter / reports / VolumeReport.java
/*******************************************************************************
 * ============LICENSE_START==================================================
 * * org.onap.dmaap
 * * ===========================================================================
 * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
 * * ===========================================================================
 * * Licensed under the Apache License, Version 2.0 (the "License");
 * * you may not use this file except in compliance with the License.
 * * You may obtain a copy of the License at
 * *
 *  *      http://www.apache.org/licenses/LICENSE-2.0
 * *
 *  * Unless required by applicable law or agreed to in writing, software
 * * distributed under the License is distributed on an "AS IS" BASIS,
 * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * * See the License for the specific language governing permissions and
 * * limitations under the License.
 * * ============LICENSE_END====================================================
 * *
 * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
 * *
 ******************************************************************************/

package org.onap.dmaap.datarouter.reports;

import com.att.eelf.configuration.EELFLogger;
import com.att.eelf.configuration.EELFManager;
import java.io.FileNotFoundException;
import java.io.PrintWriter;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeSet;
import org.onap.dmaap.datarouter.provisioning.utils.ProvDbUtils;

/**
 * Generate a traffic volume report. The report is a .csv file containing the following columns:
 * <table>
 * <tr><td>date</td><td>the date for this record</td></tr>
 * <tr><td>feedid</td><td>the Feed ID for this record</td></tr>
 * <tr><td>filespublished</td><td>the number of files published on this feed and date</td></tr>
 * <tr><td>bytespublished</td><td>the number of bytes published on this feed and date</td></tr>
 * <tr><td>filesdelivered</td><td>the number of files delivered on this feed and date</td></tr>
 * <tr><td>bytesdelivered</td><td>the number of bytes delivered on this feed and date</td></tr>
 * <tr><td>filesexpired</td><td>the number of files expired on this feed and date</td></tr>
 * <tr><td>bytesexpired</td><td>the number of bytes expired on this feed and date</td></tr>
 * </table>
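 * <p>
 * An illustrative example row (hypothetical values, shown only to clarify the column order):
 * <pre>2019-06-15,42,10,1048576,9,943718,1,104858</pre>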
 *
 * @author Robert P. Eby
 * @version $Id: VolumeReport.java,v 1.3 2014/02/28 15:11:13 eby Exp $
 */
public class VolumeReport extends ReportBase {
    private EELFLogger loggerVolumeReport = EELFManager.getInstance().getLogger("ReportLog");
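
    /**
     * Per-day, per-feed running totals. {@link #toString()} emits the six count/byte
     * columns in the same order as the CSV header written by {@link #run()}.
     */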
    private class Counters {
        int filespublished, filesdelivered, filesexpired;
        long bytespublished, bytesdelivered, bytesexpired;

        @Override
        public String toString() {
            return String.format("%d,%d,%d,%d,%d,%d",
                filespublished, bytespublished, filesdelivered,
                bytesdelivered, filesexpired, bytesexpired);
        }
    }

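    /**
     * Build the report: page through LOG_RECORDS between {@code from} and {@code to},
     * accumulate per-day/per-feed counters keyed by "yyyy-MM-dd:feedid", then write the
     * sorted results to {@code outfile} as CSV.
     */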
    @Override
    public void run() {
        Map<String, Counters> map = new HashMap<>();
        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
        long start = System.currentTimeMillis();
        try (Connection conn = ProvDbUtils.getInstance().getConnection()) {
            // We need to run this SELECT in stages, because otherwise we run out of memory!
            final long stepsize = 6000000L;
            boolean goAgain = true;
            for (long i = 0; goAgain; i += stepsize) {
                try (PreparedStatement ps = conn.prepareStatement(
                    "select EVENT_TIME, TYPE, FEEDID, CONTENT_LENGTH, RESULT from LOG_RECORDS "
                        + "where EVENT_TIME >= ? and EVENT_TIME <= ? LIMIT ?, ?")) {
                    ps.setLong(1, from);
                    ps.setLong(2, to);
                    ps.setLong(3, i);
                    ps.setLong(4, stepsize);
                    try (ResultSet rs = ps.executeQuery()) {
                        goAgain = false;
                        while (rs.next()) {
                            goAgain = true;
                            long etime = rs.getLong("EVENT_TIME");
                            String type = rs.getString("TYPE");
                            int feed = rs.getInt("FEEDID");
                            long clen = rs.getLong("CONTENT_LENGTH");
                            String key = sdf.format(new Date(etime)) + ":" + feed;
                            Counters c = map.computeIfAbsent(key, k -> new Counters());
                            if (type.equalsIgnoreCase("pub")) {
                                c.filespublished++;
                                c.bytespublished += clen;
                            } else if (type.equalsIgnoreCase("del")) {
                                // Only count successful deliveries
                                int statusCode = rs.getInt("RESULT");
                                if (statusCode >= 200 && statusCode < 300) {
                                    c.filesdelivered++;
                                    c.bytesdelivered += clen;
                                }
                            } else if (type.equalsIgnoreCase("exp")) {
                                c.filesexpired++;
                                c.bytesexpired += clen;
                            }
                        }
                    }
                } catch (SQLException sqlException) {
                    loggerVolumeReport.error("SqlException", sqlException);
                }
            }
        } catch (SQLException e) {
            loggerVolumeReport.error("SQLException: " + e.getMessage(), e);
        }
        logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");
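        // Emit the CSV: header first, then one line per (date, feedid) key in sorted order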
        try (PrintWriter os = new PrintWriter(outfile)) {
            os.println("date,feedid,filespublished,bytespublished,filesdelivered,bytesdelivered,filesexpired,bytesexpired");
            for (String key : new TreeSet<>(map.keySet())) {
                Counters c = map.get(key);
                String[] p = key.split(":");
                os.println(String.format("%s,%s,%s", p[0], p[1], c.toString()));
            }
        } catch (FileNotFoundException e) {
            System.err.println("File cannot be written: " + outfile);
        }
    }
}