/*******************************************************************************
 * ============LICENSE_START==================================================
 * * org.onap.dmaap
 * * ===========================================================================
 * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
 * * ===========================================================================
 * * Licensed under the Apache License, Version 2.0 (the "License");
 * * you may not use this file except in compliance with the License.
 * * You may obtain a copy of the License at
 * *
 * *      http://www.apache.org/licenses/LICENSE-2.0
 * *
 * * Unless required by applicable law or agreed to in writing, software
 * * distributed under the License is distributed on an "AS IS" BASIS,
 * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * * See the License for the specific language governing permissions and
 * * limitations under the License.
 * * ============LICENSE_END====================================================
 * *
 * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
 * *
 ******************************************************************************/

package org.onap.dmaap.datarouter.provisioning.utils;

import com.att.eelf.configuration.EELFLogger;
import com.att.eelf.configuration.EELFManager;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.LineNumberReader;
import java.io.Reader;
import java.nio.file.Files;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.text.ParseException;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.TreeSet;
import java.util.zip.GZIPInputStream;
import org.onap.dmaap.datarouter.provisioning.BaseServlet;
import org.onap.dmaap.datarouter.provisioning.beans.DeliveryExtraRecord;
import org.onap.dmaap.datarouter.provisioning.beans.DeliveryRecord;
import org.onap.dmaap.datarouter.provisioning.beans.ExpiryRecord;
import org.onap.dmaap.datarouter.provisioning.beans.Loadable;
import org.onap.dmaap.datarouter.provisioning.beans.LogRecord;
import org.onap.dmaap.datarouter.provisioning.beans.Parameters;
import org.onap.dmaap.datarouter.provisioning.beans.PubFailRecord;
import org.onap.dmaap.datarouter.provisioning.beans.PublishRecord;

/**
 * This class provides methods that run in a separate thread, in order to process logfiles uploaded into the spooldir.
 * These logfiles are loaded into the MariaDB LOG_RECORDS table. In a running provisioning server, there should only be
 * two places where records can be loaded into this table: here, and in the method DB.retroFit4(), which may be run at
 * startup to load the old (1.0) style log tables into LOG_RECORDS.
 * <p>This class maintains an {@link RLEBitSet} which can be used to easily see what records are presently in the
 * database. This bit set is used to synchronize between provisioning servers.</p>
 *
 * @version $Id: LogfileLoader.java,v 1.22 2014/03/12 19:45:41 eby Exp $
 */
public class LogfileLoader extends Thread {
    /**
     * NOT USED: Percentage of free space required before old records are removed.
     */
    public static final int REQUIRED_FREE_PCT = 20;

    /**
     * This is a singleton -- there is only one LogfileLoader object in the server.
     */
    private static LogfileLoader logfileLoader;

    /**
     * The SQL used to build the PreparedStatement which is loaded by a <i>Loadable</i>.
     */
    private static final String INSERT_SQL = "insert into LOG_RECORDS values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";

    /**
     * Each server can assign this many IDs.
     */
    private static final long SET_SIZE = (1L << 56);

    private final EELFLogger logger;
    private final DB db;
    private final String spooldir;
    private final long setStart;
    private final long setEnd;
    private RLEBitSet seqSet;
    private long nextId;
    private boolean idle;

    private LogfileLoader() {
        this.logger = EELFManager.getInstance().getLogger("InternalLog");
        this.db = new DB();
        this.spooldir = db.getProperties().getProperty("org.onap.dmaap.datarouter.provserver.spooldir");
        this.setStart = getIdRange();
        this.setEnd = setStart + SET_SIZE - 1;
        this.seqSet = new RLEBitSet();
        this.nextId = 0;
        this.idle = false;
        this.setDaemon(true);
        this.setName("LogfileLoader");
    }

    /**
     * Get the singleton LogfileLoader object, and start it if it is not running.
     *
     * @return the LogfileLoader
     */
    public static synchronized LogfileLoader getLoader() {
        if (logfileLoader == null) {
            logfileLoader = new LogfileLoader();
        }
        if (!logfileLoader.isAlive()) {
            logfileLoader.start();
        }
        return logfileLoader;
    }

    private long getIdRange() {
        long n;
        if (BaseServlet.isInitialActivePOD()) {
            n = 0;
        } else if (BaseServlet.isInitialStandbyPOD()) {
            n = SET_SIZE;
        } else {
            n = SET_SIZE * 2;
        }
        String r = String.format("[%X .. %X]", n, n + SET_SIZE - 1);
        logger.debug("This server shall assign RECORD_IDs in the range " + r);
        return n;
    }

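    // A worked example of the range arithmetic above, using the assignments shown
    // (0, SET_SIZE, and SET_SIZE * 2, with SET_SIZE = 1L << 56): the initial active
    // POD assigns RECORD_IDs in [0 .. FFFFFFFFFFFFFF], the initial standby POD in
    // [100000000000000 .. 1FFFFFFFFFFFFFF], and any other server in
    // [200000000000000 .. 2FFFFFFFFFFFFFF], so no two servers can collide.
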
    /**
     * Return the bit set representing the record IDs that are loaded in this database.
     *
     * @return the bit set
     */
    public RLEBitSet getBitSet() {
        return seqSet;
    }

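    // The bit set above is the synchronization aid described in the class javadoc:
    // a peer provisioning server can diff it against its own set to find the
    // RECORD_IDs it is missing. A minimal sketch, assuming RLEBitSet supports a
    // java.util.BitSet-style andNot() and that fetchPeerBitSet() is a hypothetical
    // helper that retrieves the peer's set:
    //
    //     RLEBitSet theirs = fetchPeerBitSet();                    // hypothetical
    //     theirs.andNot(LogfileLoader.getLoader().getBitSet());
    //     // 'theirs' now holds the record IDs the peer has that this server lacks
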
    /**
     * True if the LogfileLoader is currently waiting for work.
     *
     * @return true if idle
     */
    public boolean isIdle() {
        return idle;
    }

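    // Minimal usage sketch (illustrative, not part of the original class): a test
    // or admin tool could poll isIdle() to wait for the spool directory to drain:
    //
    //     LogfileLoader loader = LogfileLoader.getLoader();
    //     while (!loader.isIdle()) {
    //         Thread.sleep(100L);
    //     }
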
    /**
     * Run continuously to look for new logfiles in the spool directory and import them into the DB.
     * The spool is checked once per second. If free space on the MariaDB filesystem falls below
     * REQUIRED_FREE_PCT (normally 20%), then the oldest logfile entries are removed and the LOG_RECORDS
     * table is compacted until free space rises above the threshold.
     */
    @Override
    public void run() {
        while (true) {
            try {
                File dirfile = new File(spooldir);
                // loop until an exception escapes runLogFileLoad
                while (true) {
                    runLogFileLoad(dirfile);
                }
            } catch (Exception e) {
                logger.warn("PROV0020: Caught exception in LogfileLoader: " + e);
            }
        }
    }

    private void runLogFileLoad(File filesDir) {
        File[] inFiles = filesDir.listFiles((dir, name) -> name.startsWith("IN."));
        if (inFiles != null) {
            if (inFiles.length == 0) {
                idle = true;
                try {
                    Thread.sleep(1000L);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                idle = false;
            } else {
                // Remove old rows
                if (pruneRecords()) {
                    // Removed at least some entries, recompute the bit map
                    initializeNextid();
                }
                for (File file : inFiles) {
                    processFile(file);
                }
            }
        }
    }

    private void processFile(File infile) {
        if (logger.isDebugEnabled()) {
            logger.debug("PROV8001 Starting " + infile + " ...");
        }
        long time = System.currentTimeMillis();
        int[] n = process(infile);
        time = System.currentTimeMillis() - time;
        logger.info(String.format("PROV8000 Processed %s in %d ms; %d of %d records.",
                infile.toString(), time, n[0], n[1]));
        try {
            Files.delete(infile.toPath());
        } catch (IOException e) {
            logger.info("PROV8001 failed to delete file " + infile.getName(), e);
        }
    }

    boolean pruneRecords() {
        boolean did1 = false;
        long count = countRecords();
        Parameters defaultLogRetention = Parameters.getParameter(Parameters.DEFAULT_LOG_RETENTION);
        long threshold = (defaultLogRetention != null) ? Long.parseLong(defaultLogRetention.getValue()) : 1000000L;
        Parameters provLogRetention = Parameters.getParameter(Parameters.PROV_LOG_RETENTION);
        if (provLogRetention != null) {
            try {
                long n = Long.parseLong(provLogRetention.getValue());
                // This check is to prevent inadvertent errors from wiping the table out
                if (n > 1000000L) {
                    threshold = n;
                }
            } catch (NumberFormatException e) {
                // ignore
            }
        }
        logger.debug("Pruning LOG_RECORD table: records in DB=" + count + ", threshold=" + threshold);
        if (count > threshold) {
            // we need to remove this many records
            long countToRemove = count - threshold;
            // histogram of records per day
            Map<Long, Long> hist = getHistogram();
            // Determine the cutoff point to remove the needed number of records
            long sum = 0;
            long cutoff = 0;
            for (Long day : new TreeSet<>(hist.keySet())) {
                sum += hist.get(day);
                if (sum >= countToRemove) {
                    cutoff = day;
                    break;
                }
            }
            cutoff++;
            cutoff *= 86400000L;
            logger.debug("  Pruning records older than=" + (cutoff / 86400000L) + " (" + new Date(cutoff) + ")");
            Connection conn = null;
            try {
                // Limit to a million at a time to avoid tying up the DB for too long.
                conn = db.getConnection();
                try (PreparedStatement ps = conn.prepareStatement("DELETE from LOG_RECORDS where EVENT_TIME < ? limit 1000000")) {
                    ps.setLong(1, cutoff);
                    while (count > threshold) {
                        if (!ps.execute()) {
                            int dcount = ps.getUpdateCount();
                            count -= dcount;
                            logger.debug("  " + dcount + " rows deleted.");
                            did1 |= (dcount != 0);
                            if (dcount == 0) {
                                count = 0;    // prevent inf. loops
                            }
                        } else {
                            count = 0;    // shouldn't happen!
                        }
                    }
                }
                try (Statement stmt = conn.createStatement()) {
                    stmt.execute("OPTIMIZE TABLE LOG_RECORDS");
                }
            } catch (SQLException e) {
                logger.error("LogfileLoader.pruneRecords: " + e.getMessage(), e);
            } finally {
                db.release(conn);
            }
        }
        return did1;
    }

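    // Worked example of the pruning cutoff (synthetic numbers): with count=1,500,000
    // and threshold=1,000,000, countToRemove is 500,000. If the histogram holds
    // day 19000 -> 300,000 and day 19001 -> 250,000 records, the running sum first
    // reaches countToRemove at day 19001, so cutoff becomes (19001 + 1) * 86400000.
    // Pruning therefore works in whole days: all 550,000 records with EVENT_TIME
    // before the start of day 19002 are deleted, not exactly 500,000.
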
    private long countRecords() {
        long count = 0;
        try (Connection conn = db.getConnection();
             Statement stmt = conn.createStatement()) {
            try (ResultSet rs = stmt.executeQuery("SELECT COUNT(*) as COUNT from LOG_RECORDS")) {
                if (rs.next()) {
                    count = rs.getLong("COUNT");
                }
            }
        } catch (SQLException e) {
            logger.error("LogfileLoader.countRecords: " + e.getMessage(), e);
        }
        return count;
    }

    private Map<Long, Long> getHistogram() {
        Map<Long, Long> map = new HashMap<>();
        try (Connection conn = db.getConnection();
             Statement stmt = conn.createStatement()) {
            logger.debug("  LOG_RECORD table histogram...");
            try (ResultSet rs = stmt.executeQuery("SELECT FLOOR(EVENT_TIME/86400000) AS DAY, COUNT(*) AS COUNT FROM LOG_RECORDS GROUP BY DAY")) {
                while (rs.next()) {
                    long day = rs.getLong("DAY");
                    long cnt = rs.getLong("COUNT");
                    map.put(day, cnt);
                    logger.debug("  " + day + "  " + cnt);
                }
            }
        } catch (SQLException e) {
            logger.error("LogfileLoader.getHistogram: " + e.getMessage(), e);
        }
        return map;
    }

    private void initializeNextid() {
        Connection conn = null;
        try {
            conn = db.getConnection();
            RLEBitSet nbs = new RLEBitSet();
            try (Statement stmt = conn.createStatement()) {
                // Build a bitset of all records in the LOG_RECORDS table
                // We need to run this SELECT in stages, because otherwise we run out of memory!
                final long stepsize = 6000000L;
                boolean goAgain = true;
                for (long i = 0; goAgain; i += stepsize) {
                    String sql = String.format("select RECORD_ID from LOG_RECORDS LIMIT %d,%d", i, stepsize);
                    try (ResultSet rs = stmt.executeQuery(sql)) {
                        goAgain = false;
                        while (rs.next()) {
                            long n = rs.getLong("RECORD_ID");
                            nbs.set(n);
                            goAgain = true;
                        }
                    }
                }
            }
            seqSet = nbs;
            // Compare with the range for this server
            // Determine the next ID for this set of record IDs
            RLEBitSet tbs = (RLEBitSet) nbs.clone();
            RLEBitSet idset = new RLEBitSet();
            idset.set(setStart, setStart + SET_SIZE);
            tbs.and(idset);
            long t = tbs.length();
            nextId = (t == 0) ? setStart : (t - 1);
            if (nextId >= setStart + SET_SIZE) {
                // Handle wraparound, when the IDs reach the end of our "range"
                Long[] last = null;
                Iterator<Long[]> li = tbs.getRangeIterator();
                while (li.hasNext()) {
                    last = li.next();
                }
                if (last != null) {
                    tbs.clear(last[0], last[1] + 1);
                    t = tbs.length();
                    nextId = (t == 0) ? setStart : (t - 1);
                }
            }
            logger.debug(String.format("LogfileLoader.initializeNextid, next ID is %d (%x)", nextId, nextId));
        } catch (SQLException e) {
            logger.error("LogfileLoader.initializeNextid: " + e.getMessage(), e);
        } finally {
            db.release(conn);
        }
    }

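    // Example of the next-ID computation above (assuming RLEBitSet.length() mirrors
    // java.util.BitSet.length(), i.e. highest set bit + 1): with setStart = 0 and
    // RECORD_IDs 0..41 already present in this server's range, tbs.length() is 42,
    // so nextId becomes 41; process() pre-increments before use, assigning 42 to
    // the next record it inserts.
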
    @SuppressWarnings("resource")
    int[] process(File f) {
        int ok = 0;
        int total = 0;
        try {
            Connection conn = db.getConnection();
            PreparedStatement ps = conn.prepareStatement(INSERT_SQL);
            Reader r = f.getPath().endsWith(".gz")
                ? new InputStreamReader(new GZIPInputStream(new FileInputStream(f)))
                : new FileReader(f);
            try (LineNumberReader in = new LineNumberReader(r)) {
                String line;
                while ((line = in.readLine()) != null) {
                    try {
                        for (Loadable rec : buildRecords(line)) {
                            rec.load(ps);
                            if (rec instanceof LogRecord) {
                                LogRecord lr = ((LogRecord) rec);
                                if (!seqSet.get(lr.getRecordId())) {
                                    ps.executeUpdate();
                                    seqSet.set(lr.getRecordId());
                                } else {
                                    logger.debug("Duplicate record ignored: " + lr.getRecordId());
                                }
                            } else {
                                if (++nextId > setEnd) {
                                    nextId = setStart;
                                }
                                ps.setLong(18, nextId);
                                ps.executeUpdate();
                                seqSet.set(nextId);
                            }
                            ps.clearParameters();
                            ok++;
                        }
                    } catch (SQLException e) {
                        logger.warn("PROV8003 Invalid value in record: " + line, e);
                    } catch (NumberFormatException e) {
                        logger.warn("PROV8004 Invalid number in record: " + line, e);
                    } catch (ParseException e) {
                        logger.warn("PROV8005 Invalid date in record: " + line, e);
                    } catch (Exception e) {
                        logger.warn("PROV8006 Invalid pattern in record: " + line, e);
                    }
                    total++;
                }
            }
            ps.close();
            db.release(conn);
        } catch (SQLException | IOException e) {
            logger.warn("PROV8007 Exception reading " + f + ": " + e);
        }
        return new int[]{ok, total};
    }

    Loadable[] buildRecords(String line) throws ParseException {
        String[] pp = line.split("\\|");
        if (pp != null && pp.length >= 7) {
            String rtype = pp[1].toUpperCase();
            if ("PUB".equals(rtype) && pp.length == 11) {
                // Fields are: date|PUB|pubid|feedid|requrl|method|ctype|clen|srcip|user|status
                return new Loadable[]{new PublishRecord(pp)};
            }
            if ("DEL".equals(rtype) && pp.length == 12) {
                // Fields are: date|DEL|pubid|feedid|subid|requrl|method|ctype|clen|user|status|xpubid
                String[] subs = pp[4].split("\\s+");
                if (subs != null) {
                    Loadable[] rv = new Loadable[subs.length];
                    for (int i = 0; i < subs.length; i++) {
                        // create a new record for each individual sub
                        pp[4] = subs[i];
                        rv[i] = new DeliveryRecord(pp);
                    }
                    return rv;
                }
            }
            if ("EXP".equals(rtype) && pp.length == 11) {
                // Fields are: date|EXP|pubid|feedid|subid|requrl|method|ctype|clen|reason|attempts
                ExpiryRecord expiryRecord = new ExpiryRecord(pp);
                if ("other".equals(expiryRecord.getReason())) {
                    logger.info("Invalid reason '" + pp[9] + "' changed to 'other' for record: " + expiryRecord.getPublishId());
                }
                return new Loadable[]{expiryRecord};
            }
            if ("PBF".equals(rtype) && pp.length == 12) {
                // Fields are: date|PBF|pubid|feedid|requrl|method|ctype|clen-expected|clen-received|srcip|user|error
                return new Loadable[]{new PubFailRecord(pp)};
            }
            if ("DLX".equals(rtype) && pp.length == 7) {
                // Fields are: date|DLX|pubid|feedid|subid|clen-tosend|clen-sent
                return new Loadable[]{new DeliveryExtraRecord(pp)};
            }
            if ("LOG".equals(rtype) && (pp.length == 19 || pp.length == 20)) {
                // Fields are: date|LOG|pubid|feedid|requrl|method|ctype|clen|type|feedFileid|remoteAddr|user|status|subid|fileid|result|attempts|reason|record_id
                return new Loadable[]{new LogRecord(pp)};
            }
        }
        logger.warn("PROV8002 bad record: " + line);
        return new Loadable[0];
    }

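    // Illustrative fan-out (synthetic values): for a DEL line whose subid field
    // (pp[4]) is "101 102 103", the loop above rewrites pp[4] to each individual
    // ID in turn, so buildRecords() returns three DeliveryRecords, one per
    // subscriber. Each of the other record types yields a single Loadable, and a
    // malformed line yields none.
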
    /**
     * The LogfileLoader can be run stand-alone by invoking the main() method of this class.
     */
    public static void main(String[] a) throws InterruptedException {
        LogfileLoader.getLoader();
        Thread.sleep(200000L);
    }
}