-<!--\r
- ============LICENSE_START==================================================\r
- * org.onap.dmaap\r
- * ===========================================================================\r
- * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * ===========================================================================\r
- * Licensed under the Apache License, Version 2.0 (the "License");\r
- * you may not use this file except in compliance with the License.\r
- * You may obtain a copy of the License at\r
- *\r
- * http://www.apache.org/licenses/LICENSE-2.0\r
- *\r
- * Unless required by applicable law or agreed to in writing, software\r
- * distributed under the License is distributed on an "AS IS" BASIS,\r
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * See the License for the specific language governing permissions and\r
- * limitations under the License.\r
- * ============LICENSE_END====================================================\r
- *\r
- * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- *\r
--->\r
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">\r
- <modelVersion>4.0.0</modelVersion>\r
- <parent>\r
- <groupId>org.onap.dmaap.datarouter</groupId>\r
- <artifactId>parent</artifactId>\r
- <version>1.0.1-SNAPSHOT</version>\r
- <relativePath>../pom.xml</relativePath>\r
- </parent>\r
- <artifactId>datarouter-node</artifactId>\r
- <packaging>jar</packaging>\r
- <name>datarouter-node</name>\r
- <url>https://github.com/att/DMAAP_DATAROUTER</url>\r
- <properties>\r
- <sonar.skip>false</sonar.skip>\r
- <sonar.jacoco.reportMissing.force.zero>true</sonar.jacoco.reportMissing.force.zero>\r
- <sitePath>/content/sites/site/${project.groupId}/${project.artifactId}/${project.version}</sitePath>\r
- <docker.location>${basedir}/target/${artifactId}</docker.location>\r
- <datarouter.node.image.name>onap/dmaap/datarouter-node</datarouter.node.image.name>\r
- </properties>\r
- <dependencies>\r
- <dependency>\r
- <groupId>junit</groupId>\r
- <artifactId>junit</artifactId>\r
- <version>3.8.1</version>\r
- <scope>test</scope>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.json</groupId>\r
- <artifactId>json</artifactId>\r
- <version>20160810</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>javax.mail</groupId>\r
- <artifactId>javax.mail-api</artifactId>\r
- <version>1.5.1</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>com.att.eelf</groupId>\r
- <artifactId>eelf-core</artifactId>\r
- <version>0.0.1</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>javax.servlet</groupId>\r
- <artifactId>servlet-api</artifactId>\r
- <version>2.5</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>com.thoughtworks.xstream</groupId>\r
- <artifactId>xstream</artifactId>\r
- <version>1.4.7</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>ch.qos.logback</groupId>\r
- <artifactId>logback-classic</artifactId>\r
- <version>1.2.0</version>\r
- <scope>compile</scope>\r
- </dependency>\r
- <dependency>\r
- <groupId>ch.qos.logback</groupId>\r
- <artifactId>logback-core</artifactId>\r
- <version>1.2.0</version>\r
- <scope>compile</scope>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-server</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-continuation</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-util</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-deploy</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-servlet</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-servlets</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-http</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-security</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-websocket</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-io</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.apache.commons</groupId>\r
- <artifactId>commons-io</artifactId>\r
- <version>1.3.2</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>commons-lang</groupId>\r
- <artifactId>commons-lang</artifactId>\r
- <version>2.4</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>commons-io</groupId>\r
- <artifactId>commons-io</artifactId>\r
- <version>2.1</version>\r
- <scope>compile</scope>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.apache.httpcomponents</groupId>\r
- <artifactId>httpcore</artifactId>\r
- <version>4.4</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>commons-codec</groupId>\r
- <artifactId>commons-codec</artifactId>\r
- <version>1.6</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.mozilla</groupId>\r
- <artifactId>rhino</artifactId>\r
- <version>1.7R3</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.apache.james</groupId>\r
- <artifactId>apache-mime4j-core</artifactId>\r
- <version>0.7</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.apache.httpcomponents</groupId>\r
- <artifactId>httpclient</artifactId>\r
- <version>4.5.3</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.sonatype.http-testing-harness</groupId>\r
- <artifactId>junit-runner</artifactId>\r
- <version>0.11</version>\r
- <exclusions>\r
- <exclusion>\r
- <groupId>org.databene</groupId>\r
- <artifactId>contiperf</artifactId>\r
- </exclusion>\r
- </exclusions>\r
- </dependency>\r
- <dependency>\r
- <groupId>log4j</groupId>\r
- <artifactId>log4j</artifactId>\r
- <version>1.2.17</version>\r
- <scope>compile</scope>\r
- </dependency>\r
- </dependencies>\r
- <profiles>\r
- <profile>\r
- <id>docker</id>\r
- <properties>\r
- <skipDockerBuild>false</skipDockerBuild>\r
- <skipDockerTag>false</skipDockerTag>\r
- <skipTests>true</skipTests>\r
- </properties>\r
- <build>\r
- <plugins>\r
- <plugin>\r
- <groupId>com.spotify</groupId>\r
- <artifactId>docker-maven-plugin</artifactId>\r
- <version>1.0.0</version>\r
- <configuration>\r
- <imageName>${onap.nexus.dockerregistry.daily}/${datarouter.node.image.name}</imageName>\r
- <dockerDirectory>${docker.location}</dockerDirectory>\r
- <serverId>${onap.nexus.dockerregistry.daily}</serverId>\r
- <skipDockerBuild>false</skipDockerBuild>\r
- <imageTags>\r
- <imageTag>${project.version}</imageTag>\r
- <imageTag>latest</imageTag>\r
- </imageTags>\r
- <forceTags>true</forceTags>\r
- <resources>\r
- <resource>\r
- <targetPath>/</targetPath>\r
- <directory>${project.basedir}</directory>\r
- <excludes>\r
- <exclude>target/**/*</exclude>\r
- <exclude>pom.xml</exclude>\r
- </excludes>\r
- </resource>\r
-\r
- <resource>\r
- <targetPath>/</targetPath>\r
- <directory>${project.build.directory}</directory>\r
- <include>**/**</include>\r
- </resource>\r
- </resources>\r
- </configuration>\r
- </plugin>\r
- </plugins>\r
- </build>\r
- </profile>\r
- </profiles>\r
-\r
- <build>\r
- <finalName>datarouter-node</finalName>\r
- <resources>\r
- <resource>\r
- <directory>src/main/resources</directory>\r
- <filtering>true</filtering>\r
- <includes>\r
- <include>**/*.properties</include>\r
- </includes>\r
- </resource>\r
- <resource>\r
- <directory>src/main/resources</directory>\r
- <filtering>true</filtering>\r
- <includes>\r
- <include>**/EelfMessages.properties</include>\r
- </includes>\r
- </resource>\r
- <resource>\r
- <directory>src/main/resources</directory>\r
- <filtering>true</filtering>\r
- <includes>\r
- <include>**/log4j.properties</include>\r
- </includes>\r
- </resource>\r
- </resources>\r
- <plugins>\r
- <plugin>\r
- <groupId>org.apache.maven.plugins</groupId>\r
- <artifactId>maven-compiler-plugin</artifactId>\r
- <configuration>\r
- <source>1.8</source>\r
- <target>1.8</target>\r
- </configuration>\r
- <version>3.6.0</version>\r
- </plugin>\r
- <plugin>\r
- <artifactId>maven-assembly-plugin</artifactId>\r
- <version>2.4</version>\r
- <configuration>\r
- <descriptorRefs>\r
- <descriptorRef>jar-with-dependencies</descriptorRef>\r
- </descriptorRefs>\r
- <outputDirectory>${basedir}/target/opt/app/datartr/lib</outputDirectory>\r
- <archive>\r
- <manifest>\r
- <addClasspath>true</addClasspath>\r
- <mainClass>org.onap.dmaap.datarouter.node.NodeMain</mainClass>\r
- </manifest>\r
- </archive>\r
- </configuration>\r
- <executions>\r
- <execution>\r
- <id>make-assembly</id>\r
- <!-- this is used for inheritance merges -->\r
- <phase>package</phase>\r
- <!-- bind to the packaging phase -->\r
- <goals>\r
- <goal>single</goal>\r
- </goals>\r
- </execution>\r
- </executions>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.apache.maven.plugins</groupId>\r
- <artifactId>maven-resources-plugin</artifactId>\r
- <version>2.7</version>\r
- <executions>\r
- <execution>\r
- <id>copy-docker-file</id>\r
- <phase>package</phase>\r
- <goals>\r
- <goal>copy-resources</goal>\r
- </goals>\r
- <configuration>\r
- <outputDirectory>${docker.location}</outputDirectory>\r
- <overwrite>true</overwrite>\r
- <resources>\r
- <resource>\r
- <directory>${basedir}/src/main/resources/docker</directory>\r
- <filtering>true</filtering>\r
- <includes>\r
- <include>**/*</include>\r
- </includes>\r
- </resource>\r
- </resources>\r
- </configuration>\r
- </execution>\r
- <execution>\r
- <id>copy-resources</id>\r
- <phase>validate</phase>\r
- <goals>\r
- <goal>copy-resources</goal>\r
- </goals>\r
- <configuration>\r
- <outputDirectory>${basedir}/target/opt/app/datartr/etc</outputDirectory>\r
- <resources>\r
- <resource>\r
- <directory>${basedir}/src/main/resources</directory>\r
- <includes>\r
- <include>misc/**</include>\r
- <include>**/**</include>\r
- </includes>\r
- </resource>\r
- </resources>\r
- </configuration>\r
- </execution>\r
- <execution>\r
- <id>copy-resources-1</id>\r
- <phase>validate</phase>\r
- <goals>\r
- <goal>copy-resources</goal>\r
- </goals>\r
- <configuration>\r
- <outputDirectory>${basedir}/target/opt/app/datartr/self_signed</outputDirectory>\r
- <resources>\r
- <resource>\r
- <directory>${basedir}/self_signed</directory>\r
- <includes>\r
- <include>misc/**</include>\r
- <include>**/**</include>\r
- </includes>\r
- </resource>\r
- </resources>\r
- </configuration>\r
- </execution>\r
- </executions>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.apache.maven.plugins</groupId>\r
- <artifactId>maven-dependency-plugin</artifactId>\r
- <version>2.10</version>\r
- <executions>\r
- <execution>\r
- <id>copy-dependencies</id>\r
- <phase>package</phase>\r
- <goals>\r
- <goal>copy-dependencies</goal>\r
- </goals>\r
- <configuration>\r
- <outputDirectory>${project.build.directory}/opt/app/datartr/lib</outputDirectory>\r
- <overWriteReleases>false</overWriteReleases>\r
- <overWriteSnapshots>false</overWriteSnapshots>\r
- <overWriteIfNewer>true</overWriteIfNewer>\r
- </configuration>\r
- </execution>\r
- </executions>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.apache.maven.plugins</groupId>\r
- <artifactId>maven-javadoc-plugin</artifactId>\r
- <configuration>\r
- <failOnError>false</failOnError>\r
- </configuration>\r
- <executions>\r
- <execution>\r
- <id>attach-javadocs</id>\r
- <goals>\r
- <goal>jar</goal>\r
- </goals>\r
- </execution>\r
- </executions>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.apache.maven.plugins</groupId>\r
- <artifactId>maven-source-plugin</artifactId>\r
- <version>2.2.1</version>\r
- <executions>\r
- <execution>\r
- <id>attach-sources</id>\r
- <goals>\r
- <goal>jar-no-fork</goal>\r
- </goals>\r
- </execution>\r
- </executions>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.codehaus.mojo</groupId>\r
- <artifactId>cobertura-maven-plugin</artifactId>\r
- <version>2.7</version>\r
- <configuration>\r
- <formats>\r
- <format>html</format>\r
- <format>xml</format>\r
- </formats>\r
- <check/>\r
- </configuration>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.sonatype.plugins</groupId>\r
- <artifactId>nexus-staging-maven-plugin</artifactId>\r
- <version>1.6.7</version>\r
- <extensions>true</extensions>\r
- <configuration>\r
- <nexusUrl>${onap.nexus.url}</nexusUrl>\r
- <stagingProfileId>176c31dfe190a</stagingProfileId>\r
- <serverId>ecomp-staging</serverId>\r
- </configuration>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.jacoco</groupId>\r
- <artifactId>jacoco-maven-plugin</artifactId>\r
- <version>${jacoco.version}</version>\r
- <configuration>\r
- <excludes>\r
- <exclude>**/gen/**</exclude>\r
- <exclude>**/generated-sources/**</exclude>\r
- <exclude>**/yang-gen/**</exclude>\r
- <exclude>**/pax/**</exclude>\r
- </excludes>\r
- </configuration>\r
- <executions>\r
- <execution>\r
- <id>pre-unit-test</id>\r
- <goals>\r
- <goal>prepare-agent</goal>\r
- </goals>\r
- <configuration>\r
- <destFile>${project.build.directory}/code-coverage/jacoco-ut.exec</destFile>\r
- <propertyName>surefireArgLine</propertyName>\r
- </configuration>\r
- </execution>\r
- <execution>\r
- <id>post-unit-test</id>\r
- <phase>test</phase>\r
- <goals>\r
- <goal>report</goal>\r
- </goals>\r
- <configuration>\r
- <dataFile>${project.build.directory}/code-coverage/jacoco-ut.exec</dataFile>\r
- <outputDirectory>${project.reporting.outputDirectory}/jacoco-ut</outputDirectory>\r
- </configuration>\r
- </execution>\r
- <execution>\r
- <id>pre-integration-test</id>\r
- <phase>pre-integration-test</phase>\r
- <goals>\r
- <goal>prepare-agent</goal>\r
- </goals>\r
- <configuration>\r
- <destFile>${project.build.directory}/code-coverage/jacoco-it.exec</destFile>\r
- <propertyName>failsafeArgLine</propertyName>\r
- </configuration>\r
- </execution>\r
- <execution>\r
- <id>post-integration-test</id>\r
- <phase>post-integration-test</phase>\r
- <goals>\r
- <goal>report</goal>\r
- </goals>\r
- <configuration>\r
- <dataFile>${project.build.directory}/code-coverage/jacoco-it.exec</dataFile>\r
- <outputDirectory>${project.reporting.outputDirectory}/jacoco-it</outputDirectory>\r
- </configuration>\r
- </execution>\r
- </executions>\r
- </plugin>\r
- </plugins>\r
- </build>\r
-</project>\r
+<!--
+ ============LICENSE_START==================================================
+ * org.onap.dmaap
+ * ===========================================================================
+ * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * ===========================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END====================================================
+ *
+ * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ *
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.onap.dmaap.datarouter</groupId>
+ <artifactId>parent</artifactId>
+ <version>1.0.1-SNAPSHOT</version>
+ <relativePath>../pom.xml</relativePath>
+ </parent>
+ <artifactId>datarouter-node</artifactId>
+ <packaging>jar</packaging>
+ <name>datarouter-node</name>
+ <url>https://github.com/att/DMAAP_DATAROUTER</url>
+ <properties>
+ <sonar.skip>false</sonar.skip>
+ <sonar.jacoco.reportMissing.force.zero>true</sonar.jacoco.reportMissing.force.zero>
+ <sitePath>/content/sites/site/${project.groupId}/${project.artifactId}/${project.version}</sitePath>
+ <docker.location>${basedir}/target/${artifactId}</docker.location>
+ <datarouter.node.image.name>onap/dmaap/datarouter-node</datarouter.node.image.name>
+ </properties>
+ <dependencies>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>3.8.1</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.json</groupId>
+ <artifactId>json</artifactId>
+ <version>20160810</version>
+ </dependency>
+ <dependency>
+ <groupId>javax.mail</groupId>
+ <artifactId>javax.mail-api</artifactId>
+ <version>1.5.1</version>
+ </dependency>
+ <dependency>
+ <groupId>com.att.eelf</groupId>
+ <artifactId>eelf-core</artifactId>
+ <version>0.0.1</version>
+ </dependency>
+ <dependency>
+ <groupId>javax.servlet</groupId>
+ <artifactId>servlet-api</artifactId>
+ <version>2.5</version>
+ </dependency>
+ <dependency>
+ <groupId>com.thoughtworks.xstream</groupId>
+ <artifactId>xstream</artifactId>
+ <version>1.4.7</version>
+ </dependency>
+ <dependency>
+ <groupId>ch.qos.logback</groupId>
+ <artifactId>logback-classic</artifactId>
+ <version>1.2.0</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>ch.qos.logback</groupId>
+ <artifactId>logback-core</artifactId>
+ <version>1.2.0</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-server</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-continuation</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-util</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-deploy</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-servlet</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-servlets</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-http</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-security</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-websocket</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-io</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-io</artifactId>
+ <version>1.3.2</version>
+ </dependency>
+ <dependency>
+ <groupId>commons-lang</groupId>
+ <artifactId>commons-lang</artifactId>
+ <version>2.4</version>
+ </dependency>
+ <dependency>
+ <groupId>commons-io</groupId>
+ <artifactId>commons-io</artifactId>
+ <version>2.1</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpcore</artifactId>
+ <version>4.4</version>
+ </dependency>
+ <dependency>
+ <groupId>commons-codec</groupId>
+ <artifactId>commons-codec</artifactId>
+ <version>1.6</version>
+ </dependency>
+ <dependency>
+ <groupId>org.mozilla</groupId>
+ <artifactId>rhino</artifactId>
+ <version>1.7R3</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.james</groupId>
+ <artifactId>apache-mime4j-core</artifactId>
+ <version>0.7</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpclient</artifactId>
+ <version>4.5.3</version>
+ </dependency>
+ <dependency>
+ <groupId>org.sonatype.http-testing-harness</groupId>
+ <artifactId>junit-runner</artifactId>
+ <version>0.11</version>
+ <exclusions>
+ <exclusion>
+ <groupId>org.databene</groupId>
+ <artifactId>contiperf</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ <version>1.2.17</version>
+ <scope>compile</scope>
+ </dependency>
+ </dependencies>
+ <profiles>
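+ <!-- The "docker" profile (activate with -Pdocker) builds and tags the onap/dmaap/datarouter-node image from the Dockerfile staged in ${docker.location}; unit tests are skipped while this profile is active. -->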
+ <profile>
+ <id>docker</id>
+ <properties>
+ <skipDockerBuild>false</skipDockerBuild>
+ <skipDockerTag>false</skipDockerTag>
+ <skipTests>true</skipTests>
+ </properties>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>com.spotify</groupId>
+ <artifactId>docker-maven-plugin</artifactId>
+ <version>1.0.0</version>
+ <configuration>
+ <imageName>${onap.nexus.dockerregistry.daily}/${datarouter.node.image.name}</imageName>
+ <dockerDirectory>${docker.location}</dockerDirectory>
+ <serverId>${onap.nexus.dockerregistry.daily}</serverId>
+ <skipDockerBuild>false</skipDockerBuild>
+ <imageTags>
+ <imageTag>${project.version}</imageTag>
+ <imageTag>latest</imageTag>
+ </imageTags>
+ <forceTags>true</forceTags>
+ <resources>
+ <resource>
+ <targetPath>/</targetPath>
+ <directory>${project.basedir}</directory>
+ <excludes>
+ <exclude>target/**/*</exclude>
+ <exclude>pom.xml</exclude>
+ </excludes>
+ </resource>
+
+ <resource>
+ <targetPath>/</targetPath>
+ <directory>${project.build.directory}</directory>
+ <include>**/**</include>
+ </resource>
+ </resources>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
+
+ <build>
+ <finalName>datarouter-node</finalName>
+ <resources>
+ <resource>
+ <directory>src/main/resources</directory>
+ <filtering>true</filtering>
+ <includes>
+ <include>**/*.properties</include>
+ </includes>
+ </resource>
+ <resource>
+ <directory>src/main/resources</directory>
+ <filtering>true</filtering>
+ <includes>
+ <include>**/EelfMessages.properties</include>
+ </includes>
+ </resource>
+ <resource>
+ <directory>src/main/resources</directory>
+ <filtering>true</filtering>
+ <includes>
+ <include>**/log4j.properties</include>
+ </includes>
+ </resource>
+ </resources>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <configuration>
+ <source>1.8</source>
+ <target>1.8</target>
+ </configuration>
+ <version>3.6.0</version>
+ </plugin>
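+ <!-- Packages the node as an executable jar-with-dependencies (main class org.onap.dmaap.datarouter.node.NodeMain) under target/opt/app/datartr/lib during the package phase. -->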
+ <plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <version>2.4</version>
+ <configuration>
+ <descriptorRefs>
+ <descriptorRef>jar-with-dependencies</descriptorRef>
+ </descriptorRefs>
+ <outputDirectory>${basedir}/target/opt/app/datartr/lib</outputDirectory>
+ <archive>
+ <manifest>
+ <addClasspath>true</addClasspath>
+ <mainClass>org.onap.dmaap.datarouter.node.NodeMain</mainClass>
+ </manifest>
+ </archive>
+ </configuration>
+ <executions>
+ <execution>
+ <id>make-assembly</id>
+ <!-- this is used for inheritance merges -->
+ <phase>package</phase>
+ <!-- bind to the packaging phase -->
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-resources-plugin</artifactId>
+ <version>2.7</version>
+ <executions>
+ <execution>
+ <id>copy-docker-file</id>
+ <phase>package</phase>
+ <goals>
+ <goal>copy-resources</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${docker.location}</outputDirectory>
+ <overwrite>true</overwrite>
+ <resources>
+ <resource>
+ <directory>${basedir}/src/main/resources/docker</directory>
+ <filtering>true</filtering>
+ <includes>
+ <include>**/*</include>
+ </includes>
+ </resource>
+ </resources>
+ </configuration>
+ </execution>
+ <execution>
+ <id>copy-resources</id>
+ <phase>validate</phase>
+ <goals>
+ <goal>copy-resources</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${basedir}/target/opt/app/datartr/etc</outputDirectory>
+ <resources>
+ <resource>
+ <directory>${basedir}/src/main/resources</directory>
+ <includes>
+ <include>misc/**</include>
+ <include>**/**</include>
+ </includes>
+ </resource>
+ </resources>
+ </configuration>
+ </execution>
+ <execution>
+ <id>copy-resources-1</id>
+ <phase>validate</phase>
+ <goals>
+ <goal>copy-resources</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${basedir}/target/opt/app/datartr/self_signed</outputDirectory>
+ <resources>
+ <resource>
+ <directory>${basedir}/self_signed</directory>
+ <includes>
+ <include>misc/**</include>
+ <include>**/**</include>
+ </includes>
+ </resource>
+ </resources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-dependency-plugin</artifactId>
+ <version>2.10</version>
+ <executions>
+ <execution>
+ <id>copy-dependencies</id>
+ <phase>package</phase>
+ <goals>
+ <goal>copy-dependencies</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${project.build.directory}/opt/app/datartr/lib</outputDirectory>
+ <overWriteReleases>false</overWriteReleases>
+ <overWriteSnapshots>false</overWriteSnapshots>
+ <overWriteIfNewer>true</overWriteIfNewer>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <configuration>
+ <failOnError>false</failOnError>
+ </configuration>
+ <executions>
+ <execution>
+ <id>attach-javadocs</id>
+ <goals>
+ <goal>jar</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-source-plugin</artifactId>
+ <version>2.2.1</version>
+ <executions>
+ <execution>
+ <id>attach-sources</id>
+ <goals>
+ <goal>jar-no-fork</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>cobertura-maven-plugin</artifactId>
+ <version>2.7</version>
+ <configuration>
+ <formats>
+ <format>html</format>
+ <format>xml</format>
+ </formats>
+ <check/>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.sonatype.plugins</groupId>
+ <artifactId>nexus-staging-maven-plugin</artifactId>
+ <version>1.6.7</version>
+ <extensions>true</extensions>
+ <configuration>
+ <nexusUrl>${onap.nexus.url}</nexusUrl>
+ <stagingProfileId>176c31dfe190a</stagingProfileId>
+ <serverId>ecomp-staging</serverId>
+ </configuration>
+ </plugin>
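+ <!-- JaCoCo coverage: the prepare-agent executions wire the agent into surefireArgLine (unit tests) and failsafeArgLine (integration tests); the report executions turn the .exec files under target/code-coverage into the jacoco-ut and jacoco-it reports in the reporting output directory. -->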
+ <plugin>
+ <groupId>org.jacoco</groupId>
+ <artifactId>jacoco-maven-plugin</artifactId>
+ <version>${jacoco.version}</version>
+ <configuration>
+ <excludes>
+ <exclude>**/gen/**</exclude>
+ <exclude>**/generated-sources/**</exclude>
+ <exclude>**/yang-gen/**</exclude>
+ <exclude>**/pax/**</exclude>
+ </excludes>
+ </configuration>
+ <executions>
+ <execution>
+ <id>pre-unit-test</id>
+ <goals>
+ <goal>prepare-agent</goal>
+ </goals>
+ <configuration>
+ <destFile>${project.build.directory}/code-coverage/jacoco-ut.exec</destFile>
+ <propertyName>surefireArgLine</propertyName>
+ </configuration>
+ </execution>
+ <execution>
+ <id>post-unit-test</id>
+ <phase>test</phase>
+ <goals>
+ <goal>report</goal>
+ </goals>
+ <configuration>
+ <dataFile>${project.build.directory}/code-coverage/jacoco-ut.exec</dataFile>
+ <outputDirectory>${project.reporting.outputDirectory}/jacoco-ut</outputDirectory>
+ </configuration>
+ </execution>
+ <execution>
+ <id>pre-integration-test</id>
+ <phase>pre-integration-test</phase>
+ <goals>
+ <goal>prepare-agent</goal>
+ </goals>
+ <configuration>
+ <destFile>${project.build.directory}/code-coverage/jacoco-it.exec</destFile>
+ <propertyName>failsafeArgLine</propertyName>
+ </configuration>
+ </execution>
+ <execution>
+ <id>post-integration-test</id>
+ <phase>post-integration-test</phase>
+ <goals>
+ <goal>report</goal>
+ </goals>
+ <configuration>
+ <dataFile>${project.build.directory}/code-coverage/jacoco-it.exec</dataFile>
+ <outputDirectory>${project.reporting.outputDirectory}/jacoco-it</outputDirectory>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-import java.io.*;\r
-import org.apache.log4j.Logger;\r
-\r
-/**\r
- * Main control point for delivering files to destinations.\r
- * <p>\r
- * The Delivery class manages assignment of delivery threads to delivery\r
- * queues and creation and destruction of delivery queues as\r
- * configuration changes. DeliveryQueues are assigned threads based on a\r
- * modified round-robin approach giving priority to queues with more work\r
- * as measured by both bytes to deliver and files to deliver and lower\r
- * priority to queues that already have delivery threads working.\r
- * A delivery thread continues to work for a delivery queue as long as\r
- * that queue has more files to deliver.\r
- */\r
-public class Delivery {\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.Delivery");\r
- private static class DelItem implements Comparable<DelItem> {\r
- private String pubid;\r
- private String spool;\r
- public int compareTo(DelItem x) {\r
- int i = pubid.compareTo(x.pubid);\r
- if (i == 0) {\r
- i = spool.compareTo(x.spool);\r
- }\r
- return(i);\r
- }\r
- public String getPublishId() {\r
- return(pubid);\r
- }\r
- public String getSpool() {\r
- return(spool);\r
- }\r
- public DelItem(String pubid, String spool) {\r
- this.pubid = pubid;\r
- this.spool = spool;\r
- }\r
- }\r
- private double fdstart;\r
- private double fdstop;\r
- private int threads;\r
- private int curthreads;\r
- private NodeConfigManager config;\r
- private Hashtable<String, DeliveryQueue> dqs = new Hashtable<String, DeliveryQueue>();\r
- private DeliveryQueue[] queues = new DeliveryQueue[0];\r
- private int qpos = 0;\r
- private long nextcheck;\r
- private Runnable cmon = new Runnable() {\r
- public void run() {\r
- checkconfig();\r
- }\r
- };\r
- /**\r
- * Constructs a new Delivery system using the specified configuration manager.\r
- * @param config The configuration manager for this delivery system.\r
- */\r
- public Delivery(NodeConfigManager config) {\r
- this.config = config;\r
- config.registerConfigTask(cmon);\r
- checkconfig();\r
- }\r
- private void cleardir(String dir) {\r
- if (dqs.get(dir) != null) {\r
- return;\r
- }\r
- File fdir = new File(dir);\r
- for (File junk: fdir.listFiles()) {\r
- if (junk.isFile()) {\r
- junk.delete();\r
- }\r
- }\r
- fdir.delete();\r
- }\r
- private void freeDiskCheck() {\r
- File spoolfile = new File(config.getSpoolBase());\r
- long tspace = spoolfile.getTotalSpace();\r
- long start = (long)(tspace * fdstart);\r
- long stop = (long)(tspace * fdstop);\r
- long cur = spoolfile.getUsableSpace();\r
- if (cur >= start) {\r
- return;\r
- }\r
- Vector<DelItem> cv = new Vector<DelItem>();\r
- for (String sdir: dqs.keySet()) {\r
- for (String meta: (new File(sdir)).list()) {\r
- if (!meta.endsWith(".M") || meta.charAt(0) == '.') {\r
- continue;\r
- }\r
- cv.add(new DelItem(meta.substring(0, meta.length() - 2), sdir));\r
- }\r
- }\r
- DelItem[] items = cv.toArray(new DelItem[cv.size()]);\r
- Arrays.sort(items);\r
- logger.info("NODE0501 Free disk space below red threshold. current=" + cur + " red=" + start + " total=" + tspace);\r
- for (DelItem item: items) {\r
- long amount = dqs.get(item.getSpool()).cancelTask(item.getPublishId());\r
- logger.info("NODE0502 Attempting to discard " + item.getSpool() + "/" + item.getPublishId() + " to free up disk");\r
- if (amount > 0) {\r
- cur += amount;\r
- if (cur >= stop) {\r
- cur = spoolfile.getUsableSpace();\r
- }\r
- if (cur >= stop) {\r
- logger.info("NODE0503 Free disk space at or above yellow threshold. current=" + cur + " yellow=" + stop + " total=" + tspace);\r
- return;\r
- }\r
- }\r
- }\r
- cur = spoolfile.getUsableSpace();\r
- if (cur >= stop) {\r
- logger.info("NODE0503 Free disk space at or above yellow threshold. current=" + cur + " yellow=" + stop + " total=" + tspace);\r
- return;\r
- }\r
- logger.warn("NODE0504 Unable to recover sufficient disk space to reach green status. current=" + cur + " yellow=" + stop + " total=" + tspace);\r
- }\r
- private void cleardirs() {\r
- String basedir = config.getSpoolBase();\r
- String nbase = basedir + "/n";\r
- for (String nodedir: (new File(nbase)).list()) {\r
- if (!nodedir.startsWith(".")) {\r
- cleardir(nbase + "/" + nodedir);\r
- }\r
- }\r
- String sxbase = basedir + "/s";\r
- for (String sxdir: (new File(sxbase)).list()) {\r
- if (sxdir.startsWith(".")) {\r
- continue;\r
- }\r
- File sxf = new File(sxbase + "/" + sxdir);\r
- for (String sdir: sxf.list()) {\r
- if (!sdir.startsWith(".")) {\r
- cleardir(sxbase + "/" + sxdir + "/" + sdir);\r
- }\r
- }\r
- sxf.delete(); // won't if anything still in it\r
- }\r
- }\r
- private synchronized void checkconfig() {\r
- if (!config.isConfigured()) {\r
- return;\r
- }\r
- fdstart = config.getFreeDiskStart();\r
- fdstop = config.getFreeDiskStop();\r
- threads = config.getDeliveryThreads();\r
- if (threads < 1) {\r
- threads = 1;\r
- }\r
- DestInfo[] alldis = config.getAllDests();\r
- DeliveryQueue[] nqs = new DeliveryQueue[alldis.length];\r
- qpos = 0;\r
- Hashtable<String, DeliveryQueue> ndqs = new Hashtable<String, DeliveryQueue>();\r
- for (DestInfo di: alldis) {\r
- String spl = di.getSpool();\r
- DeliveryQueue dq = dqs.get(spl);\r
- if (dq == null) {\r
- dq = new DeliveryQueue(config, di);\r
- } else {\r
- dq.config(di);\r
- }\r
- ndqs.put(spl, dq);\r
- nqs[qpos++] = dq;\r
- }\r
- queues = nqs;\r
- dqs = ndqs;\r
- cleardirs();\r
- while (curthreads < threads) {\r
- curthreads++;\r
- (new Thread() {\r
- {\r
- setName("Delivery Thread");\r
- }\r
- public void run() {\r
- dodelivery();\r
- }\r
- }).start();\r
- }\r
- nextcheck = 0;\r
- notify();\r
- }\r
- private void dodelivery() {\r
- DeliveryQueue dq;\r
- while ((dq = getNextQueue()) != null) {\r
- dq.run();\r
- }\r
- }\r
- private synchronized DeliveryQueue getNextQueue() {\r
- while (true) {\r
- if (curthreads > threads) {\r
- curthreads--;\r
- return(null);\r
- }\r
- if (qpos < queues.length) {\r
- DeliveryQueue dq = queues[qpos++];\r
- if (dq.isSkipSet()) {\r
- continue;\r
- }\r
- nextcheck = 0;\r
- notify();\r
- return(dq);\r
- }\r
- long now = System.currentTimeMillis();\r
- if (now < nextcheck) {\r
- try {\r
- wait(nextcheck + 500 - now);\r
- } catch (Exception e) {\r
- }\r
- now = System.currentTimeMillis();\r
- }\r
- if (now >= nextcheck) {\r
- nextcheck = now + 5000;\r
- qpos = 0;\r
- freeDiskCheck();\r
- }\r
- }\r
- }\r
- /**\r
- * Reset the retry timer for a delivery queue\r
- */\r
- public synchronized void resetQueue(String spool) {\r
- if (spool != null) {\r
- DeliveryQueue dq = dqs.get(spool);\r
- if (dq != null) {\r
- dq.resetQueue();\r
- }\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+import java.io.*;
+
+import org.apache.log4j.Logger;
+
+/**
+ * Main control point for delivering files to destinations.
+ * <p>
+ * The Delivery class manages assignment of delivery threads to delivery
+ * queues and creation and destruction of delivery queues as
+ * configuration changes. DeliveryQueues are assigned threads using a
+ * modified round-robin approach: queues with more work (measured in both
+ * bytes and files still to deliver) get priority, while queues that already
+ * have delivery threads working get lower priority.
+ * A delivery thread continues to work for a delivery queue as long as
+ * that queue has more files to deliver.
+ */
+public class Delivery {
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.Delivery");
+
+ private static class DelItem implements Comparable<DelItem> {
+ private String pubid;
+ private String spool;
+
+ public int compareTo(DelItem x) {
+ int i = pubid.compareTo(x.pubid);
+ if (i == 0) {
+ i = spool.compareTo(x.spool);
+ }
+ return (i);
+ }
+
+ public String getPublishId() {
+ return (pubid);
+ }
+
+ public String getSpool() {
+ return (spool);
+ }
+
+ public DelItem(String pubid, String spool) {
+ this.pubid = pubid;
+ this.spool = spool;
+ }
+ }
+
+ private double fdstart;
+ private double fdstop;
+ private int threads;
+ private int curthreads;
+ private NodeConfigManager config;
+ private Hashtable<String, DeliveryQueue> dqs = new Hashtable<String, DeliveryQueue>();
+ private DeliveryQueue[] queues = new DeliveryQueue[0];
+ private int qpos = 0;
+ private long nextcheck;
+ private Runnable cmon = new Runnable() {
+ public void run() {
+ checkconfig();
+ }
+ };
+
+ /**
+ * Constructs a new Delivery system using the specified configuration manager.
+ *
+ * @param config The configuration manager for this delivery system.
+ */
+ public Delivery(NodeConfigManager config) {
+ this.config = config;
+ config.registerConfigTask(cmon);
+ checkconfig();
+ }
+
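+ // Remove a spool directory that no longer backs an active delivery queue: delete its regular files, then the directory itself.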
+ private void cleardir(String dir) {
+ if (dqs.get(dir) != null) {
+ return;
+ }
+ File fdir = new File(dir);
+ for (File junk : fdir.listFiles()) {
+ if (junk.isFile()) {
+ junk.delete();
+ }
+ }
+ fdir.delete();
+ }
+
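+ // When usable space under the spool base drops below the "red" (start) threshold, discard the oldest queued deliveries until usable space recovers to the "yellow" (stop) threshold.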
+ private void freeDiskCheck() {
+ File spoolfile = new File(config.getSpoolBase());
+ long tspace = spoolfile.getTotalSpace();
+ long start = (long) (tspace * fdstart);
+ long stop = (long) (tspace * fdstop);
+ long cur = spoolfile.getUsableSpace();
+ if (cur >= start) {
+ return;
+ }
+ Vector<DelItem> cv = new Vector<DelItem>();
+ for (String sdir : dqs.keySet()) {
+ for (String meta : (new File(sdir)).list()) {
+ if (!meta.endsWith(".M") || meta.charAt(0) == '.') {
+ continue;
+ }
+ cv.add(new DelItem(meta.substring(0, meta.length() - 2), sdir));
+ }
+ }
+ DelItem[] items = cv.toArray(new DelItem[cv.size()]);
+ Arrays.sort(items);
+ logger.info("NODE0501 Free disk space below red threshold. current=" + cur + " red=" + start + " total=" + tspace);
+ for (DelItem item : items) {
+ long amount = dqs.get(item.getSpool()).cancelTask(item.getPublishId());
+ logger.info("NODE0502 Attempting to discard " + item.getSpool() + "/" + item.getPublishId() + " to free up disk");
+ if (amount > 0) {
+ cur += amount;
+ if (cur >= stop) {
+ cur = spoolfile.getUsableSpace();
+ }
+ if (cur >= stop) {
+ logger.info("NODE0503 Free disk space at or above yellow threshold. current=" + cur + " yellow=" + stop + " total=" + tspace);
+ return;
+ }
+ }
+ }
+ cur = spoolfile.getUsableSpace();
+ if (cur >= stop) {
+ logger.info("NODE0503 Free disk space at or above yellow threshold. current=" + cur + " yellow=" + stop + " total=" + tspace);
+ return;
+ }
+ logger.warn("NODE0504 Unable to recover sufficient disk space to reach green status. current=" + cur + " yellow=" + stop + " total=" + tspace);
+ }
+
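+ // Sweep the spool trees under "<spoolbase>/n" and "<spoolbase>/s", removing directories that no longer correspond to a configured delivery queue.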
+ private void cleardirs() {
+ String basedir = config.getSpoolBase();
+ String nbase = basedir + "/n";
+ for (String nodedir : (new File(nbase)).list()) {
+ if (!nodedir.startsWith(".")) {
+ cleardir(nbase + "/" + nodedir);
+ }
+ }
+ String sxbase = basedir + "/s";
+ for (String sxdir : (new File(sxbase)).list()) {
+ if (sxdir.startsWith(".")) {
+ continue;
+ }
+ File sxf = new File(sxbase + "/" + sxdir);
+ for (String sdir : sxf.list()) {
+ if (!sdir.startsWith(".")) {
+ cleardir(sxbase + "/" + sxdir + "/" + sdir);
+ }
+ }
+ sxf.delete(); // silently fails if the directory still has entries
+ }
+ }
+
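+ // Apply the current node configuration: rebuild the set of delivery queues (reusing existing ones where possible), clear orphaned spool directories, and start delivery threads up to the configured count.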
+ private synchronized void checkconfig() {
+ if (!config.isConfigured()) {
+ return;
+ }
+ fdstart = config.getFreeDiskStart();
+ fdstop = config.getFreeDiskStop();
+ threads = config.getDeliveryThreads();
+ if (threads < 1) {
+ threads = 1;
+ }
+ DestInfo[] alldis = config.getAllDests();
+ DeliveryQueue[] nqs = new DeliveryQueue[alldis.length];
+ qpos = 0;
+ Hashtable<String, DeliveryQueue> ndqs = new Hashtable<String, DeliveryQueue>();
+ for (DestInfo di : alldis) {
+ String spl = di.getSpool();
+ DeliveryQueue dq = dqs.get(spl);
+ if (dq == null) {
+ dq = new DeliveryQueue(config, di);
+ } else {
+ dq.config(di);
+ }
+ ndqs.put(spl, dq);
+ nqs[qpos++] = dq;
+ }
+ queues = nqs;
+ dqs = ndqs;
+ cleardirs();
+ while (curthreads < threads) {
+ curthreads++;
+ (new Thread() {
+ {
+ setName("Delivery Thread");
+ }
+
+ public void run() {
+ dodelivery();
+ }
+ }).start();
+ }
+ nextcheck = 0;
+ notify();
+ }
+
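+ // Body of a delivery thread: keep taking the next queue with work and delivering from it until getNextQueue() signals shutdown by returning null.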
+ private void dodelivery() {
+ DeliveryQueue dq;
+ while ((dq = getNextQueue()) != null) {
+ dq.run();
+ }
+ }
+
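+ // Hand the calling delivery thread the next queue with pending work, waiting when nothing is ready.
+ // Roughly every five seconds the queue list is rescanned and the free-disk check runs; returns null when the thread count has been reduced and this thread should exit.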
+ private synchronized DeliveryQueue getNextQueue() {
+ while (true) {
+ if (curthreads > threads) {
+ curthreads--;
+ return (null);
+ }
+ if (qpos < queues.length) {
+ DeliveryQueue dq = queues[qpos++];
+ if (dq.isSkipSet()) {
+ continue;
+ }
+ nextcheck = 0;
+ notify();
+ return (dq);
+ }
+ long now = System.currentTimeMillis();
+ if (now < nextcheck) {
+ try {
+ wait(nextcheck + 500 - now);
+ } catch (Exception e) {
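+ // Interrupted or spurious wakeup: fall through and recheck the time below.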
+ }
+ now = System.currentTimeMillis();
+ }
+ if (now >= nextcheck) {
+ nextcheck = now + 5000;
+ qpos = 0;
+ freeDiskCheck();
+ }
+ }
+ }
+
+ /**
+ * Reset the retry timer for a delivery queue
+ */
+ public synchronized void resetQueue(String spool) {
+ if (spool != null) {
+ DeliveryQueue dq = dqs.get(spool);
+ if (dq != null) {
+ dq.resetQueue();
+ }
+ }
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.io.*;\r
-import java.util.*;\r
-\r
-/**\r
- * Mechanism for monitoring and controlling delivery of files to a destination.\r
- * <p>\r
- * The DeliveryQueue class maintains lists of DeliveryTasks for a single\r
- * destination (a subscription or another data router node) and assigns\r
- * delivery threads to try to deliver them. It also maintains a delivery\r
- * status that causes it to back off on delivery attempts after a failure.\r
- * <p>\r
- * If the most recent delivery result was a failure, then no more attempts\r
- * will be made for a period of time. Initially, and on the first failure\r
- * following a success, this delay will be DeliveryQueueHelper.getInitFailureTimer() (milliseconds).\r
- * If, after this delay, additional failures occur, each failure will\r
- * multiply the delay by DeliveryQueueHelper.getFailureBackoff() up to a\r
- * maximum delay specified by DeliveryQueueHelper.getMaxFailureTimer().\r
- * Note that this behavior applies to the delivery queue as a whole and not\r
- * to individual files in the queue. If multiple files are being\r
- * delivered and one fails, the delay will be started. If a second\r
- * delivery fails while the delay was active, it will not change the delay\r
- * or change the duration of any subsequent delay.\r
- * If, however, it succeeds, it will cancel the delay.\r
- * <p>\r
- * The queue maintains 3 collections of files to deliver: A todo list of\r
- * files that will be attempted, a working set of files that are being\r
- * attempted, and a retry set of files that were attempted and failed.\r
- * Whenever the todo list is empty and needs to be refilled, a scan of the\r
- * spool directory is made and the file names sorted. Any files in the working set are ignored.\r
- * If a DeliveryTask for the file is in the retry set, then that delivery\r
- * task is placed on the todo list. Otherwise, a new DeliveryTask for the\r
- * file is created and placed on the todo list.\r
- * If, when a DeliveryTask is about to be removed from the todo list, its\r
- * age exceeds DeliveryQueueHelper.getExpirationTimer(), then it is instead\r
- * marked as expired.\r
- * <p>\r
- * A delivery queue also maintains a skip flag. This flag is true if the\r
- * failure timer is active or if no files are found in a directory scan.\r
- */\r
-public class DeliveryQueue implements Runnable, DeliveryTaskHelper {\r
- private DeliveryQueueHelper dqh;\r
- private DestInfo di;\r
- private Hashtable<String, DeliveryTask> working = new Hashtable<String, DeliveryTask>();\r
- private Hashtable<String, DeliveryTask> retry = new Hashtable<String, DeliveryTask>();\r
- private int todoindex;\r
- private boolean failed;\r
- private long failduration;\r
- private long resumetime;\r
- File dir;\r
- private Vector<DeliveryTask> todo = new Vector<DeliveryTask>();\r
- /**\r
- * Try to cancel a delivery task.\r
- * @return The length of the task in bytes or 0 if the task cannot be cancelled.\r
- */\r
- public synchronized long cancelTask(String pubid) {\r
- if (working.get(pubid) != null) {\r
- return(0);\r
- }\r
- DeliveryTask dt = retry.get(pubid);\r
- if (dt == null) {\r
- for (int i = todoindex; i < todo.size(); i++) {\r
- DeliveryTask xdt = todo.get(i);\r
- if (xdt.getPublishId().equals(pubid)) {\r
- dt = xdt;\r
- break;\r
- }\r
- }\r
- }\r
- if (dt == null) {\r
- dt = new DeliveryTask(this, pubid);\r
- if (dt.getFileId() == null) {\r
- return(0);\r
- }\r
- }\r
- if (dt.isCleaned()) {\r
- return(0);\r
- }\r
- StatusLog.logExp(dt.getPublishId(), dt.getFeedId(), dt.getSubId(), dt.getURL(), dt.getMethod(), dt.getCType(), dt.getLength(), "diskFull", dt.getAttempts());\r
- dt.clean();\r
- return(dt.getLength());\r
- }\r
- /**\r
- * Mark that a delivery task has succeeded.\r
- */\r
- public synchronized void markSuccess(DeliveryTask task) {\r
- working.remove(task.getPublishId());\r
- task.clean();\r
- failed = false;\r
- failduration = 0;\r
- }\r
- /**\r
- * Mark that a delivery task has expired.\r
- */\r
- public synchronized void markExpired(DeliveryTask task) {\r
- task.clean();\r
- }\r
- /**\r
- * Mark that a delivery task has failed permanently.\r
- */\r
- public synchronized void markFailNoRetry(DeliveryTask task) {\r
- working.remove(task.getPublishId());\r
- task.clean();\r
- failed = false;\r
- failduration = 0;\r
- }\r
- private void fdupdate() {\r
- if (!failed) {\r
- failed = true;\r
- if (failduration == 0) {\r
- failduration = dqh.getInitFailureTimer();\r
- }\r
- resumetime = System.currentTimeMillis() + failduration;\r
- long maxdur = dqh.getMaxFailureTimer();\r
- failduration = (long)(failduration * dqh.getFailureBackoff());\r
- if (failduration > maxdur) {\r
- failduration = maxdur;\r
- }\r
- }\r
- }\r
- /**\r
- * Mark that a delivery task has been redirected.\r
- */\r
- public synchronized void markRedirect(DeliveryTask task) {\r
- working.remove(task.getPublishId());\r
- retry.put(task.getPublishId(), task);\r
- }\r
- /**\r
- * Mark that a delivery task has temporarily failed.\r
- */\r
- public synchronized void markFailWithRetry(DeliveryTask task) {\r
- working.remove(task.getPublishId());\r
- retry.put(task.getPublishId(), task);\r
- fdupdate();\r
- }\r
- /**\r
- * Get the next task.\r
- */\r
- public synchronized DeliveryTask getNext() {\r
- DeliveryTask ret = peekNext();\r
- if (ret != null) {\r
- todoindex++;\r
- working.put(ret.getPublishId(), ret);\r
- }\r
- return(ret);\r
- }\r
- /**\r
- * Peek at the next task.\r
- */\r
- public synchronized DeliveryTask peekNext() {\r
- long now = System.currentTimeMillis();\r
- long mindate = now - dqh.getExpirationTimer();\r
- if (failed) {\r
- if (now > resumetime) {\r
- failed = false;\r
- } else {\r
- return(null);\r
- }\r
- }\r
- while (true) {\r
- if (todoindex >= todo.size()) {\r
- todoindex = 0;\r
- todo = new Vector<DeliveryTask>();\r
- String[] files = dir.list();\r
- Arrays.sort(files);\r
- for (String fname: files) {\r
- if (!fname.endsWith(".M")) {\r
- continue;\r
- }\r
- String fname2 = fname.substring(0, fname.length() - 2);\r
- long pidtime = 0;\r
- int dot = fname2.indexOf('.');\r
- if (dot < 1) {\r
- continue;\r
- }\r
- try {\r
- pidtime = Long.parseLong(fname2.substring(0, dot));\r
- } catch (Exception e) {\r
- }\r
- if (pidtime < 1000000000000L) {\r
- continue;\r
- }\r
- if (working.get(fname2) != null) {\r
- continue;\r
- }\r
- DeliveryTask dt = retry.get(fname2);\r
- if (dt == null) {\r
- dt = new DeliveryTask(this, fname2);\r
- }\r
- todo.add(dt);\r
- }\r
- retry = new Hashtable<String, DeliveryTask>();\r
- }\r
- if (todoindex < todo.size()) {\r
- DeliveryTask dt = todo.get(todoindex);\r
- if (dt.isCleaned()) {\r
- todoindex++;\r
- continue;\r
- }\r
- if (dt.getDate() >= mindate) {\r
- return(dt);\r
- }\r
- todoindex++;\r
- reportExpiry(dt);\r
- continue;\r
- }\r
- return(null);\r
- }\r
- }\r
- /**\r
- * Create a delivery queue for a given destination info\r
- */\r
- public DeliveryQueue(DeliveryQueueHelper dqh, DestInfo di) {\r
- this.dqh = dqh;\r
- this.di = di;\r
- dir = new File(di.getSpool());\r
- dir.mkdirs();\r
- }\r
- /**\r
- * Update the destination info for this delivery queue\r
- */\r
- public void config(DestInfo di) {\r
- this.di = di;\r
- }\r
- /**\r
- * Get the dest info\r
- */\r
- public DestInfo getDestInfo() {\r
- return(di);\r
- }\r
- /**\r
- * Get the config manager\r
- */\r
- public DeliveryQueueHelper getConfig() {\r
- return(dqh);\r
- }\r
- /**\r
- * Exceptional condition occurred during delivery\r
- */\r
- public void reportDeliveryExtra(DeliveryTask task, long sent) {\r
- StatusLog.logDelExtra(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getLength(), sent);\r
- }\r
- /**\r
- * Message too old to deliver\r
- */\r
- public void reportExpiry(DeliveryTask task) {\r
- StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "retriesExhausted", task.getAttempts());\r
- markExpired(task);\r
- }\r
- /**\r
- * Completed a delivery attempt\r
- */\r
- public void reportStatus(DeliveryTask task, int status, String xpubid, String location) {\r
- if (status < 300) {\r
- StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, xpubid);\r
- markSuccess(task);\r
- } else if (status < 400 && dqh.isFollowRedirects()) {\r
- StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);\r
- if (dqh.handleRedirection(di, location, task.getFileId())) {\r
- markRedirect(task);\r
- } else {\r
- StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "notRetryable", task.getAttempts());\r
- markFailNoRetry(task);\r
- }\r
- } else if (status < 500) {\r
- StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);\r
- StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "notRetryable", task.getAttempts());\r
- markFailNoRetry(task);\r
- } else {\r
- StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);\r
- markFailWithRetry(task);\r
- }\r
- }\r
- /**\r
- * Delivery failed by reason of an exception\r
- */\r
- public void reportException(DeliveryTask task, Exception exception) {\r
- StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), -1, exception.toString());\r
- dqh.handleUnreachable(di);\r
- markFailWithRetry(task);\r
- }\r
- /**\r
- * Get the feed ID for a subscription\r
- * @param subid The subscription ID\r
- * @return The feed ID\r
- */\r
- public String getFeedId(String subid) {\r
- return(dqh.getFeedId(subid));\r
- }\r
- /**\r
- * Get the URL to deliver a message to given the file ID\r
- */\r
- public String getDestURL(String fileid) {\r
- return(dqh.getDestURL(di, fileid));\r
- }\r
- /**\r
- * Deliver files until there's a failure or there are no more\r
- * files to deliver\r
- */\r
- public void run() {\r
- DeliveryTask t;\r
- long endtime = System.currentTimeMillis() + dqh.getFairTimeLimit();\r
- int filestogo = dqh.getFairFileLimit();\r
- while ((t = getNext()) != null) {\r
- t.run();\r
- if (--filestogo <= 0 || System.currentTimeMillis() > endtime) {\r
- break;\r
- }\r
- }\r
- }\r
- /**\r
- * Is there no work to do for this queue right now?\r
- */\r
- public synchronized boolean isSkipSet() {\r
- return(peekNext() == null);\r
- }\r
- /**\r
- * Reset the retry timer\r
- */\r
- public void resetQueue() {\r
- resumetime = System.currentTimeMillis();\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.io.*;
+import java.util.*;
+
+/**
+ * Mechanism for monitoring and controlling delivery of files to a destination.
+ * <p>
+ * The DeliveryQueue class maintains lists of DeliveryTasks for a single
+ * destination (a subscription or another data router node) and assigns
+ * delivery threads to try to deliver them. It also maintains a delivery
+ * status that causes it to back off on delivery attempts after a failure.
+ * <p>
+ * If the most recent delivery result was a failure, then no more attempts
+ * will be made for a period of time. Initially, and on the first failure
+ * following a success, this delay will be DeliveryQueueHelper.getInitFailureTimer() (milliseconds).
+ * If, after this delay, additional failures occur, each failure will
+ * multiply the delay by DeliveryQueueHelper.getFailureBackoff() up to a
+ * maximum delay specified by DeliveryQueueHelper.getMaxFailureTimer().
+ * Note that this behavior applies to the delivery queue as a whole and not
+ * to individual files in the queue. If multiple files are being
+ * delivered and one fails, the delay will be started. If a second
+ * delivery fails while the delay was active, it will not change the delay
+ * or change the duration of any subsequent delay.
+ * If, however, it succeeds, it will cancel the delay.
+ * <p>
+ * The queue maintains 3 collections of files to deliver: A todo list of
+ * files that will be attempted, a working set of files that are being
+ * attempted, and a retry set of files that were attempted and failed.
+ * Whenever the todo list is empty and needs to be refilled, a scan of the
+ * spool directory is made and the file names sorted. Any files in the working set are ignored.
+ * If a DeliveryTask for the file is in the retry set, then that delivery
+ * task is placed on the todo list. Otherwise, a new DeliveryTask for the
+ * file is created and placed on the todo list.
+ * If, when a DeliveryTask is about to be removed from the todo list, its
+ * age exceeds DeliveryQueueHelper.getExpirationTimer(), then it is instead
+ * marked as expired.
+ * <p>
+ * A delivery queue also maintains a skip flag. This flag is true if the
+ * failure timer is active or if no files are found in a directory scan.
+ */
+public class DeliveryQueue implements Runnable, DeliveryTaskHelper {
+ private DeliveryQueueHelper dqh;
+ private DestInfo di;
+ private Hashtable<String, DeliveryTask> working = new Hashtable<String, DeliveryTask>();
+ private Hashtable<String, DeliveryTask> retry = new Hashtable<String, DeliveryTask>();
+ private int todoindex;
+ private boolean failed;
+ private long failduration;
+ private long resumetime;
+ File dir;
+ private Vector<DeliveryTask> todo = new Vector<DeliveryTask>();
+
+ /**
+ * Try to cancel a delivery task.
+ *
+ * @return The length of the task in bytes or 0 if the task cannot be cancelled.
+ */
+ public synchronized long cancelTask(String pubid) {
+ if (working.get(pubid) != null) {
+ return (0);
+ }
+ DeliveryTask dt = retry.get(pubid);
+ if (dt == null) {
+ for (int i = todoindex; i < todo.size(); i++) {
+ DeliveryTask xdt = todo.get(i);
+ if (xdt.getPublishId().equals(pubid)) {
+ dt = xdt;
+ break;
+ }
+ }
+ }
+ if (dt == null) {
+ dt = new DeliveryTask(this, pubid);
+ if (dt.getFileId() == null) {
+ return (0);
+ }
+ }
+ if (dt.isCleaned()) {
+ return (0);
+ }
+ StatusLog.logExp(dt.getPublishId(), dt.getFeedId(), dt.getSubId(), dt.getURL(), dt.getMethod(), dt.getCType(), dt.getLength(), "diskFull", dt.getAttempts());
+ dt.clean();
+ return (dt.getLength());
+ }
+
+ /**
+ * Mark that a delivery task has succeeded.
+ */
+ public synchronized void markSuccess(DeliveryTask task) {
+ working.remove(task.getPublishId());
+ task.clean();
+ failed = false;
+ failduration = 0;
+ }
+
+ /**
+ * Mark that a delivery task has expired.
+ */
+ public synchronized void markExpired(DeliveryTask task) {
+ task.clean();
+ }
+
+ /**
+ * Mark that a delivery task has failed permanently.
+ */
+ public synchronized void markFailNoRetry(DeliveryTask task) {
+ working.remove(task.getPublishId());
+ task.clean();
+ failed = false;
+ failduration = 0;
+ }
+
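+ /**
+ * Start the failure back-off delay (does nothing if a delay is already active).
+ * Editor's illustrative note with assumed timer values: if getInitFailureTimer()
+ * is 60000, getFailureBackoff() is 2.0 and getMaxFailureTimer() is 3600000, then
+ * consecutive failures pause the queue for 60s, 120s, 240s, ... up to a 3600s
+ * ceiling, and any successful delivery resets the schedule.
+ */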
+ private void fdupdate() {
+ if (!failed) {
+ failed = true;
+ if (failduration == 0) {
+ failduration = dqh.getInitFailureTimer();
+ }
+ resumetime = System.currentTimeMillis() + failduration;
+ long maxdur = dqh.getMaxFailureTimer();
+ failduration = (long) (failduration * dqh.getFailureBackoff());
+ if (failduration > maxdur) {
+ failduration = maxdur;
+ }
+ }
+ }
+
+ /**
+ * Mark that a delivery task has been redirected.
+ */
+ public synchronized void markRedirect(DeliveryTask task) {
+ working.remove(task.getPublishId());
+ retry.put(task.getPublishId(), task);
+ }
+
+ /**
+ * Mark that a delivery task has temporarily failed.
+ */
+ public synchronized void markFailWithRetry(DeliveryTask task) {
+ working.remove(task.getPublishId());
+ retry.put(task.getPublishId(), task);
+ fdupdate();
+ }
+
+ /**
+ * Get the next task.
+ */
+ public synchronized DeliveryTask getNext() {
+ DeliveryTask ret = peekNext();
+ if (ret != null) {
+ todoindex++;
+ working.put(ret.getPublishId(), ret);
+ }
+ return (ret);
+ }
+
+ /**
+ * Peek at the next task.
+ */
+ public synchronized DeliveryTask peekNext() {
+ long now = System.currentTimeMillis();
+ long mindate = now - dqh.getExpirationTimer();
+ if (failed) {
+ if (now > resumetime) {
+ failed = false;
+ } else {
+ return (null);
+ }
+ }
+ while (true) {
+ if (todoindex >= todo.size()) {
+ todoindex = 0;
+ todo = new Vector<DeliveryTask>();
+ String[] files = dir.list();
+ Arrays.sort(files);
+ for (String fname : files) {
+ if (!fname.endsWith(".M")) {
+ continue;
+ }
+ String fname2 = fname.substring(0, fname.length() - 2);
+ long pidtime = 0;
+ int dot = fname2.indexOf('.');
+ if (dot < 1) {
+ continue;
+ }
+ try {
+ pidtime = Long.parseLong(fname2.substring(0, dot));
+ } catch (Exception e) {
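+ // ignore unparseable names; pidtime stays 0 and the timestamp check below skips the file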
+ }
+ if (pidtime < 1000000000000L) {
+ continue;
+ }
+ if (working.get(fname2) != null) {
+ continue;
+ }
+ DeliveryTask dt = retry.get(fname2);
+ if (dt == null) {
+ dt = new DeliveryTask(this, fname2);
+ }
+ todo.add(dt);
+ }
+ retry = new Hashtable<String, DeliveryTask>();
+ }
+ if (todoindex < todo.size()) {
+ DeliveryTask dt = todo.get(todoindex);
+ if (dt.isCleaned()) {
+ todoindex++;
+ continue;
+ }
+ if (dt.getDate() >= mindate) {
+ return (dt);
+ }
+ todoindex++;
+ reportExpiry(dt);
+ continue;
+ }
+ return (null);
+ }
+ }
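+
+ // Editor's note (the file name is illustrative): each spooled item is a data file named
+ // by its publish ID, for example "1503948123456.dr-node-1.example.org", plus a metadata
+ // file with the same name and a ".M" suffix; the scan above only queues ".M" files whose
+ // leading field parses as a millisecond timestamp and that are not already being worked on.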
+
+ /**
+ * Create a delivery queue for a given destination info
+ */
+ public DeliveryQueue(DeliveryQueueHelper dqh, DestInfo di) {
+ this.dqh = dqh;
+ this.di = di;
+ dir = new File(di.getSpool());
+ dir.mkdirs();
+ }
+
+ /**
+ * Update the destination info for this delivery queue
+ */
+ public void config(DestInfo di) {
+ this.di = di;
+ }
+
+ /**
+ * Get the dest info
+ */
+ public DestInfo getDestInfo() {
+ return (di);
+ }
+
+ /**
+ * Get the config manager
+ */
+ public DeliveryQueueHelper getConfig() {
+ return (dqh);
+ }
+
+ /**
+ * Report a delivery attempt that ended prematurely, either because sending the
+ * body failed partway or because an error was returned instead of a 100 Continue
+ */
+ public void reportDeliveryExtra(DeliveryTask task, long sent) {
+ StatusLog.logDelExtra(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getLength(), sent);
+ }
+
+ /**
+ * Message too old to deliver
+ */
+ public void reportExpiry(DeliveryTask task) {
+ StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "retriesExhausted", task.getAttempts());
+ markExpired(task);
+ }
+
+ /**
+ * Record the result of a completed delivery attempt: 2xx marks success, 3xx is
+ * treated as a redirection when redirects are followed, any other response below
+ * 500 fails without retry, and 500 or above fails with retry
+ */
+ public void reportStatus(DeliveryTask task, int status, String xpubid, String location) {
+ if (status < 300) {
+ StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, xpubid);
+ markSuccess(task);
+ } else if (status < 400 && dqh.isFollowRedirects()) {
+ StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);
+ if (dqh.handleRedirection(di, location, task.getFileId())) {
+ markRedirect(task);
+ } else {
+ StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "notRetryable", task.getAttempts());
+ markFailNoRetry(task);
+ }
+ } else if (status < 500) {
+ StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);
+ StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "notRetryable", task.getAttempts());
+ markFailNoRetry(task);
+ } else {
+ StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);
+ markFailWithRetry(task);
+ }
+ }
+
+ /**
+ * Delivery failed by reason of an exception
+ */
+ public void reportException(DeliveryTask task, Exception exception) {
+ StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), -1, exception.toString());
+ dqh.handleUnreachable(di);
+ markFailWithRetry(task);
+ }
+
+ /**
+ * Get the feed ID for a subscription
+ *
+ * @param subid The subscription ID
+ * @return The feed ID
+ */
+ public String getFeedId(String subid) {
+ return (dqh.getFeedId(subid));
+ }
+
+ /**
+ * Get the URL to deliver a file to, given its file ID
+ */
+ public String getDestURL(String fileid) {
+ return (dqh.getDestURL(di, fileid));
+ }
+
+ /**
+ * Deliver files until a delivery fails, there are no more files to deliver, or
+ * the fair-sharing limits on file count or elapsed time are reached
+ */
+ public void run() {
+ DeliveryTask t;
+ long endtime = System.currentTimeMillis() + dqh.getFairTimeLimit();
+ int filestogo = dqh.getFairFileLimit();
+ while ((t = getNext()) != null) {
+ t.run();
+ if (--filestogo <= 0 || System.currentTimeMillis() > endtime) {
+ break;
+ }
+ }
+ }
+
+ /**
+ * Is there no work to do for this queue right now?
+ */
+ public synchronized boolean isSkipSet() {
+ return (peekNext() == null);
+ }
+
+ /**
+ * Reset the retry timer
+ */
+ public void resetQueue() {
+ resumetime = System.currentTimeMillis();
+ }
+}
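+
+/*
+ * Editor's illustrative sketch (not part of the original change): a minimal driver
+ * showing how a DeliveryQueue is wired up for a single destination. The spool path,
+ * URL and credentials below are placeholder values; a real node manages many queues.
+ */
+class DeliveryQueueUsageSketch {
+ static void startQueue(DeliveryQueueHelper helper) {
+ DestInfo di = new DestInfo("s:1", "/opt/app/datartr/spool/s/1", "1", "feed 10",
+ "https://subscriber.example.org/delivery", "subuser", "Basic c3VidXNlcjpwdw==", false, true);
+ DeliveryQueue dq = new DeliveryQueue(helper, di); // scans the spool and applies back-off on failures
+ new Thread(dq, "delivery-s:1").start(); // DeliveryQueue implements Runnable
+ }
+}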
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-/**\r
- * Interface to allow independent testing of the DeliveryQueue code\r
- * <p>\r
- * This interface represents all of the configuration information and\r
- * feedback mechanisms that a delivery queue needs.\r
- */\r
-public interface DeliveryQueueHelper {\r
- /**\r
- * Get the timeout (milliseconds) before retrying after an initial delivery failure\r
- */\r
- public long getInitFailureTimer();\r
- /**\r
- * Get the ratio between timeouts on consecutive delivery attempts\r
- */\r
- public double getFailureBackoff();\r
- /**\r
- * Get the maximum timeout (milliseconds) between delivery attempts\r
- */\r
- public long getMaxFailureTimer();\r
- /**\r
- * Get the expiration timer (milliseconds) for deliveries\r
- */\r
- public long getExpirationTimer();\r
- /**\r
- * Get the maximum number of file delivery attempts before checking\r
- * if another queue has work to be performed.\r
- */\r
- public int getFairFileLimit();\r
- /**\r
- * Get the maximum amount of time spent delivering files before checking if another queue has work to be performed.\r
- */\r
- public long getFairTimeLimit();\r
- /**\r
- * Get the URL for delivering a file\r
- * @param dest The destination information for the file to be delivered.\r
- * @param fileid The file id for the file to be delivered.\r
- * @return The URL for delivering the file (typically, dest.getURL() + "/" + fileid).\r
- */\r
- public String getDestURL(DestInfo dest, String fileid);\r
- /**\r
- * Forget redirections associated with a subscriber\r
- * @param dest Destination information to forget\r
- */\r
- public void handleUnreachable(DestInfo dest);\r
- /**\r
- * Post redirection for a subscriber\r
- * @param dest Destination information to update\r
- * @param location Location given by subscriber\r
- * @param fileid File ID of request\r
- * @return true if this 3xx response is retryable, otherwise, false.\r
- */\r
- public boolean handleRedirection(DestInfo dest, String location, String fileid);\r
- /**\r
- * Should I handle 3xx responses differently than 4xx responses?\r
- */\r
- public boolean isFollowRedirects();\r
- /**\r
- * Get the feed ID for a subscription\r
- * @param subid The subscription ID\r
- * @return The feed ID\r
- */\r
- public String getFeedId(String subid);\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+/**
+ * Interface to allow independent testing of the DeliveryQueue code
+ * <p>
+ * This interface represents all of the configuration information and
+ * feedback mechanisms that a delivery queue needs.
+ */
+public interface DeliveryQueueHelper {
+ /**
+ * Get the timeout (milliseconds) before retrying after an initial delivery failure
+ */
+ public long getInitFailureTimer();
+
+ /**
+ * Get the ratio between timeouts on consecutive delivery attempts
+ */
+ public double getFailureBackoff();
+
+ /**
+ * Get the maximum timeout (milliseconds) between delivery attempts
+ */
+ public long getMaxFailureTimer();
+
+ /**
+ * Get the expiration timer (milliseconds) for deliveries
+ */
+ public long getExpirationTimer();
+
+ /**
+ * Get the maximum number of file delivery attempts before checking
+ * if another queue has work to be performed.
+ */
+ public int getFairFileLimit();
+
+ /**
+ * Get the maximum amount of time spent delivering files before checking if another queue has work to be performed.
+ */
+ public long getFairTimeLimit();
+
+ /**
+ * Get the URL for delivering a file
+ *
+ * @param dest The destination information for the file to be delivered.
+ * @param fileid The file id for the file to be delivered.
+ * @return The URL for delivering the file (typically, dest.getURL() + "/" + fileid).
+ */
+ public String getDestURL(DestInfo dest, String fileid);
+
+ /**
+ * Forget redirections associated with a subscriber
+ *
+ * @param dest Destination information to forget
+ */
+ public void handleUnreachable(DestInfo dest);
+
+ /**
+ * Post redirection for a subscriber
+ *
+ * @param dest Destination information to update
+ * @param location Location given by subscriber
+ * @param fileid File ID of request
+ * @return true if this 3xx response is retryable, otherwise, false.
+ */
+ public boolean handleRedirection(DestInfo dest, String location, String fileid);
+
+ /**
+ * Should I handle 3xx responses differently than 4xx responses?
+ */
+ public boolean isFollowRedirects();
+
+ /**
+ * Get the feed ID for a subscription
+ *
+ * @param subid The subscription ID
+ * @return The feed ID
+ */
+ public String getFeedId(String subid);
+}
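+
+/*
+ * Editor's illustrative sketch (not part of the original change): a minimal
+ * DeliveryQueueHelper with fixed timers, handy when exercising DeliveryQueue in
+ * isolation. All values are placeholders chosen only for the example.
+ */
+class FixedDeliveryQueueHelper implements DeliveryQueueHelper {
+ public long getInitFailureTimer() { return (60000L); } // first retry delay: 60s
+ public double getFailureBackoff() { return (2.0); } // double the delay on each further failure
+ public long getMaxFailureTimer() { return (3600000L); } // cap the delay at 1 hour
+ public long getExpirationTimer() { return (86400000L); } // expire undelivered files after 24 hours
+ public int getFairFileLimit() { return (100); }
+ public long getFairTimeLimit() { return (60000L); }
+ public String getDestURL(DestInfo dest, String fileid) { return (dest.getURL() + "/" + fileid); }
+ public void handleUnreachable(DestInfo dest) { }
+ public boolean handleRedirection(DestInfo dest, String location, String fileid) { return (false); }
+ public boolean isFollowRedirects() { return (false); }
+ public String getFeedId(String subid) { return (null); }
+}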
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.io.*;\r
-import java.net.*;\r
-import java.util.*;\r
-import org.apache.log4j.Logger;\r
-\r
-/**\r
- * A file to be delivered to a destination.\r
- * <p>\r
- * A Delivery task represents a work item for the data router - a file that\r
- * needs to be delivered and provides mechanisms to get information about\r
- * the file and its delivery data as well as to attempt delivery.\r
- */\r
-public class DeliveryTask implements Runnable, Comparable<DeliveryTask> {\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.DeliveryTask");\r
- private DeliveryTaskHelper dth;\r
- private String pubid;\r
- private DestInfo di;\r
- private String spool;\r
- private File datafile;\r
- private File metafile;\r
- private long length;\r
- private long date;\r
- private String method;\r
- private String fileid;\r
- private String ctype;\r
- private String url;\r
- private String feedid;\r
- private String subid;\r
- private int attempts;\r
- private String[][] hdrs;\r
- /**\r
- * Is the object a DeliveryTask with the same publication ID?\r
- */\r
- public boolean equals(Object o) {\r
- if (!(o instanceof DeliveryTask)) {\r
- return(false);\r
- }\r
- return(pubid.equals(((DeliveryTask)o).pubid));\r
- }\r
- /**\r
- * Compare the publication IDs.\r
- */\r
- public int compareTo(DeliveryTask o) {\r
- return(pubid.compareTo(o.pubid));\r
- }\r
- /**\r
- * Get the hash code of the publication ID.\r
- */\r
- public int hashCode() {\r
- return(pubid.hashCode());\r
- }\r
- /**\r
- * Return the publication ID.\r
- */\r
- public String toString() {\r
- return(pubid);\r
- }\r
- /**\r
- * Create a delivery task for a given delivery queue and pub ID\r
- * @param dth The delivery task helper for the queue this task is in.\r
- * @param pubid The publish ID for this file. This is used as\r
- * the base for the file name in the spool directory and is of\r
- * the form <milliseconds since 1970>.<fqdn of initial data router node>\r
- */\r
- public DeliveryTask(DeliveryTaskHelper dth, String pubid) {\r
- this.dth = dth;\r
- this.pubid = pubid;\r
- di = dth.getDestInfo();\r
- subid = di.getSubId();\r
- feedid = di.getLogData();\r
- spool = di.getSpool();\r
- String dfn = spool + "/" + pubid;\r
- String mfn = dfn + ".M";\r
- datafile = new File(spool + "/" + pubid);\r
- metafile = new File(mfn);\r
- boolean monly = di.isMetaDataOnly();\r
- date = Long.parseLong(pubid.substring(0, pubid.indexOf('.')));\r
- Vector<String[]> hdrv = new Vector<String[]>();\r
- try {\r
- BufferedReader br = new BufferedReader(new FileReader(metafile));\r
- String s = br.readLine();\r
- int i = s.indexOf('\t');\r
- method = s.substring(0, i);\r
- if (!"DELETE".equals(method) && !monly) {\r
- length = datafile.length();\r
- }\r
- fileid = s.substring(i + 1);\r
- while ((s = br.readLine()) != null) {\r
- i = s.indexOf('\t');\r
- String h = s.substring(0, i);\r
- String v = s.substring(i + 1);\r
- if ("x-att-dr-routing".equalsIgnoreCase(h)) {\r
- subid = v.replaceAll("[^ ]*/", "");\r
- feedid = dth.getFeedId(subid.replaceAll(" .*", ""));\r
- }\r
- if (length == 0 && h.toLowerCase().startsWith("content-")) {\r
- continue;\r
- }\r
- if (h.equalsIgnoreCase("content-type")) {\r
- ctype = v;\r
- }\r
- hdrv.add(new String[] {h, v});\r
- }\r
- br.close();\r
- } catch (Exception e) {\r
- }\r
- hdrs = hdrv.toArray(new String[hdrv.size()][]);\r
- url = dth.getDestURL(fileid);\r
- }\r
- /**\r
- * Get the publish ID\r
- */\r
- public String getPublishId() {\r
- return(pubid);\r
- }\r
- /**\r
- * Attempt delivery\r
- */\r
- public void run() {\r
- attempts++;\r
- try {\r
- di = dth.getDestInfo();\r
- boolean expect100 = di.isUsing100();\r
- boolean monly = di.isMetaDataOnly();\r
- length = 0;\r
- if (!"DELETE".equals(method) && !monly) {\r
- length = datafile.length();\r
- }\r
- url = dth.getDestURL(fileid);\r
- URL u = new URL(url);\r
- HttpURLConnection uc = (HttpURLConnection)u.openConnection();\r
- uc.setConnectTimeout(60000);\r
- uc.setReadTimeout(60000);\r
- uc.setInstanceFollowRedirects(false);\r
- uc.setRequestMethod(method);\r
- uc.setRequestProperty("Content-Length", Long.toString(length));\r
- uc.setRequestProperty("Authorization", di.getAuth());\r
- uc.setRequestProperty("X-ATT-DR-PUBLISH-ID", pubid);\r
- for (String[] nv: hdrs) {\r
- uc.addRequestProperty(nv[0], nv[1]);\r
- }\r
- if (length > 0) {\r
- if (expect100) {\r
- uc.setRequestProperty("Expect", "100-continue");\r
- }\r
- uc.setFixedLengthStreamingMode(length);\r
- uc.setDoOutput(true);\r
- OutputStream os = null;\r
- try {\r
- os = uc.getOutputStream();\r
- } catch (ProtocolException pe) {\r
- dth.reportDeliveryExtra(this, -1L);\r
- // Rcvd error instead of 100-continue\r
- }\r
- if (os != null) {\r
- long sofar = 0;\r
- try {\r
- byte[] buf = new byte[1024 * 1024];\r
- InputStream is = new FileInputStream(datafile);\r
- while (sofar < length) {\r
- int i = buf.length;\r
- if (sofar + i > length) {\r
- i = (int)(length - sofar);\r
- }\r
- i = is.read(buf, 0, i);\r
- if (i <= 0) {\r
- throw new IOException("Unexpected problem reading data file " + datafile);\r
- }\r
- sofar += i;\r
- os.write(buf, 0, i);\r
- }\r
- is.close();\r
- os.close();\r
- } catch (IOException ioe) {\r
- dth.reportDeliveryExtra(this, sofar);\r
- throw ioe;\r
- }\r
- }\r
- }\r
- int rc = uc.getResponseCode();\r
- String rmsg = uc.getResponseMessage();\r
- if (rmsg == null) {\r
- String h0 = uc.getHeaderField(0);\r
- if (h0 != null) {\r
- int i = h0.indexOf(' ');\r
- int j = h0.indexOf(' ', i + 1);\r
- if (i != -1 && j != -1) {\r
- rmsg = h0.substring(j + 1);\r
- }\r
- }\r
- }\r
- String xpubid = null;\r
- InputStream is;\r
- if (rc >= 200 && rc <= 299) {\r
- is = uc.getInputStream();\r
- xpubid = uc.getHeaderField("X-ATT-DR-PUBLISH-ID");\r
- } else {\r
- if (rc >= 300 && rc <= 399) {\r
- rmsg = uc.getHeaderField("Location");\r
- }\r
- is = uc.getErrorStream();\r
- }\r
- byte[] buf = new byte[4096];\r
- if (is != null) {\r
- while (is.read(buf) > 0) {\r
- }\r
- is.close();\r
- }\r
- dth.reportStatus(this, rc, xpubid, rmsg);\r
- } catch (Exception e) {\r
- dth.reportException(this, e);\r
- }\r
- }\r
- /**\r
- * Remove meta and data files\r
- */\r
- public void clean() {\r
- datafile.delete();\r
- metafile.delete();\r
- hdrs = null;\r
- }\r
- /**\r
- * Has this delivery task been cleaned?\r
- */\r
- public boolean isCleaned() {\r
- return(hdrs == null);\r
- }\r
- /**\r
- * Get length of body\r
- */\r
- public long getLength() {\r
- return(length);\r
- }\r
- /**\r
- * Get creation date as encoded in the publish ID.\r
- */\r
- public long getDate() {\r
- return(date);\r
- }\r
- /**\r
- * Get the most recent delivery attempt URL\r
- */\r
- public String getURL() {\r
- return(url);\r
- }\r
- /**\r
- * Get the content type\r
- */\r
- public String getCType() {\r
- return(ctype);\r
- }\r
- /**\r
- * Get the method\r
- */\r
- public String getMethod() {\r
- return(method);\r
- }\r
- /**\r
- * Get the file ID\r
- */\r
- public String getFileId() {\r
- return(fileid);\r
- }\r
- /**\r
- * Get the number of delivery attempts\r
- */\r
- public int getAttempts() {\r
- return(attempts);\r
- }\r
- /**\r
- * Get the (space delimited list of) subscription ID for this delivery task\r
- */\r
- public String getSubId() {\r
- return(subid);\r
- }\r
- /**\r
- * Get the feed ID for this delivery task\r
- */\r
- public String getFeedId() {\r
- return(feedid);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.io.*;
+import java.net.*;
+import java.util.*;
+
+import org.apache.log4j.Logger;
+
+/**
+ * A file to be delivered to a destination.
+ * <p>
+ * A Delivery task represents a work item for the data router - a file that
+ * needs to be delivered and provides mechanisms to get information about
+ * the file and its delivery data as well as to attempt delivery.
+ */
+public class DeliveryTask implements Runnable, Comparable<DeliveryTask> {
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.DeliveryTask");
+ private DeliveryTaskHelper dth;
+ private String pubid;
+ private DestInfo di;
+ private String spool;
+ private File datafile;
+ private File metafile;
+ private long length;
+ private long date;
+ private String method;
+ private String fileid;
+ private String ctype;
+ private String url;
+ private String feedid;
+ private String subid;
+ private int attempts;
+ private String[][] hdrs;
+
+ /**
+ * Is the object a DeliveryTask with the same publication ID?
+ */
+ public boolean equals(Object o) {
+ if (!(o instanceof DeliveryTask)) {
+ return (false);
+ }
+ return (pubid.equals(((DeliveryTask) o).pubid));
+ }
+
+ /**
+ * Compare the publication IDs.
+ */
+ public int compareTo(DeliveryTask o) {
+ return (pubid.compareTo(o.pubid));
+ }
+
+ /**
+ * Get the hash code of the publication ID.
+ */
+ public int hashCode() {
+ return (pubid.hashCode());
+ }
+
+ /**
+ * Return the publication ID.
+ */
+ public String toString() {
+ return (pubid);
+ }
+
+ /**
+ * Create a delivery task for a given delivery queue and pub ID
+ *
+ * @param dth The delivery task helper for the queue this task is in.
+ * @param pubid The publish ID for this file. This is used as
+ * the base for the file name in the spool directory and is of
+ * the form <milliseconds since 1970>.<fqdn of initial data router node>
+ */
+ public DeliveryTask(DeliveryTaskHelper dth, String pubid) {
+ this.dth = dth;
+ this.pubid = pubid;
+ di = dth.getDestInfo();
+ subid = di.getSubId();
+ feedid = di.getLogData();
+ spool = di.getSpool();
+ String dfn = spool + "/" + pubid;
+ String mfn = dfn + ".M";
+ datafile = new File(spool + "/" + pubid);
+ metafile = new File(mfn);
+ boolean monly = di.isMetaDataOnly();
+ date = Long.parseLong(pubid.substring(0, pubid.indexOf('.')));
+ Vector<String[]> hdrv = new Vector<String[]>();
+ try {
+ BufferedReader br = new BufferedReader(new FileReader(metafile));
+ String s = br.readLine();
+ int i = s.indexOf('\t');
+ method = s.substring(0, i);
+ if (!"DELETE".equals(method) && !monly) {
+ length = datafile.length();
+ }
+ fileid = s.substring(i + 1);
+ while ((s = br.readLine()) != null) {
+ i = s.indexOf('\t');
+ String h = s.substring(0, i);
+ String v = s.substring(i + 1);
+ if ("x-att-dr-routing".equalsIgnoreCase(h)) {
+ subid = v.replaceAll("[^ ]*/", "");
+ feedid = dth.getFeedId(subid.replaceAll(" .*", ""));
+ }
+ if (length == 0 && h.toLowerCase().startsWith("content-")) {
+ continue;
+ }
+ if (h.equalsIgnoreCase("content-type")) {
+ ctype = v;
+ }
+ hdrv.add(new String[]{h, v});
+ }
+ br.close();
+ } catch (Exception e) {
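+ // ignore: a missing or unreadable metadata file leaves the remaining fields unset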
+ }
+ hdrs = hdrv.toArray(new String[hdrv.size()][]);
+ url = dth.getDestURL(fileid);
+ }
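+
+ // Editor's note (the values are illustrative): the ".M" metadata file parsed above is
+ // tab separated; its first line is "<method>\t<fileid>" (for example "PUT\tfile1234")
+ // and each subsequent line is "<header-name>\t<header-value>" to be replayed on delivery.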
+
+ /**
+ * Get the publish ID
+ */
+ public String getPublishId() {
+ return (pubid);
+ }
+
+ /**
+ * Attempt delivery
+ */
+ public void run() {
+ attempts++;
+ try {
+ di = dth.getDestInfo();
+ boolean expect100 = di.isUsing100();
+ boolean monly = di.isMetaDataOnly();
+ length = 0;
+ if (!"DELETE".equals(method) && !monly) {
+ length = datafile.length();
+ }
+ url = dth.getDestURL(fileid);
+ URL u = new URL(url);
+ HttpURLConnection uc = (HttpURLConnection) u.openConnection();
+ uc.setConnectTimeout(60000);
+ uc.setReadTimeout(60000);
+ uc.setInstanceFollowRedirects(false);
+ uc.setRequestMethod(method);
+ uc.setRequestProperty("Content-Length", Long.toString(length));
+ uc.setRequestProperty("Authorization", di.getAuth());
+ uc.setRequestProperty("X-ATT-DR-PUBLISH-ID", pubid);
+ for (String[] nv : hdrs) {
+ uc.addRequestProperty(nv[0], nv[1]);
+ }
+ if (length > 0) {
+ if (expect100) {
+ uc.setRequestProperty("Expect", "100-continue");
+ }
+ uc.setFixedLengthStreamingMode(length);
+ uc.setDoOutput(true);
+ OutputStream os = null;
+ try {
+ os = uc.getOutputStream();
+ } catch (ProtocolException pe) {
+ dth.reportDeliveryExtra(this, -1L);
+ // Rcvd error instead of 100-continue
+ }
+ if (os != null) {
+ long sofar = 0;
+ try {
+ byte[] buf = new byte[1024 * 1024];
+ InputStream is = new FileInputStream(datafile);
+ while (sofar < length) {
+ int i = buf.length;
+ if (sofar + i > length) {
+ i = (int) (length - sofar);
+ }
+ i = is.read(buf, 0, i);
+ if (i <= 0) {
+ throw new IOException("Unexpected problem reading data file " + datafile);
+ }
+ sofar += i;
+ os.write(buf, 0, i);
+ }
+ is.close();
+ os.close();
+ } catch (IOException ioe) {
+ dth.reportDeliveryExtra(this, sofar);
+ throw ioe;
+ }
+ }
+ }
+ int rc = uc.getResponseCode();
+ String rmsg = uc.getResponseMessage();
+ if (rmsg == null) {
+ String h0 = uc.getHeaderField(0);
+ if (h0 != null) {
+ int i = h0.indexOf(' ');
+ int j = h0.indexOf(' ', i + 1);
+ if (i != -1 && j != -1) {
+ rmsg = h0.substring(j + 1);
+ }
+ }
+ }
+ String xpubid = null;
+ InputStream is;
+ if (rc >= 200 && rc <= 299) {
+ is = uc.getInputStream();
+ xpubid = uc.getHeaderField("X-ATT-DR-PUBLISH-ID");
+ } else {
+ if (rc >= 300 && rc <= 399) {
+ rmsg = uc.getHeaderField("Location");
+ }
+ is = uc.getErrorStream();
+ }
+ byte[] buf = new byte[4096];
+ if (is != null) {
+ while (is.read(buf) > 0) {
+ }
+ is.close();
+ }
+ dth.reportStatus(this, rc, xpubid, rmsg);
+ } catch (Exception e) {
+ dth.reportException(this, e);
+ }
+ }
+
+ /**
+ * Remove meta and data files
+ */
+ public void clean() {
+ datafile.delete();
+ metafile.delete();
+ hdrs = null;
+ }
+
+ /**
+ * Has this delivery task been cleaned?
+ */
+ public boolean isCleaned() {
+ return (hdrs == null);
+ }
+
+ /**
+ * Get length of body
+ */
+ public long getLength() {
+ return (length);
+ }
+
+ /**
+ * Get creation date as encoded in the publish ID.
+ */
+ public long getDate() {
+ return (date);
+ }
+
+ /**
+ * Get the most recent delivery attempt URL
+ */
+ public String getURL() {
+ return (url);
+ }
+
+ /**
+ * Get the content type
+ */
+ public String getCType() {
+ return (ctype);
+ }
+
+ /**
+ * Get the method
+ */
+ public String getMethod() {
+ return (method);
+ }
+
+ /**
+ * Get the file ID
+ */
+ public String getFileId() {
+ return (fileid);
+ }
+
+ /**
+ * Get the number of delivery attempts
+ */
+ public int getAttempts() {
+ return (attempts);
+ }
+
+ /**
+ * Get the subscription ID (possibly a space-delimited list of IDs) for this delivery task
+ */
+ public String getSubId() {
+ return (subid);
+ }
+
+ /**
+ * Get the feed ID for this delivery task
+ */
+ public String getFeedId() {
+ return (feedid);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-/**\r
- * Interface to allow independent testing of the DeliveryTask code.\r
- * <p>\r
- * This interface represents all the configuraiton information and\r
- * feedback mechanisms that a delivery task needs.\r
- */\r
-\r
-public interface DeliveryTaskHelper {\r
- /**\r
- * Report that a delivery attempt failed due to an exception (like can't connect to remote host)\r
- * @param task The task that failed\r
- * @param exception The exception that occurred\r
- */\r
- public void reportException(DeliveryTask task, Exception exception);\r
- /**\r
- * Report that a delivery attempt completed (successfully or unsuccessfully)\r
- * @param task The task that failed\r
- * @param status The HTTP status\r
- * @param xpubid The publish ID from the far end (if any)\r
- * @param location The redirection location for a 3XX response\r
- */\r
- public void reportStatus(DeliveryTask task, int status, String xpubid, String location);\r
- /**\r
- * Report that a delivery attempt either failed while sending data or that an error was returned instead of a 100 Continue.\r
- * @param task The task that failed\r
- * @param sent The number of bytes sent or -1 if an error was returned instead of 100 Continue.\r
- */\r
- public void reportDeliveryExtra(DeliveryTask task, long sent);\r
- /**\r
- * Get the destination information for the delivery queue\r
- * @return The destination information\r
- */\r
- public DestInfo getDestInfo();\r
- /**\r
- * Given a file ID, get the URL to deliver to\r
- * @param fileid The file id\r
- * @return The URL to deliver to\r
- */\r
- public String getDestURL(String fileid);\r
- /**\r
- * Get the feed ID for a subscription\r
- * @param subid The subscription ID\r
- * @return The feed iD\r
- */\r
- public String getFeedId(String subid);\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+/**
+ * Interface to allow independent testing of the DeliveryTask code.
+ * <p>
+ * This interface represents all the configuration information and
+ * feedback mechanisms that a delivery task needs.
+ */
+
+public interface DeliveryTaskHelper {
+ /**
+ * Report that a delivery attempt failed due to an exception (like can't connect to remote host)
+ *
+ * @param task The task that failed
+ * @param exception The exception that occurred
+ */
+ public void reportException(DeliveryTask task, Exception exception);
+
+ /**
+ * Report that a delivery attempt completed (successfully or unsuccessfully)
+ *
+ * @param task The task that failed
+ * @param status The HTTP status
+ * @param xpubid The publish ID from the far end (if any)
+ * @param location The redirection location for a 3XX response
+ */
+ public void reportStatus(DeliveryTask task, int status, String xpubid, String location);
+
+ /**
+ * Report that a delivery attempt either failed while sending data or that an error was returned instead of a 100 Continue.
+ *
+ * @param task The task that failed
+ * @param sent The number of bytes sent or -1 if an error was returned instead of 100 Continue.
+ */
+ public void reportDeliveryExtra(DeliveryTask task, long sent);
+
+ /**
+ * Get the destination information for the delivery queue
+ *
+ * @return The destination information
+ */
+ public DestInfo getDestInfo();
+
+ /**
+ * Given a file ID, get the URL to deliver to
+ *
+ * @param fileid The file id
+ * @return The URL to deliver to
+ */
+ public String getDestURL(String fileid);
+
+ /**
+ * Get the feed ID for a subscription
+ *
+ * @param subid The subscription ID
+ * @return The feed iD
+ */
+ public String getFeedId(String subid);
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-/**\r
- * Information for a delivery destination that doesn't change from message to message\r
- */\r
-public class DestInfo {\r
- private String name;\r
- private String spool;\r
- private String subid;\r
- private String logdata;\r
- private String url;\r
- private String authuser;\r
- private String authentication;\r
- private boolean metaonly;\r
- private boolean use100;\r
- /**\r
- * Create a destination information object.\r
- * @param name n:fqdn or s:subid\r
- * @param spool The directory where files are spooled.\r
- * @param subid The subscription ID (if applicable).\r
- * @param logdata Text to be included in log messages\r
- * @param url The URL to deliver to.\r
- * @param authuser The auth user for logging.\r
- * @param authentication The credentials.\r
- * @param metaonly Is this a metadata only delivery?\r
- * @param use100 Should I use expect 100-continue?\r
- */\r
- public DestInfo(String name, String spool, String subid, String logdata, String url, String authuser, String authentication, boolean metaonly, boolean use100) {\r
- this.name = name;\r
- this.spool = spool;\r
- this.subid = subid;\r
- this.logdata = logdata;\r
- this.url = url;\r
- this.authuser = authuser;\r
- this.authentication = authentication;\r
- this.metaonly = metaonly;\r
- this.use100 = use100;\r
- }\r
- public boolean equals(Object o) {\r
- return((o instanceof DestInfo) && ((DestInfo)o).spool.equals(spool));\r
- }\r
- public int hashCode() {\r
- return(spool.hashCode());\r
- }\r
- /**\r
- * Get the name of this destination\r
- */\r
- public String getName() {\r
- return(name);\r
- }\r
- /**\r
- * Get the spool directory for this destination.\r
- * @return The spool directory\r
- */\r
- public String getSpool() {\r
- return(spool);\r
- }\r
- /**\r
- * Get the subscription ID.\r
- * @return Subscription ID or null if this is a node to node delivery.\r
- */\r
- public String getSubId() {\r
- return(subid);\r
- }\r
- /**\r
- * Get the log data.\r
- * @return Text to be included in a log message about delivery attempts.\r
- */\r
- public String getLogData() {\r
- return(logdata);\r
- }\r
- /**\r
- * Get the delivery URL.\r
- * @return The URL to deliver to (the primary URL).\r
- */\r
- public String getURL() {\r
- return(url);\r
-\r
- }\r
- /**\r
- * Get the user for authentication\r
- * @return The name of the user for logging\r
- */\r
- public String getAuthUser() {\r
- return(authuser);\r
- }\r
- /**\r
- * Get the authentication header\r
- * @return The string to use to authenticate to the recipient.\r
- */\r
- public String getAuth() {\r
- return(authentication);\r
- }\r
- /**\r
- * Is this a metadata only delivery?\r
- * @return True if this is a metadata only delivery\r
- */\r
- public boolean isMetaDataOnly() {\r
- return(metaonly);\r
- }\r
- /**\r
- * Should I send expect 100-continue header?\r
- * @return True if I should.\r
- */\r
- public boolean isUsing100() {\r
- return(use100);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+/**
+ * Information for a delivery destination that doesn't change from message to message
+ */
+public class DestInfo {
+ private String name;
+ private String spool;
+ private String subid;
+ private String logdata;
+ private String url;
+ private String authuser;
+ private String authentication;
+ private boolean metaonly;
+ private boolean use100;
+
+ /**
+ * Create a destination information object.
+ *
+ * @param name n:fqdn or s:subid
+ * @param spool The directory where files are spooled.
+ * @param subid The subscription ID (if applicable).
+ * @param logdata Text to be included in log messages
+ * @param url The URL to deliver to.
+ * @param authuser The auth user for logging.
+ * @param authentication The credentials.
+ * @param metaonly Is this a metadata only delivery?
+ * @param use100 Should I use expect 100-continue?
+ */
+ public DestInfo(String name, String spool, String subid, String logdata, String url, String authuser, String authentication, boolean metaonly, boolean use100) {
+ this.name = name;
+ this.spool = spool;
+ this.subid = subid;
+ this.logdata = logdata;
+ this.url = url;
+ this.authuser = authuser;
+ this.authentication = authentication;
+ this.metaonly = metaonly;
+ this.use100 = use100;
+ }
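+ // Editor's note: per the constructor documentation the name takes the form "n:<fqdn>"
+ // for node-to-node destinations and "s:<subid>" for subscriptions; for node-to-node
+ // deliveries the subscription ID is null.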
+
+ public boolean equals(Object o) {
+ return ((o instanceof DestInfo) && ((DestInfo) o).spool.equals(spool));
+ }
+
+ public int hashCode() {
+ return (spool.hashCode());
+ }
+
+ /**
+ * Get the name of this destination
+ */
+ public String getName() {
+ return (name);
+ }
+
+ /**
+ * Get the spool directory for this destination.
+ *
+ * @return The spool directory
+ */
+ public String getSpool() {
+ return (spool);
+ }
+
+ /**
+ * Get the subscription ID.
+ *
+ * @return Subscription ID or null if this is a node to node delivery.
+ */
+ public String getSubId() {
+ return (subid);
+ }
+
+ /**
+ * Get the log data.
+ *
+ * @return Text to be included in a log message about delivery attempts.
+ */
+ public String getLogData() {
+ return (logdata);
+ }
+
+ /**
+ * Get the delivery URL.
+ *
+ * @return The URL to deliver to (the primary URL).
+ */
+ public String getURL() {
+ return (url);
+
+ }
+
+ /**
+ * Get the user for authentication
+ *
+ * @return The name of the user for logging
+ */
+ public String getAuthUser() {
+ return (authuser);
+ }
+
+ /**
+ * Get the authentication header
+ *
+ * @return The string to use to authenticate to the recipient.
+ */
+ public String getAuth() {
+ return (authentication);
+ }
+
+ /**
+ * Is this a metadata only delivery?
+ *
+ * @return True if this is a metadata only delivery
+ */
+ public boolean isMetaDataOnly() {
+ return (metaonly);
+ }
+
+ /**
+ * Should I send expect 100-continue header?
+ *
+ * @return True if I should.
+ */
+ public boolean isUsing100() {
+ return (use100);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-import java.net.*;\r
-\r
-/**\r
- * Determine if an IP address is from a machine\r
- */\r
-public class IsFrom {\r
- private long nextcheck;\r
- private String[] ips;\r
- private String fqdn;\r
- /**\r
- * Configure the JVM DNS cache to have a 10 second TTL. This needs to be called very very early or it won't have any effect.\r
- */\r
- public static void setDNSCache() {\r
- java.security.Security.setProperty("networkaddress.cache.ttl", "10");\r
- }\r
- /**\r
- * Create an IsFrom for the specified fully qualified domain name.\r
- */\r
- public IsFrom(String fqdn) {\r
- this.fqdn = fqdn;\r
- }\r
- /**\r
- * Check if an IP address matches. If it has been more than\r
- * 10 seconds since DNS was last checked for changes to the\r
- * IP address(es) of this FQDN, check again. Then check\r
- * if the specified IP address belongs to the FQDN.\r
- */\r
- public synchronized boolean isFrom(String ip) {\r
- long now = System.currentTimeMillis();\r
- if (now > nextcheck) {\r
- nextcheck = now + 10000;\r
- Vector<String> v = new Vector<String>();\r
- try {\r
- InetAddress[] addrs = InetAddress.getAllByName(fqdn);\r
- for (InetAddress a: addrs) {\r
- v.add(a.getHostAddress());\r
- }\r
- } catch (Exception e) {\r
- }\r
- ips = v.toArray(new String[v.size()]);\r
- }\r
- for (String s: ips) {\r
- if (s.equals(ip)) {\r
- return(true);\r
- }\r
- }\r
- return(false);\r
- }\r
- /**\r
- * Return the fully qualified domain name\r
- */\r
- public String toString() {\r
- return(fqdn);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+import java.net.*;
+
+/**
+ * Determine if an IP address is from a machine
+ */
+public class IsFrom {
+ private long nextcheck;
+ private String[] ips;
+ private String fqdn;
+
+ /**
+ * Configure the JVM DNS cache to have a 10 second TTL. This needs to be called very early in startup or it won't have any effect.
+ */
+ public static void setDNSCache() {
+ java.security.Security.setProperty("networkaddress.cache.ttl", "10");
+ }
+
+ /**
+ * Create an IsFrom for the specified fully qualified domain name.
+ */
+ public IsFrom(String fqdn) {
+ this.fqdn = fqdn;
+ }
+
+ /**
+ * Check if an IP address matches. If it has been more than
+ * 10 seconds since DNS was last checked for changes to the
+ * IP address(es) of this FQDN, check again. Then check
+ * if the specified IP address belongs to the FQDN.
+ */
+ public synchronized boolean isFrom(String ip) {
+ long now = System.currentTimeMillis();
+ if (now > nextcheck) {
+ nextcheck = now + 10000;
+ Vector<String> v = new Vector<String>();
+ try {
+ InetAddress[] addrs = InetAddress.getAllByName(fqdn);
+ for (InetAddress a : addrs) {
+ v.add(a.getHostAddress());
+ }
+ } catch (Exception e) {
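+ // ignore lookup failures; the address list is simply left empty until the next check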
+ }
+ ips = v.toArray(new String[v.size()]);
+ }
+ for (String s : ips) {
+ if (s.equals(ip)) {
+ return (true);
+ }
+ }
+ return (false);
+ }
+
+ /**
+ * Return the fully qualified domain name
+ */
+ public String toString() {
+ return (fqdn);
+ }
+}
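+
+// Editor's illustrative sketch (names are hypothetical): typical use is one IsFrom per
+// trusted peer, e.g.
+//   IsFrom prov = new IsFrom("dr-prov.example.org");
+//   if (!prov.isFrom(request.getRemoteAddr())) { /* reject the request */ }
+// IsFrom.setDNSCache() should be called during startup so the 10 second TTL takes effect.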
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-import java.util.regex.*;\r
-import java.io.*;\r
-import java.nio.file.*;\r
-import java.text.*;\r
-\r
-/**\r
- * Cleanup of old log files.\r
- * <p>\r
- * Periodically scan the log directory for log files that are older than\r
- * the log file retention interval, and delete them. In a future release,\r
- * This class will also be responsible for uploading events logs to the\r
- * log server to support the log query APIs.\r
- */\r
-\r
-public class LogManager extends TimerTask {\r
- private NodeConfigManager config;\r
- private Matcher isnodelog;\r
- private Matcher iseventlog;\r
- private Uploader worker;\r
- private String uploaddir;\r
- private String logdir;\r
- private class Uploader extends Thread implements DeliveryQueueHelper {\r
- public long getInitFailureTimer() { return(10000L); }\r
- public double getFailureBackoff() { return(2.0); }\r
- public long getMaxFailureTimer() { return(150000L); }\r
- public long getExpirationTimer() { return(604800000L); }\r
- public int getFairFileLimit() { return(10000); }\r
- public long getFairTimeLimit() { return(86400000); }\r
- public String getDestURL(DestInfo dest, String fileid) {\r
- return(config.getEventLogUrl());\r
- }\r
- public void handleUnreachable(DestInfo dest) {}\r
- public boolean handleRedirection(DestInfo dest, String location, String fileid) { return(false); }\r
- public boolean isFollowRedirects() { return(false); }\r
- public String getFeedId(String subid) { return(null); }\r
- private DeliveryQueue dq;\r
- public Uploader() {\r
- dq = new DeliveryQueue(this, new DestInfo("LogUpload", uploaddir, null, null, null, config.getMyName(), config.getMyAuth(), false, false));\r
- setDaemon(true);\r
- setName("Log Uploader");\r
- start();\r
- }\r
- private synchronized void snooze() {\r
- try {\r
- wait(10000);\r
- } catch (Exception e) {\r
- }\r
- }\r
- private synchronized void poke() {\r
- notify();\r
- }\r
- public void run() {\r
- while (true) {\r
- scan();\r
- dq.run();\r
- snooze();\r
- }\r
- }\r
- private void scan() {\r
- long threshold = System.currentTimeMillis() - config.getLogRetention();\r
- File dir = new File(logdir);\r
- String[] fns = dir.list();\r
- Arrays.sort(fns);\r
- String lastqueued = "events-000000000000.log";\r
- String curlog = StatusLog.getCurLogFile();\r
- curlog = curlog.substring(curlog.lastIndexOf('/') + 1);\r
- try {\r
- Writer w = new FileWriter(uploaddir + "/.meta");\r
- w.write("POST\tlogdata\nContent-Type\ttext/plain\n");\r
- w.close();\r
- BufferedReader br = new BufferedReader(new FileReader(uploaddir + "/.lastqueued"));\r
- lastqueued = br.readLine();\r
- br.close();\r
- } catch (Exception e) {\r
- }\r
- for (String fn: fns) {\r
- if (!isnodelog.reset(fn).matches()) {\r
- if (!iseventlog.reset(fn).matches()) {\r
- continue;\r
- }\r
- if (lastqueued.compareTo(fn) < 0 && curlog.compareTo(fn) > 0) {\r
- lastqueued = fn;\r
- try {\r
- String pid = config.getPublishId();\r
- Files.createLink(Paths.get(uploaddir + "/" + pid), Paths.get(logdir + "/" + fn));\r
- Files.createLink(Paths.get(uploaddir + "/" + pid + ".M"), Paths.get(uploaddir + "/.meta"));\r
- } catch (Exception e) {\r
- }\r
- }\r
- }\r
- File f = new File(dir, fn);\r
- if (f.lastModified() < threshold) {\r
- f.delete();\r
- }\r
- }\r
- try {\r
- (new File(uploaddir + "/.meta")).delete();\r
- Writer w = new FileWriter(uploaddir + "/.lastqueued");\r
- w.write(lastqueued + "\n");\r
- w.close();\r
- } catch (Exception e) {\r
- }\r
- }\r
- }\r
- /**\r
- * Construct a log manager\r
- * <p>\r
- * The log manager will check for expired log files every 5 minutes\r
- * at 20 seconds after the 5 minute boundary. (Actually, the\r
- * interval is the event log rollover interval, which\r
- * defaults to 5 minutes).\r
- */\r
- public LogManager(NodeConfigManager config) {\r
- this.config = config;\r
- try {\r
- isnodelog = Pattern.compile("node\\.log\\.\\d{8}").matcher("");\r
- iseventlog = Pattern.compile("events-\\d{12}\\.log").matcher("");\r
- } catch (Exception e) {}\r
- logdir = config.getLogDir();\r
- uploaddir = logdir + "/.spool";\r
- (new File(uploaddir)).mkdirs();\r
- long now = System.currentTimeMillis();\r
- long intvl = StatusLog.parseInterval(config.getEventLogInterval(), 300000);\r
- long when = now - now % intvl + intvl + 20000L;\r
- config.getTimer().scheduleAtFixedRate(this, when - now, intvl);\r
- worker = new Uploader();\r
- }\r
- /**\r
- * Trigger check for expired log files and log files to upload\r
- */\r
- public void run() {\r
- worker.poke();\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+import java.util.regex.*;
+import java.io.*;
+import java.nio.file.*;
+import java.text.*;
+
+/**
+ * Cleanup of old log files.
+ * <p>
+ * Periodically scan the log directory for log files that are older than
+ * the log file retention interval, and delete them. In a future release,
+ * this class will also be responsible for uploading event logs to the
+ * log server to support the log query APIs.
+ */
+
+public class LogManager extends TimerTask {
+ private NodeConfigManager config;
+ private Matcher isnodelog;
+ private Matcher iseventlog;
+ private Uploader worker;
+ private String uploaddir;
+ private String logdir;
+
+ private class Uploader extends Thread implements DeliveryQueueHelper {
+ public long getInitFailureTimer() {
+ return (10000L);
+ }
+
+ public double getFailureBackoff() {
+ return (2.0);
+ }
+
+ public long getMaxFailureTimer() {
+ return (150000L);
+ }
+
+ public long getExpirationTimer() {
+ return (604800000L);
+ }
+
+ public int getFairFileLimit() {
+ return (10000);
+ }
+
+ public long getFairTimeLimit() {
+ return (86400000);
+ }
+
+ public String getDestURL(DestInfo dest, String fileid) {
+ return (config.getEventLogUrl());
+ }
+
+ public void handleUnreachable(DestInfo dest) {
+ }
+
+ public boolean handleRedirection(DestInfo dest, String location, String fileid) {
+ return (false);
+ }
+
+ public boolean isFollowRedirects() {
+ return (false);
+ }
+
+ public String getFeedId(String subid) {
+ return (null);
+ }
+
+ private DeliveryQueue dq;
+
+ public Uploader() {
+ dq = new DeliveryQueue(this, new DestInfo("LogUpload", uploaddir, null, null, null, config.getMyName(), config.getMyAuth(), false, false));
+ setDaemon(true);
+ setName("Log Uploader");
+ start();
+ }
+
+ private synchronized void snooze() {
+ try {
+ wait(10000);
+ } catch (Exception e) {
+ // interrupted while waiting; simply return and let the caller's loop continue
+ }
+ }
+
+ private synchronized void poke() {
+ notify();
+ }
+
+ public void run() {
+ while (true) {
+ scan();
+ dq.run();
+ snooze();
+ }
+ }
+
+ private void scan() {
+ long threshold = System.currentTimeMillis() - config.getLogRetention();
+ File dir = new File(logdir);
+ String[] fns = dir.list();
+ Arrays.sort(fns);
+ String lastqueued = "events-000000000000.log";
+ String curlog = StatusLog.getCurLogFile();
+ curlog = curlog.substring(curlog.lastIndexOf('/') + 1);
+ try {
+ Writer w = new FileWriter(uploaddir + "/.meta");
+ w.write("POST\tlogdata\nContent-Type\ttext/plain\n");
+ w.close();
+ BufferedReader br = new BufferedReader(new FileReader(uploaddir + "/.lastqueued"));
+ lastqueued = br.readLine();
+ br.close();
+ } catch (Exception e) {
+ // could not write the .meta file or read .lastqueued; fall back to the default marker
+ }
+ for (String fn : fns) {
+ if (!isnodelog.reset(fn).matches()) {
+ if (!iseventlog.reset(fn).matches()) {
+ continue;
+ }
+ if (lastqueued.compareTo(fn) < 0 && curlog.compareTo(fn) > 0) {
+ lastqueued = fn;
+ try {
+ String pid = config.getPublishId();
+ Files.createLink(Paths.get(uploaddir + "/" + pid), Paths.get(logdir + "/" + fn));
+ Files.createLink(Paths.get(uploaddir + "/" + pid + ".M"), Paths.get(uploaddir + "/.meta"));
+ } catch (Exception e) {
+ // unable to hard-link the log into the spool directory; it will not be queued for upload
+ }
+ }
+ }
+ File f = new File(dir, fn);
+ if (f.lastModified() < threshold) {
+ f.delete();
+ }
+ }
+ try {
+ (new File(uploaddir + "/.meta")).delete();
+ Writer w = new FileWriter(uploaddir + "/.lastqueued");
+ w.write(lastqueued + "\n");
+ w.close();
+ } catch (Exception e) {
+ // cleanup of .meta or rewrite of .lastqueued failed; the next scan will try again
+ }
+ }
+ }
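+
+ /*
+ * Note on the spool layout produced by scan() above (names are placeholders):
+ * each completed event log is hard-linked into the .spool directory under a
+ * fresh publish id, alongside a ".M" file (a hard link to .meta) describing the
+ * upload as a POST of "logdata" with Content-Type text/plain:
+ *
+ * <logdir>/events-XXXXXXXXXXXX.log     the rolled-over event log (12-digit timestamp)
+ * <logdir>/.spool/<publishId>          hard link queued for upload
+ * <logdir>/.spool/<publishId>.M        upload metadata (hard link to .meta)
+ * <logdir>/.spool/.lastqueued          name of the newest log already queued
+ */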
+
+ /**
+ * Construct a log manager
+ * <p>
+ * The log manager will check for expired log files every 5 minutes
+ * at 20 seconds after the 5 minute boundary. (Actually, the
+ * interval is the event log rollover interval, which
+ * defaults to 5 minutes).
+ */
+ public LogManager(NodeConfigManager config) {
+ this.config = config;
+ try {
+ isnodelog = Pattern.compile("node\\.log\\.\\d{8}").matcher("");
+ iseventlog = Pattern.compile("events-\\d{12}\\.log").matcher("");
+ } catch (Exception e) {
+ // the patterns are constants and known to compile; this cannot realistically happen
+ }
+ logdir = config.getLogDir();
+ uploaddir = logdir + "/.spool";
+ (new File(uploaddir)).mkdirs();
+ long now = System.currentTimeMillis();
+ long intvl = StatusLog.parseInterval(config.getEventLogInterval(), 300000);
+ long when = now - now % intvl + intvl + 20000L;
+ config.getTimer().scheduleAtFixedRate(this, when - now, intvl);
+ worker = new Uploader();
+ }
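+
+ // Worked example of the scheduling arithmetic above (illustrative numbers):
+ // with intvl = 300000 ms (5 minutes) and now at 12:07:42.000, now - now % intvl
+ // rounds down to 12:05:00.000, so when = 12:05:00.000 + 300000 + 20000 =
+ // 12:10:20.000: the first run fires 20 seconds after the next 5-minute boundary
+ // and then repeats every intvl milliseconds.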
+
+ /**
+ * Trigger a check for expired log files and for log files to upload
+ */
+ public void run() {
+ worker.poke();
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-import java.io.*;\r
-\r
-/**\r
- * Processed configuration for this node.\r
- * <p>\r
- * The NodeConfig represents a processed configuration from the Data Router\r
- * provisioning server. Each time configuration data is received from the\r
- * provisioning server, a new NodeConfig is created and the previous one\r
- * discarded.\r
- */\r
-public class NodeConfig {\r
- /**\r
- * Raw configuration entry for a data router node\r
- */\r
- public static class ProvNode {\r
- private String cname;\r
- /**\r
- * Construct a node configuration entry.\r
- * @param cname The cname of the node.\r
- */\r
- public ProvNode(String cname) {\r
- this.cname = cname;\r
- }\r
- /**\r
- * Get the cname of the node\r
- */\r
- public String getCName() {\r
- return(cname);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for a provisioning parameter\r
- */\r
- public static class ProvParam {\r
- private String name;\r
- private String value;\r
- /**\r
- * Construct a provisioning parameter configuration entry.\r
- * @param name The name of the parameter.\r
- * @param value The value of the parameter.\r
- */\r
- public ProvParam(String name, String value) {\r
- this.name = name;\r
- this.value = value;\r
- }\r
- /**\r
- * Get the name of the parameter.\r
- */\r
- public String getName() {\r
- return(name);\r
- }\r
- /**\r
- * Get the value of the parameter.\r
- */\r
- public String getValue() {\r
- return(value);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for a data feed.\r
- */\r
- public static class ProvFeed {\r
- private String id;\r
- private String logdata;\r
- private String status;\r
- /**\r
- * Construct a feed configuration entry.\r
- * @param id The feed ID of the entry.\r
- * @param logdata String for log entries about the entry.\r
- * @param status The reason why this feed cannot be used (Feed has been deleted, Feed has been suspended) or null if it is valid.\r
- */\r
- public ProvFeed(String id, String logdata, String status) {\r
- this.id = id;\r
- this.logdata = logdata;\r
- this.status = status;\r
- }\r
- /**\r
- * Get the feed id of the data feed.\r
- */\r
- public String getId() {\r
- return(id);\r
- }\r
- /**\r
- * Get the log data of the data feed.\r
- */\r
- public String getLogData() {\r
- return(logdata);\r
- }\r
- /**\r
- * Get the status of the data feed.\r
- */\r
- public String getStatus() {\r
- return(status);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for a feed user.\r
- */\r
- public static class ProvFeedUser {\r
- private String feedid;\r
- private String user;\r
- private String credentials;\r
- /**\r
- * Construct a feed user configuration entry\r
- * @param feedid The feed id.\r
- * @param user The user that will publish to the feed.\r
- * @param credentials The Authorization header the user will use to publish.\r
- */\r
- public ProvFeedUser(String feedid, String user, String credentials) {\r
- this.feedid = feedid;\r
- this.user = user;\r
- this.credentials = credentials;\r
- }\r
- /**\r
- * Get the feed id of the feed user.\r
- */\r
- public String getFeedId() {\r
- return(feedid);\r
- }\r
- /**\r
- * Get the user for the feed user.\r
- */\r
- public String getUser() {\r
- return(user);\r
- }\r
- /**\r
- * Get the credentials for the feed user.\r
- */\r
- public String getCredentials() {\r
- return(credentials);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for a feed subnet\r
- */\r
- public static class ProvFeedSubnet {\r
- private String feedid;\r
- private String cidr;\r
- /**\r
- * Construct a feed subnet configuration entry\r
- * @param feedid The feed ID\r
- * @param cidr The CIDR allowed to publish to the feed.\r
- */\r
- public ProvFeedSubnet(String feedid, String cidr) {\r
- this.feedid = feedid;\r
- this.cidr = cidr;\r
- }\r
- /**\r
- * Get the feed id of the feed subnet.\r
- */\r
- public String getFeedId() {\r
- return(feedid);\r
- }\r
- /**\r
- * Get the CIDR of the feed subnet.\r
- */\r
- public String getCidr() {\r
- return(cidr);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for a subscription\r
- */\r
- public static class ProvSubscription {\r
- private String subid;\r
- private String feedid;\r
- private String url;\r
- private String authuser;\r
- private String credentials;\r
- private boolean metaonly;\r
- private boolean use100;\r
- /**\r
- * Construct a subscription configuration entry\r
- * @param subid The subscription ID\r
- * @param feedid The feed ID\r
- * @param url The base delivery URL (not including the fileid)\r
- * @param authuser The user in the credentials used to deliver\r
- * @param credentials The credentials used to authenticate to the delivery URL exactly as they go in the Authorization header.\r
- * @param metaonly Is this a meta data only subscription?\r
- * @param use100 Should we send Expect: 100-continue?\r
- */\r
- public ProvSubscription(String subid, String feedid, String url, String authuser, String credentials, boolean metaonly, boolean use100) {\r
- this.subid = subid;\r
- this.feedid = feedid;\r
- this.url = url;\r
- this.authuser = authuser;\r
- this.credentials = credentials;\r
- this.metaonly = metaonly;\r
- this.use100 = use100;\r
- }\r
- /**\r
- * Get the subscription ID\r
- */\r
- public String getSubId() {\r
- return(subid);\r
- }\r
- /**\r
- * Get the feed ID\r
- */\r
- public String getFeedId() {\r
- return(feedid);\r
- }\r
- /**\r
- * Get the delivery URL\r
- */\r
- public String getURL() {\r
- return(url);\r
- }\r
- /**\r
- * Get the user\r
- */\r
- public String getAuthUser() {\r
- return(authuser);\r
- }\r
- /**\r
- * Get the delivery credentials\r
- */\r
- public String getCredentials() {\r
- return(credentials);\r
- }\r
- /**\r
- * Is this a meta data only subscription?\r
- */\r
- public boolean isMetaDataOnly() {\r
- return(metaonly);\r
- }\r
- /**\r
- * Should we send Expect: 100-continue?\r
- */\r
- public boolean isUsing100() {\r
- return(use100);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for controlled ingress to the data router node\r
- */\r
- public static class ProvForceIngress {\r
- private String feedid;\r
- private String subnet;\r
- private String user;\r
- private String[] nodes;\r
- /**\r
- * Construct a forced ingress configuration entry\r
- * @param feedid The feed ID that this entry applies to\r
- * @param subnet The CIDR for which publisher IP addresses this entry applies to or "" if it applies to all publisher IP addresses\r
- * @param user The publishing user this entry applies to or "" if it applies to all publishing users.\r
- * @param nodes The array of FQDNs of the data router nodes to redirect publication attempts to.\r
- */\r
- public ProvForceIngress(String feedid, String subnet, String user, String[] nodes) {\r
- this.feedid = feedid;\r
- this.subnet = subnet;\r
- this.user = user;\r
- this.nodes = nodes;\r
- }\r
- /**\r
- * Get the feed ID\r
- */\r
- public String getFeedId() {\r
- return(feedid);\r
- }\r
- /**\r
- * Get the subnet\r
- */\r
- public String getSubnet() {\r
- return(subnet);\r
- }\r
- /**\r
- * Get the user\r
- */\r
- public String getUser() {\r
- return(user);\r
- }\r
- /**\r
- * Get the node\r
- */\r
- public String[] getNodes() {\r
- return(nodes);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for controlled egress from the data router\r
- */\r
- public static class ProvForceEgress {\r
- private String subid;\r
- private String node;\r
- /**\r
- * Construct a forced egress configuration entry\r
- * @param subid The subscription ID the subscription with forced egress\r
- * @param node The node handling deliveries for this subscription\r
- */\r
- public ProvForceEgress(String subid, String node) {\r
- this.subid = subid;\r
- this.node = node;\r
- }\r
- /**\r
- * Get the subscription ID\r
- */\r
- public String getSubId() {\r
- return(subid);\r
- }\r
- /**\r
- * Get the node\r
- */\r
- public String getNode() {\r
- return(node);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for routing within the data router network\r
- */\r
- public static class ProvHop {\r
- private String from;\r
- private String to;\r
- private String via;\r
- /**\r
- * A human readable description of this entry\r
- */\r
- public String toString() {\r
- return("Hop " + from + "->" + to + " via " + via);\r
- }\r
- /**\r
- * Construct a hop entry\r
- * @param from The FQDN of the node with the data to be delivered\r
- * @param to The FQDN of the node that will deliver to the subscriber\r
- * @param via The FQDN of the node where the from node should send the data\r
- */\r
- public ProvHop(String from, String to, String via) {\r
- this.from = from;\r
- this.to = to;\r
- this.via = via;\r
- }\r
- /**\r
- * Get the from node\r
- */\r
- public String getFrom() {\r
- return(from);\r
- }\r
- /**\r
- * Get the to node\r
- */\r
- public String getTo() {\r
- return(to);\r
- }\r
- /**\r
- * Get the next intermediate node\r
- */\r
- public String getVia() {\r
- return(via);\r
- }\r
- }\r
- private static class Redirection {\r
- public SubnetMatcher snm;\r
- public String user;\r
- public String[] nodes;\r
- }\r
- private static class Feed {\r
- public String loginfo;\r
- public String status;\r
- public SubnetMatcher[] subnets;\r
- public Hashtable<String, String> authusers = new Hashtable<String, String>();\r
- public Redirection[] redirections;\r
- public Target[] targets;\r
- }\r
- private Hashtable<String, String> params = new Hashtable<String, String>();\r
- private Hashtable<String, Feed> feeds = new Hashtable<String, Feed>();\r
- private Hashtable<String, DestInfo> nodeinfo = new Hashtable<String, DestInfo>();\r
- private Hashtable<String, DestInfo> subinfo = new Hashtable<String, DestInfo>();\r
- private Hashtable<String, IsFrom> nodes = new Hashtable<String, IsFrom>();\r
- private String myname;\r
- private String myauth;\r
- private DestInfo[] alldests;\r
- private int rrcntr;\r
- /**\r
- * Process the raw provisioning data to configure this node\r
- * @param pd The parsed provisioning data\r
- * @param myname My name as seen by external systems\r
- * @param spooldir The directory where temporary files live\r
- * @param port The port number for URLs\r
- * @param nodeauthkey The keying string used to generate node authentication credentials\r
- */\r
- public NodeConfig(ProvData pd, String myname, String spooldir, int port, String nodeauthkey) {\r
- this.myname = myname;\r
- for (ProvParam p: pd.getParams()) {\r
- params.put(p.getName(), p.getValue());\r
- }\r
- Vector<DestInfo> div = new Vector<DestInfo>();\r
- myauth = NodeUtils.getNodeAuthHdr(myname, nodeauthkey);\r
- for (ProvNode pn: pd.getNodes()) {\r
- String cn = pn.getCName();\r
- if (nodeinfo.get(cn) != null) {\r
- continue;\r
- }\r
- String auth = NodeUtils.getNodeAuthHdr(cn, nodeauthkey);\r
- DestInfo di = new DestInfo("n:" + cn, spooldir + "/n/" + cn, null, "n2n-" + cn, "https://" + cn + ":" + port + "/internal/publish", cn, myauth, false, true);\r
- (new File(di.getSpool())).mkdirs();\r
- div.add(di);\r
- nodeinfo.put(cn, di);\r
- nodes.put(auth, new IsFrom(cn));\r
- }\r
- PathFinder pf = new PathFinder(myname, nodeinfo.keySet().toArray(new String[nodeinfo.size()]), pd.getHops());\r
- Hashtable<String, Vector<Redirection>> rdtab = new Hashtable<String, Vector<Redirection>>();\r
- for (ProvForceIngress pfi: pd.getForceIngress()) {\r
- Vector<Redirection> v = rdtab.get(pfi.getFeedId());\r
- if (v == null) {\r
- v = new Vector<Redirection>();\r
- rdtab.put(pfi.getFeedId(), v);\r
- }\r
- Redirection r = new Redirection();\r
- if (pfi.getSubnet() != null) {\r
- r.snm = new SubnetMatcher(pfi.getSubnet());\r
- }\r
- r.user = pfi.getUser();\r
- r.nodes = pfi.getNodes();\r
- v.add(r);\r
- }\r
- Hashtable<String, Hashtable<String, String>> pfutab = new Hashtable<String, Hashtable<String, String>>();\r
- for (ProvFeedUser pfu: pd.getFeedUsers()) {\r
- Hashtable<String, String> t = pfutab.get(pfu.getFeedId());\r
- if (t == null) {\r
- t = new Hashtable<String, String>();\r
- pfutab.put(pfu.getFeedId(), t);\r
- }\r
- t.put(pfu.getCredentials(), pfu.getUser());\r
- }\r
- Hashtable<String, String> egrtab = new Hashtable<String, String>();\r
- for (ProvForceEgress pfe: pd.getForceEgress()) {\r
- if (pfe.getNode().equals(myname) || nodeinfo.get(pfe.getNode()) == null) {\r
- continue;\r
- }\r
- egrtab.put(pfe.getSubId(), pfe.getNode());\r
- }\r
- Hashtable<String, Vector<SubnetMatcher>> pfstab = new Hashtable<String, Vector<SubnetMatcher>>();\r
- for (ProvFeedSubnet pfs: pd.getFeedSubnets()) {\r
- Vector<SubnetMatcher> v = pfstab.get(pfs.getFeedId());\r
- if (v == null) {\r
- v = new Vector<SubnetMatcher>();\r
- pfstab.put(pfs.getFeedId(), v);\r
- }\r
- v.add(new SubnetMatcher(pfs.getCidr()));\r
- }\r
- Hashtable<String, StringBuffer> ttab = new Hashtable<String, StringBuffer>();\r
- HashSet<String> allfeeds = new HashSet<String>();\r
- for (ProvFeed pfx: pd.getFeeds()) {\r
- if (pfx.getStatus() == null) {\r
- allfeeds.add(pfx.getId());\r
- }\r
- }\r
- for (ProvSubscription ps: pd.getSubscriptions()) {\r
- String sid = ps.getSubId();\r
- String fid = ps.getFeedId();\r
- if (!allfeeds.contains(fid)) {\r
- continue;\r
- }\r
- if (subinfo.get(sid) != null) {\r
- continue;\r
- }\r
- int sididx = 999;\r
- try {\r
- sididx = Integer.parseInt(sid);\r
- sididx -= sididx % 100;\r
- } catch (Exception e) {\r
- }\r
- String siddir = sididx + "/" + sid;\r
- DestInfo di = new DestInfo("s:" + sid, spooldir + "/s/" + siddir, sid, fid, ps.getURL(), ps.getAuthUser(), ps.getCredentials(), ps.isMetaDataOnly(), ps.isUsing100());\r
- (new File(di.getSpool())).mkdirs();\r
- div.add(di);\r
- subinfo.put(sid, di);\r
- String egr = egrtab.get(sid);\r
- if (egr != null) {\r
- sid = pf.getPath(egr) + sid;\r
- }\r
- StringBuffer sb = ttab.get(fid);\r
- if (sb == null) {\r
- sb = new StringBuffer();\r
- ttab.put(fid, sb);\r
- }\r
- sb.append(' ').append(sid);\r
- }\r
- alldests = div.toArray(new DestInfo[div.size()]);\r
- for (ProvFeed pfx: pd.getFeeds()) {\r
- String fid = pfx.getId();\r
- Feed f = feeds.get(fid);\r
- if (f != null) {\r
- continue;\r
- }\r
- f = new Feed();\r
- feeds.put(fid, f);\r
- f.loginfo = pfx.getLogData();\r
- f.status = pfx.getStatus();\r
- Vector<SubnetMatcher> v1 = pfstab.get(fid);\r
- if (v1 == null) {\r
- f.subnets = new SubnetMatcher[0];\r
- } else {\r
- f.subnets = v1.toArray(new SubnetMatcher[v1.size()]);\r
- }\r
- Hashtable<String, String> h1 = pfutab.get(fid);\r
- if (h1 == null) {\r
- h1 = new Hashtable<String, String>();\r
- }\r
- f.authusers = h1;\r
- Vector<Redirection> v2 = rdtab.get(fid);\r
- if (v2 == null) {\r
- f.redirections = new Redirection[0];\r
- } else {\r
- f.redirections = v2.toArray(new Redirection[v2.size()]);\r
- }\r
- StringBuffer sb = ttab.get(fid);\r
- if (sb == null) {\r
- f.targets = new Target[0];\r
- } else {\r
- f.targets = parseRouting(sb.toString());\r
- }\r
- }\r
- }\r
- /**\r
- * Parse a target string into an array of targets\r
- * @param routing Target string\r
- * @return Array of targets.\r
- */\r
- public Target[] parseRouting(String routing) {\r
- routing = routing.trim();\r
- if ("".equals(routing)) {\r
- return(new Target[0]);\r
- }\r
- String[] xx = routing.split("\\s+");\r
- Hashtable<String, Target> tmap = new Hashtable<String, Target>();\r
- HashSet<String> subset = new HashSet<String>();\r
- Vector<Target> tv = new Vector<Target>();\r
- Target[] ret = new Target[xx.length];\r
- for (int i = 0; i < xx.length; i++) {\r
- String t = xx[i];\r
- int j = t.indexOf('/');\r
- if (j == -1) {\r
- DestInfo di = subinfo.get(t);\r
- if (di == null) {\r
- tv.add(new Target(null, t));\r
- } else {\r
- if (!subset.contains(t)) {\r
- subset.add(t);\r
- tv.add(new Target(di, null));\r
- }\r
- }\r
- } else {\r
- String node = t.substring(0, j);\r
- String rtg = t.substring(j + 1);\r
- DestInfo di = nodeinfo.get(node);\r
- if (di == null) {\r
- tv.add(new Target(null, t));\r
- } else {\r
- Target tt = tmap.get(node);\r
- if (tt == null) {\r
- tt = new Target(di, rtg);\r
- tmap.put(node, tt);\r
- tv.add(tt);\r
- } else {\r
- tt.addRouting(rtg);\r
- }\r
- }\r
- }\r
- }\r
- return(tv.toArray(new Target[tv.size()]));\r
- }\r
- /**\r
- * Check whether this is a valid node-to-node transfer\r
- * @param credentials Credentials offered by the supposed node\r
- * @param ip IP address the request came from\r
- */\r
- public boolean isAnotherNode(String credentials, String ip) {\r
- IsFrom n = nodes.get(credentials);\r
- return (n != null && n.isFrom(ip));\r
- }\r
- /**\r
- * Check whether publication is allowed.\r
- * @param feedid The ID of the feed being requested.\r
- * @param credentials The offered credentials\r
- * @param ip The requesting IP address\r
- */\r
- public String isPublishPermitted(String feedid, String credentials, String ip) {\r
- Feed f = feeds.get(feedid);\r
- String nf = "Feed does not exist";\r
- if (f != null) {\r
- nf = f.status;\r
- }\r
- if (nf != null) {\r
- return(nf);\r
- }\r
- String user = f.authusers.get(credentials);\r
- if (user == null) {\r
- return("Publisher not permitted for this feed");\r
- }\r
- if (f.subnets.length == 0) {\r
- return(null);\r
- }\r
- byte[] addr = NodeUtils.getInetAddress(ip);\r
- for (SubnetMatcher snm: f.subnets) {\r
- if (snm.matches(addr)) {\r
- return(null);\r
- }\r
- }\r
- return("Publisher not permitted for this feed");\r
- }\r
- /**\r
- * Get authenticated user\r
- */\r
- public String getAuthUser(String feedid, String credentials) {\r
- return(feeds.get(feedid).authusers.get(credentials));\r
- }\r
- /**\r
- * Check if the request should be redirected to a different ingress node\r
- */\r
- public String getIngressNode(String feedid, String user, String ip) {\r
- Feed f = feeds.get(feedid);\r
- if (f.redirections.length == 0) {\r
- return(null);\r
- }\r
- byte[] addr = NodeUtils.getInetAddress(ip);\r
- for (Redirection r: f.redirections) {\r
- if (r.user != null && !user.equals(r.user)) {\r
- continue;\r
- }\r
- if (r.snm != null && !r.snm.matches(addr)) {\r
- continue;\r
- }\r
- for (String n: r.nodes) {\r
- if (myname.equals(n)) {\r
- return(null);\r
- }\r
- }\r
- if (r.nodes.length == 0) {\r
- return(null);\r
- }\r
- return(r.nodes[rrcntr++ % r.nodes.length]);\r
- }\r
- return(null);\r
- }\r
- /**\r
- * Get a provisioned configuration parameter\r
- */\r
- public String getProvParam(String name) {\r
- return(params.get(name));\r
- }\r
- /**\r
- * Get all the DestInfos\r
- */\r
- public DestInfo[] getAllDests() {\r
- return(alldests);\r
- }\r
- /**\r
- * Get the targets for a feed\r
- * @param feedid The feed ID\r
- * @return The targets this feed should be delivered to\r
- */\r
- public Target[] getTargets(String feedid) {\r
- if (feedid == null) {\r
- return(new Target[0]);\r
- }\r
- Feed f = feeds.get(feedid);\r
- if (f == null) {\r
- return(new Target[0]);\r
- }\r
- return(f.targets);\r
- }\r
- /**\r
- * Get the feed ID for a subscription\r
- * @param subid The subscription ID\r
- * @return The feed ID\r
- */\r
- public String getFeedId(String subid) {\r
- DestInfo di = subinfo.get(subid);\r
- if (di == null) {\r
- return(null);\r
- }\r
- return(di.getLogData());\r
- }\r
- /**\r
- * Get the spool directory for a subscription\r
- * @param subid The subscription ID\r
- * @return The spool directory\r
- */\r
- public String getSpoolDir(String subid) {\r
- DestInfo di = subinfo.get(subid);\r
- if (di == null) {\r
- return(null);\r
- }\r
- return(di.getSpool());\r
- }\r
- /**\r
- * Get the Authorization value this node uses\r
- * @return The Authorization header value for this node\r
- */\r
- public String getMyAuth() {\r
- return(myauth);\r
- }\r
-\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+import java.io.*;
+
+/**
+ * Processed configuration for this node.
+ * <p>
+ * The NodeConfig represents a processed configuration from the Data Router
+ * provisioning server. Each time configuration data is received from the
+ * provisioning server, a new NodeConfig is created and the previous one
+ * discarded.
+ */
+public class NodeConfig {
+ /**
+ * Raw configuration entry for a data router node
+ */
+ public static class ProvNode {
+ private String cname;
+
+ /**
+ * Construct a node configuration entry.
+ *
+ * @param cname The cname of the node.
+ */
+ public ProvNode(String cname) {
+ this.cname = cname;
+ }
+
+ /**
+ * Get the cname of the node
+ */
+ public String getCName() {
+ return (cname);
+ }
+ }
+
+ /**
+ * Raw configuration entry for a provisioning parameter
+ */
+ public static class ProvParam {
+ private String name;
+ private String value;
+
+ /**
+ * Construct a provisioning parameter configuration entry.
+ *
+ * @param name The name of the parameter.
+ * @param value The value of the parameter.
+ */
+ public ProvParam(String name, String value) {
+ this.name = name;
+ this.value = value;
+ }
+
+ /**
+ * Get the name of the parameter.
+ */
+ public String getName() {
+ return (name);
+ }
+
+ /**
+ * Get the value of the parameter.
+ */
+ public String getValue() {
+ return (value);
+ }
+ }
+
+ /**
+ * Raw configuration entry for a data feed.
+ */
+ public static class ProvFeed {
+ private String id;
+ private String logdata;
+ private String status;
+
+ /**
+ * Construct a feed configuration entry.
+ *
+ * @param id The feed ID of the entry.
+ * @param logdata String for log entries about the entry.
+ * @param status The reason why this feed cannot be used (e.g., the feed has been deleted or suspended), or null if it is valid.
+ */
+ public ProvFeed(String id, String logdata, String status) {
+ this.id = id;
+ this.logdata = logdata;
+ this.status = status;
+ }
+
+ /**
+ * Get the feed id of the data feed.
+ */
+ public String getId() {
+ return (id);
+ }
+
+ /**
+ * Get the log data of the data feed.
+ */
+ public String getLogData() {
+ return (logdata);
+ }
+
+ /**
+ * Get the status of the data feed.
+ */
+ public String getStatus() {
+ return (status);
+ }
+ }
+
+ /**
+ * Raw configuration entry for a feed user.
+ */
+ public static class ProvFeedUser {
+ private String feedid;
+ private String user;
+ private String credentials;
+
+ /**
+ * Construct a feed user configuration entry
+ *
+ * @param feedid The feed id.
+ * @param user The user that will publish to the feed.
+ * @param credentials The Authorization header the user will use to publish.
+ */
+ public ProvFeedUser(String feedid, String user, String credentials) {
+ this.feedid = feedid;
+ this.user = user;
+ this.credentials = credentials;
+ }
+
+ /**
+ * Get the feed id of the feed user.
+ */
+ public String getFeedId() {
+ return (feedid);
+ }
+
+ /**
+ * Get the user for the feed user.
+ */
+ public String getUser() {
+ return (user);
+ }
+
+ /**
+ * Get the credentials for the feed user.
+ */
+ public String getCredentials() {
+ return (credentials);
+ }
+ }
+
+ /**
+ * Raw configuration entry for a feed subnet
+ */
+ public static class ProvFeedSubnet {
+ private String feedid;
+ private String cidr;
+
+ /**
+ * Construct a feed subnet configuration entry
+ *
+ * @param feedid The feed ID
+ * @param cidr The CIDR allowed to publish to the feed.
+ */
+ public ProvFeedSubnet(String feedid, String cidr) {
+ this.feedid = feedid;
+ this.cidr = cidr;
+ }
+
+ /**
+ * Get the feed id of the feed subnet.
+ */
+ public String getFeedId() {
+ return (feedid);
+ }
+
+ /**
+ * Get the CIDR of the feed subnet.
+ */
+ public String getCidr() {
+ return (cidr);
+ }
+ }
+
+ /**
+ * Raw configuration entry for a subscription
+ */
+ public static class ProvSubscription {
+ private String subid;
+ private String feedid;
+ private String url;
+ private String authuser;
+ private String credentials;
+ private boolean metaonly;
+ private boolean use100;
+
+ /**
+ * Construct a subscription configuration entry
+ *
+ * @param subid The subscription ID
+ * @param feedid The feed ID
+ * @param url The base delivery URL (not including the fileid)
+ * @param authuser The user in the credentials used to deliver
+ * @param credentials The credentials used to authenticate to the delivery URL exactly as they go in the Authorization header.
+ * @param metaonly Is this a metadata-only subscription?
+ * @param use100 Should we send Expect: 100-continue?
+ */
+ public ProvSubscription(String subid, String feedid, String url, String authuser, String credentials, boolean metaonly, boolean use100) {
+ this.subid = subid;
+ this.feedid = feedid;
+ this.url = url;
+ this.authuser = authuser;
+ this.credentials = credentials;
+ this.metaonly = metaonly;
+ this.use100 = use100;
+ }
+
+ /**
+ * Get the subscription ID
+ */
+ public String getSubId() {
+ return (subid);
+ }
+
+ /**
+ * Get the feed ID
+ */
+ public String getFeedId() {
+ return (feedid);
+ }
+
+ /**
+ * Get the delivery URL
+ */
+ public String getURL() {
+ return (url);
+ }
+
+ /**
+ * Get the user
+ */
+ public String getAuthUser() {
+ return (authuser);
+ }
+
+ /**
+ * Get the delivery credentials
+ */
+ public String getCredentials() {
+ return (credentials);
+ }
+
+ /**
+ * Is this a metadata-only subscription?
+ */
+ public boolean isMetaDataOnly() {
+ return (metaonly);
+ }
+
+ /**
+ * Should we send Expect: 100-continue?
+ */
+ public boolean isUsing100() {
+ return (use100);
+ }
+ }
+
+ /**
+ * Raw configuration entry for controlled ingress to the data router node
+ */
+ public static class ProvForceIngress {
+ private String feedid;
+ private String subnet;
+ private String user;
+ private String[] nodes;
+
+ /**
+ * Construct a forced ingress configuration entry
+ *
+ * @param feedid The feed ID that this entry applies to
+ * @param subnet The CIDR of publisher IP addresses to which this entry applies, or "" if it applies to all publisher IP addresses
+ * @param user The publishing user this entry applies to or "" if it applies to all publishing users.
+ * @param nodes The array of FQDNs of the data router nodes to redirect publication attempts to.
+ */
+ public ProvForceIngress(String feedid, String subnet, String user, String[] nodes) {
+ this.feedid = feedid;
+ this.subnet = subnet;
+ this.user = user;
+ this.nodes = nodes;
+ }
+
+ /**
+ * Get the feed ID
+ */
+ public String getFeedId() {
+ return (feedid);
+ }
+
+ /**
+ * Get the subnet
+ */
+ public String getSubnet() {
+ return (subnet);
+ }
+
+ /**
+ * Get the user
+ */
+ public String getUser() {
+ return (user);
+ }
+
+ /**
+ * Get the nodes
+ */
+ public String[] getNodes() {
+ return (nodes);
+ }
+ }
+
+ /**
+ * Raw configuration entry for controlled egress from the data router
+ */
+ public static class ProvForceEgress {
+ private String subid;
+ private String node;
+
+ /**
+ * Construct a forced egress configuration entry
+ *
+ * @param subid The subscription ID of the subscription with forced egress
+ * @param node The node handling deliveries for this subscription
+ */
+ public ProvForceEgress(String subid, String node) {
+ this.subid = subid;
+ this.node = node;
+ }
+
+ /**
+ * Get the subscription ID
+ */
+ public String getSubId() {
+ return (subid);
+ }
+
+ /**
+ * Get the node
+ */
+ public String getNode() {
+ return (node);
+ }
+ }
+
+ /**
+ * Raw configuration entry for routing within the data router network
+ */
+ public static class ProvHop {
+ private String from;
+ private String to;
+ private String via;
+
+ /**
+ * A human readable description of this entry
+ */
+ public String toString() {
+ return ("Hop " + from + "->" + to + " via " + via);
+ }
+
+ /**
+ * Construct a hop entry
+ *
+ * @param from The FQDN of the node with the data to be delivered
+ * @param to The FQDN of the node that will deliver to the subscriber
+ * @param via The FQDN of the node where the from node should send the data
+ */
+ public ProvHop(String from, String to, String via) {
+ this.from = from;
+ this.to = to;
+ this.via = via;
+ }
+
+ /**
+ * Get the from node
+ */
+ public String getFrom() {
+ return (from);
+ }
+
+ /**
+ * Get the to node
+ */
+ public String getTo() {
+ return (to);
+ }
+
+ /**
+ * Get the next intermediate node
+ */
+ public String getVia() {
+ return (via);
+ }
+ }
+
+ private static class Redirection {
+ public SubnetMatcher snm;
+ public String user;
+ public String[] nodes;
+ }
+
+ private static class Feed {
+ public String loginfo;
+ public String status;
+ public SubnetMatcher[] subnets;
+ public Hashtable<String, String> authusers = new Hashtable<String, String>();
+ public Redirection[] redirections;
+ public Target[] targets;
+ }
+
+ private Hashtable<String, String> params = new Hashtable<String, String>();
+ private Hashtable<String, Feed> feeds = new Hashtable<String, Feed>();
+ private Hashtable<String, DestInfo> nodeinfo = new Hashtable<String, DestInfo>();
+ private Hashtable<String, DestInfo> subinfo = new Hashtable<String, DestInfo>();
+ private Hashtable<String, IsFrom> nodes = new Hashtable<String, IsFrom>();
+ private String myname;
+ private String myauth;
+ private DestInfo[] alldests;
+ private int rrcntr;
+
+ /**
+ * Process the raw provisioning data to configure this node
+ *
+ * @param pd The parsed provisioning data
+ * @param myname My name as seen by external systems
+ * @param spooldir The directory where temporary files live
+ * @param port The port number for URLs
+ * @param nodeauthkey The keying string used to generate node authentication credentials
+ */
+ public NodeConfig(ProvData pd, String myname, String spooldir, int port, String nodeauthkey) {
+ this.myname = myname;
+ for (ProvParam p : pd.getParams()) {
+ params.put(p.getName(), p.getValue());
+ }
+ Vector<DestInfo> div = new Vector<DestInfo>();
+ myauth = NodeUtils.getNodeAuthHdr(myname, nodeauthkey);
+ for (ProvNode pn : pd.getNodes()) {
+ String cn = pn.getCName();
+ if (nodeinfo.get(cn) != null) {
+ continue;
+ }
+ String auth = NodeUtils.getNodeAuthHdr(cn, nodeauthkey);
+ DestInfo di = new DestInfo("n:" + cn, spooldir + "/n/" + cn, null, "n2n-" + cn, "https://" + cn + ":" + port + "/internal/publish", cn, myauth, false, true);
+ (new File(di.getSpool())).mkdirs();
+ div.add(di);
+ nodeinfo.put(cn, di);
+ nodes.put(auth, new IsFrom(cn));
+ }
+ PathFinder pf = new PathFinder(myname, nodeinfo.keySet().toArray(new String[nodeinfo.size()]), pd.getHops());
+ Hashtable<String, Vector<Redirection>> rdtab = new Hashtable<String, Vector<Redirection>>();
+ for (ProvForceIngress pfi : pd.getForceIngress()) {
+ Vector<Redirection> v = rdtab.get(pfi.getFeedId());
+ if (v == null) {
+ v = new Vector<Redirection>();
+ rdtab.put(pfi.getFeedId(), v);
+ }
+ Redirection r = new Redirection();
+ if (pfi.getSubnet() != null) {
+ r.snm = new SubnetMatcher(pfi.getSubnet());
+ }
+ r.user = pfi.getUser();
+ r.nodes = pfi.getNodes();
+ v.add(r);
+ }
+ Hashtable<String, Hashtable<String, String>> pfutab = new Hashtable<String, Hashtable<String, String>>();
+ for (ProvFeedUser pfu : pd.getFeedUsers()) {
+ Hashtable<String, String> t = pfutab.get(pfu.getFeedId());
+ if (t == null) {
+ t = new Hashtable<String, String>();
+ pfutab.put(pfu.getFeedId(), t);
+ }
+ t.put(pfu.getCredentials(), pfu.getUser());
+ }
+ Hashtable<String, String> egrtab = new Hashtable<String, String>();
+ for (ProvForceEgress pfe : pd.getForceEgress()) {
+ if (pfe.getNode().equals(myname) || nodeinfo.get(pfe.getNode()) == null) {
+ continue;
+ }
+ egrtab.put(pfe.getSubId(), pfe.getNode());
+ }
+ Hashtable<String, Vector<SubnetMatcher>> pfstab = new Hashtable<String, Vector<SubnetMatcher>>();
+ for (ProvFeedSubnet pfs : pd.getFeedSubnets()) {
+ Vector<SubnetMatcher> v = pfstab.get(pfs.getFeedId());
+ if (v == null) {
+ v = new Vector<SubnetMatcher>();
+ pfstab.put(pfs.getFeedId(), v);
+ }
+ v.add(new SubnetMatcher(pfs.getCidr()));
+ }
+ Hashtable<String, StringBuffer> ttab = new Hashtable<String, StringBuffer>();
+ HashSet<String> allfeeds = new HashSet<String>();
+ for (ProvFeed pfx : pd.getFeeds()) {
+ if (pfx.getStatus() == null) {
+ allfeeds.add(pfx.getId());
+ }
+ }
+ for (ProvSubscription ps : pd.getSubscriptions()) {
+ String sid = ps.getSubId();
+ String fid = ps.getFeedId();
+ if (!allfeeds.contains(fid)) {
+ continue;
+ }
+ if (subinfo.get(sid) != null) {
+ continue;
+ }
+ int sididx = 999;
+ try {
+ sididx = Integer.parseInt(sid);
+ sididx -= sididx % 100;
+ } catch (Exception e) {
+ // non-numeric subscription ID; fall back to the default 999 spool bucket
+ }
+ String siddir = sididx + "/" + sid;
+ DestInfo di = new DestInfo("s:" + sid, spooldir + "/s/" + siddir, sid, fid, ps.getURL(), ps.getAuthUser(), ps.getCredentials(), ps.isMetaDataOnly(), ps.isUsing100());
+ (new File(di.getSpool())).mkdirs();
+ div.add(di);
+ subinfo.put(sid, di);
+ String egr = egrtab.get(sid);
+ if (egr != null) {
+ sid = pf.getPath(egr) + sid;
+ }
+ StringBuffer sb = ttab.get(fid);
+ if (sb == null) {
+ sb = new StringBuffer();
+ ttab.put(fid, sb);
+ }
+ sb.append(' ').append(sid);
+ }
+ alldests = div.toArray(new DestInfo[div.size()]);
+ for (ProvFeed pfx : pd.getFeeds()) {
+ String fid = pfx.getId();
+ Feed f = feeds.get(fid);
+ if (f != null) {
+ continue;
+ }
+ f = new Feed();
+ feeds.put(fid, f);
+ f.loginfo = pfx.getLogData();
+ f.status = pfx.getStatus();
+ Vector<SubnetMatcher> v1 = pfstab.get(fid);
+ if (v1 == null) {
+ f.subnets = new SubnetMatcher[0];
+ } else {
+ f.subnets = v1.toArray(new SubnetMatcher[v1.size()]);
+ }
+ Hashtable<String, String> h1 = pfutab.get(fid);
+ if (h1 == null) {
+ h1 = new Hashtable<String, String>();
+ }
+ f.authusers = h1;
+ Vector<Redirection> v2 = rdtab.get(fid);
+ if (v2 == null) {
+ f.redirections = new Redirection[0];
+ } else {
+ f.redirections = v2.toArray(new Redirection[v2.size()]);
+ }
+ StringBuffer sb = ttab.get(fid);
+ if (sb == null) {
+ f.targets = new Target[0];
+ } else {
+ f.targets = parseRouting(sb.toString());
+ }
+ }
+ }
+
+ /**
+ * Parse a target string into an array of targets
+ *
+ * @param routing Target string
+ * @return Array of targets.
+ */
+ public Target[] parseRouting(String routing) {
+ routing = routing.trim();
+ if ("".equals(routing)) {
+ return (new Target[0]);
+ }
+ String[] xx = routing.split("\\s+");
+ Hashtable<String, Target> tmap = new Hashtable<String, Target>();
+ HashSet<String> subset = new HashSet<String>();
+ Vector<Target> tv = new Vector<Target>();
+ for (int i = 0; i < xx.length; i++) {
+ String t = xx[i];
+ int j = t.indexOf('/');
+ if (j == -1) {
+ DestInfo di = subinfo.get(t);
+ if (di == null) {
+ tv.add(new Target(null, t));
+ } else {
+ if (!subset.contains(t)) {
+ subset.add(t);
+ tv.add(new Target(di, null));
+ }
+ }
+ } else {
+ String node = t.substring(0, j);
+ String rtg = t.substring(j + 1);
+ DestInfo di = nodeinfo.get(node);
+ if (di == null) {
+ tv.add(new Target(null, t));
+ } else {
+ Target tt = tmap.get(node);
+ if (tt == null) {
+ tt = new Target(di, rtg);
+ tmap.put(node, tt);
+ tv.add(tt);
+ } else {
+ tt.addRouting(rtg);
+ }
+ }
+ }
+ }
+ return (tv.toArray(new Target[tv.size()]));
+ }
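+
+ /*
+ * Illustrative example of the routing string parsed above (the subscription IDs
+ * and node name are hypothetical): the string
+ *
+ * "123 node2.example.org/456 node2.example.org/789 999"
+ *
+ * produces one Target per locally known subscription ("123", "999"), a single
+ * Target for node2.example.org whose routing accumulates "456" and "789" via
+ * addRouting(), and a Target with a null DestInfo for any entry that matches
+ * neither a subscription nor a known node.
+ */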
+
+ /**
+ * Check whether this is a valid node-to-node transfer
+ *
+ * @param credentials Credentials offered by the supposed node
+ * @param ip IP address the request came from
+ */
+ public boolean isAnotherNode(String credentials, String ip) {
+ IsFrom n = nodes.get(credentials);
+ return (n != null && n.isFrom(ip));
+ }
+
+ /**
+ * Check whether publication is allowed.
+ *
+ * @param feedid The ID of the feed being requested.
+ * @param credentials The offered credentials
+ * @param ip The requesting IP address
+ */
+ public String isPublishPermitted(String feedid, String credentials, String ip) {
+ Feed f = feeds.get(feedid);
+ String nf = "Feed does not exist";
+ if (f != null) {
+ nf = f.status;
+ }
+ if (nf != null) {
+ return (nf);
+ }
+ String user = f.authusers.get(credentials);
+ if (user == null) {
+ return ("Publisher not permitted for this feed");
+ }
+ if (f.subnets.length == 0) {
+ return (null);
+ }
+ byte[] addr = NodeUtils.getInetAddress(ip);
+ for (SubnetMatcher snm : f.subnets) {
+ if (snm.matches(addr)) {
+ return (null);
+ }
+ }
+ return ("Publisher not permitted for this feed");
+ }
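+
+ /*
+ * Illustrative caller sketch (variable names are assumptions, not from the
+ * original source):
+ *
+ * String reason = config.isPublishPermitted(feedid, authHeader, remoteIp);
+ * if (reason != null) {
+ *     // reject the publish request and report "reason" to the client
+ * } else {
+ *     String user = config.getAuthUser(feedid, authHeader);
+ *     // proceed with publication on behalf of "user"
+ * }
+ */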
+
+ /**
+ * Get authenticated user
+ */
+ public String getAuthUser(String feedid, String credentials) {
+ return (feeds.get(feedid).authusers.get(credentials));
+ }
+
+ /**
+ * Check if the request should be redirected to a different ingress node
+ */
+ public String getIngressNode(String feedid, String user, String ip) {
+ Feed f = feeds.get(feedid);
+ if (f.redirections.length == 0) {
+ return (null);
+ }
+ byte[] addr = NodeUtils.getInetAddress(ip);
+ for (Redirection r : f.redirections) {
+ if (r.user != null && !user.equals(r.user)) {
+ continue;
+ }
+ if (r.snm != null && !r.snm.matches(addr)) {
+ continue;
+ }
+ for (String n : r.nodes) {
+ if (myname.equals(n)) {
+ return (null);
+ }
+ }
+ if (r.nodes.length == 0) {
+ return (null);
+ }
+ return (r.nodes[rrcntr++ % r.nodes.length]);
+ }
+ return (null);
+ }
+
+ /**
+ * Get a provisioned configuration parameter
+ */
+ public String getProvParam(String name) {
+ return (params.get(name));
+ }
+
+ /**
+ * Get all the DestInfos
+ */
+ public DestInfo[] getAllDests() {
+ return (alldests);
+ }
+
+ /**
+ * Get the targets for a feed
+ *
+ * @param feedid The feed ID
+ * @return The targets this feed should be delivered to
+ */
+ public Target[] getTargets(String feedid) {
+ if (feedid == null) {
+ return (new Target[0]);
+ }
+ Feed f = feeds.get(feedid);
+ if (f == null) {
+ return (new Target[0]);
+ }
+ return (f.targets);
+ }
+
+ /**
+ * Get the feed ID for a subscription
+ *
+ * @param subid The subscription ID
+ * @return The feed ID
+ */
+ public String getFeedId(String subid) {
+ DestInfo di = subinfo.get(subid);
+ if (di == null) {
+ return (null);
+ }
+ return (di.getLogData());
+ }
+
+ /**
+ * Get the spool directory for a subscription
+ *
+ * @param subid The subscription ID
+ * @return The spool directory
+ */
+ public String getSpoolDir(String subid) {
+ DestInfo di = subinfo.get(subid);
+ if (di == null) {
+ return (null);
+ }
+ return (di.getSpool());
+ }
+
+ /**
+ * Get the Authorization value this node uses
+ *
+ * @return The Authorization header value for this node
+ */
+ public String getMyAuth() {
+ return (myauth);
+ }
+
+}
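+
+ /*
+ * Illustrative construction sketch (mirrors the fetch logic in
+ * NodeConfigManager.fetchconfig(); the variable names are assumptions):
+ *
+ * Reader r = new InputStreamReader((new URL(provurl)).openStream());
+ * NodeConfig cfg = new NodeConfig(new ProvData(r), myname, spooldir, port, nodeAuthKey);
+ * Target[] targets = cfg.getTargets(feedid);   // where files for this feed should be delivered
+ */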
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.net.*;\r
-import java.util.*;\r
-import java.io.*;\r
-import org.apache.log4j.Logger;\r
-import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;\r
-\r
-import com.att.eelf.configuration.EELFLogger;\r
-import com.att.eelf.configuration.EELFManager;\r
-\r
-\r
-/**\r
- * Maintain the configuration of a Data Router node\r
- * <p>\r
- * The NodeConfigManager is the single point of contact for servlet, delivery, event logging, and log retention subsystems to access configuration information. (Log4J has its own configuration mechanism).\r
- * <p>\r
- * There are two basic sets of configuration data. The\r
- * static local configuration data, stored in a local configuration file (created\r
- * as part of installation by SWM), and the dynamic global\r
- * configuration data fetched from the data router provisioning server.\r
- */\r
-public class NodeConfigManager implements DeliveryQueueHelper {\r
- private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.NodeConfigManager");\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeConfigManager");\r
- private static NodeConfigManager base = new NodeConfigManager();\r
-\r
- private Timer timer = new Timer("Node Configuration Timer", true);\r
- private long maxfailuretimer;\r
- private long initfailuretimer;\r
- private long expirationtimer;\r
- private double failurebackoff;\r
- private long fairtimelimit;\r
- private int fairfilelimit;\r
- private double fdpstart;\r
- private double fdpstop;\r
- private int deliverythreads;\r
- private String provurl;\r
- private String provhost;\r
- private IsFrom provcheck;\r
- private int gfport;\r
- private int svcport;\r
- private int port;\r
- private String spooldir;\r
- private String logdir;\r
- private long logretention;\r
- private String redirfile;\r
- private String kstype;\r
- private String ksfile;\r
- private String kspass;\r
- private String kpass;\r
- private String tstype;\r
- private String tsfile;\r
- private String tspass;\r
- private String myname;\r
- private RedirManager rdmgr;\r
- private RateLimitedOperation pfetcher;\r
- private NodeConfig config;\r
- private File quiesce;\r
- private PublishId pid;\r
- private String nak;\r
- private TaskList configtasks = new TaskList();\r
- private String eventlogurl;\r
- private String eventlogprefix;\r
- private String eventlogsuffix;\r
- private String eventloginterval;\r
- private boolean followredirects;\r
-\r
- \r
- /**\r
- * Get the default node configuration manager\r
- */\r
- public static NodeConfigManager getInstance() {\r
- return(base);\r
- }\r
- /**\r
- * Initialize the configuration of a Data Router node\r
- */\r
- private NodeConfigManager() {\r
- Properties p = new Properties();\r
- try {\r
- p.load(new FileInputStream(System.getProperty("org.onap.dmaap.datarouter.node.ConfigFile", "/opt/app/datartr/etc/node.properties")));\r
- } catch (Exception e) {\r
- \r
- NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");\r
- eelflogger.error(EelfMsgs.MESSAGE_PROPERTIES_LOAD_ERROR);\r
- logger.error("NODE0301 Unable to load local configuration file " + System.getProperty("org.onap.dmaap.datarouter.node.ConfigFile", "/opt/app/datartr/etc/node.properties"), e);\r
- }\r
- provurl = p.getProperty("ProvisioningURL", "https://feeds-drtr.web.att.com/internal/prov");\r
- try {\r
- provhost = (new URL(provurl)).getHost();\r
- } catch (Exception e) {\r
- NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");\r
- eelflogger.error(EelfMsgs.MESSAGE_BAD_PROV_URL, provurl);\r
- logger.error("NODE0302 Bad provisioning server URL " + provurl);\r
- System.exit(1);\r
- }\r
- logger.info("NODE0303 Provisioning server is " + provhost);\r
- eventlogurl = p.getProperty("LogUploadURL", "https://feeds-drtr.web.att.com/internal/logs");\r
- provcheck = new IsFrom(provhost);\r
- gfport = Integer.parseInt(p.getProperty("IntHttpPort", "8080"));\r
- svcport = Integer.parseInt(p.getProperty("IntHttpsPort", "8443"));\r
- port = Integer.parseInt(p.getProperty("ExtHttpsPort", "443"));\r
- long minpfinterval = Long.parseLong(p.getProperty("MinProvFetchInterval", "10000"));\r
- long minrsinterval = Long.parseLong(p.getProperty("MinRedirSaveInterval", "10000"));\r
- spooldir = p.getProperty("SpoolDir", "spool");\r
- File fdir = new File(spooldir + "/f");\r
- fdir.mkdirs();\r
- for (File junk: fdir.listFiles()) {\r
- if (junk.isFile()) {\r
- junk.delete();\r
- }\r
- }\r
- logdir = p.getProperty("LogDir", "logs");\r
- (new File(logdir)).mkdirs();\r
- logretention = Long.parseLong(p.getProperty("LogRetention", "30")) * 86400000L;\r
- eventlogprefix = logdir + "/events";\r
- eventlogsuffix = ".log";\r
- String redirfile = p.getProperty("RedirectionFile", "etc/redirections.dat");\r
- kstype = p.getProperty("KeyStoreType", "jks");\r
- ksfile = p.getProperty("KeyStoreFile", "etc/keystore");\r
- kspass = p.getProperty("KeyStorePassword", "changeme");\r
- kpass = p.getProperty("KeyPassword", "changeme");\r
- tstype = p.getProperty("TrustStoreType", "jks");\r
- tsfile = p.getProperty("TrustStoreFile");\r
- tspass = p.getProperty("TrustStorePassword", "changeme");\r
- if (tsfile != null && tsfile.length() > 0) {\r
- System.setProperty("javax.net.ssl.trustStoreType", tstype);\r
- System.setProperty("javax.net.ssl.trustStore", tsfile);\r
- System.setProperty("javax.net.ssl.trustStorePassword", tspass);\r
- }\r
- nak = p.getProperty("NodeAuthKey", "Node123!");\r
- quiesce = new File(p.getProperty("QuiesceFile", "etc/SHUTDOWN"));\r
- myname = NodeUtils.getCanonicalName(kstype, ksfile, kspass);\r
- if (myname == null) {\r
- NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");\r
- eelflogger.error(EelfMsgs.MESSAGE_KEYSTORE_FETCH_ERROR, ksfile);\r
- logger.error("NODE0309 Unable to fetch canonical name from keystore file " + ksfile);\r
- System.exit(1);\r
- }\r
- logger.info("NODE0304 My certificate says my name is " + myname);\r
- pid = new PublishId(myname);\r
- rdmgr = new RedirManager(redirfile, minrsinterval, timer);\r
- pfetcher = new RateLimitedOperation(minpfinterval, timer) {\r
- public void run() {\r
- fetchconfig();\r
- }\r
- };\r
- logger.info("NODE0305 Attempting to fetch configuration at " + provurl);\r
- pfetcher.request();\r
- }\r
- private void localconfig() {\r
- followredirects = Boolean.parseBoolean(getProvParam("FOLLOW_REDIRECTS", "false"));\r
- eventloginterval = getProvParam("LOGROLL_INTERVAL", "5m");\r
- initfailuretimer = 10000;\r
- maxfailuretimer = 3600000;\r
- expirationtimer = 86400000;\r
- failurebackoff = 2.0;\r
- deliverythreads = 40;\r
- fairfilelimit = 100;\r
- fairtimelimit = 60000;\r
- fdpstart = 0.05;\r
- fdpstop = 0.2;\r
- try { initfailuretimer = (long)(Double.parseDouble(getProvParam("DELIVERY_INIT_RETRY_INTERVAL")) * 1000); } catch (Exception e) {}\r
- try { maxfailuretimer = (long)(Double.parseDouble(getProvParam("DELIVERY_MAX_RETRY_INTERVAL")) * 1000); } catch (Exception e) {}\r
- try { expirationtimer = (long)(Double.parseDouble(getProvParam("DELIVERY_MAX_AGE")) * 1000); } catch (Exception e) {}\r
- try { failurebackoff = Double.parseDouble(getProvParam("DELIVERY_RETRY_RATIO")); } catch (Exception e) {}\r
- try { deliverythreads = Integer.parseInt(getProvParam("DELIVERY_THREADS")); } catch (Exception e) {}\r
- try { fairfilelimit = Integer.parseInt(getProvParam("FAIR_FILE_LIMIT")); } catch (Exception e) {}\r
- try { fairtimelimit = (long)(Double.parseDouble(getProvParam("FAIR_TIME_LIMIT")) * 1000); } catch (Exception e) {}\r
- try { fdpstart = Double.parseDouble(getProvParam("FREE_DISK_RED_PERCENT")) / 100.0; } catch (Exception e) {}\r
- try { fdpstop = Double.parseDouble(getProvParam("FREE_DISK_YELLOW_PERCENT")) / 100.0; } catch (Exception e) {}\r
- if (fdpstart < 0.01) {\r
- fdpstart = 0.01;\r
- }\r
- if (fdpstart > 0.5) {\r
- fdpstart = 0.5;\r
- }\r
- if (fdpstop < fdpstart) {\r
- fdpstop = fdpstart;\r
- }\r
- if (fdpstop > 0.5) {\r
- fdpstop = 0.5;\r
- }\r
- }\r
- private void fetchconfig() {\r
- try {\r
- System.out.println("provurl:: "+provurl);\r
- Reader r = new InputStreamReader((new URL(provurl)).openStream());\r
- config = new NodeConfig(new ProvData(r), myname, spooldir, port, nak);\r
- localconfig();\r
- configtasks.startRun();\r
- Runnable rr;\r
- while ((rr = configtasks.next()) != null) {\r
- try {\r
- rr.run();\r
- } catch (Exception e) {\r
- }\r
- }\r
- } catch (Exception e) {\r
- e.printStackTrace();\r
- NodeUtils.setIpAndFqdnForEelf("fetchconfigs");\r
- eelflogger.error(EelfMsgs.MESSAGE_CONF_FAILED, e.toString());\r
- logger.error("NODE0306 Configuration failed " + e.toString() + " - try again later", e);\r
- pfetcher.request();\r
- }\r
- }\r
- /**\r
- * Process a gofetch request from a particular IP address. If the\r
- * IP address is not an IP address we would go to to fetch the\r
- * provisioning data, ignore the request. If the data has been\r
- * fetched very recently (default 10 seconds), wait a while before fetching again.\r
- */\r
- public synchronized void gofetch(String remoteaddr) {\r
- if (provcheck.isFrom(remoteaddr)) {\r
- logger.info("NODE0307 Received configuration fetch request from provisioning server " + remoteaddr);\r
- pfetcher.request();\r
- } else {\r
- logger.info("NODE0308 Received configuration fetch request from unexpected server " + remoteaddr);\r
- }\r
- }\r
- /**\r
- * Am I configured?\r
- */\r
- public boolean isConfigured() {\r
- return(config != null);\r
- }\r
- /**\r
- * Am I shut down?\r
- */\r
- public boolean isShutdown() {\r
- return(quiesce.exists());\r
- }\r
- /**\r
- * Given a routing string, get the targets.\r
- * @param routing Target string\r
- * @return array of targets\r
- */\r
- public Target[] parseRouting(String routing) {\r
- return(config.parseRouting(routing));\r
- }\r
- /**\r
- * Given a set of credentials and an IP address, is this request from another node?\r
- * @param credentials Credentials offered by the supposed node\r
- * @param ip IP address the request came from\r
- * @return If the credentials and IP address are recognized, true, otherwise false.\r
- */\r
- public boolean isAnotherNode(String credentials, String ip) {\r
- return(config.isAnotherNode(credentials, ip));\r
- }\r
- /**\r
- * Check whether publication is allowed.\r
- * @param feedid The ID of the feed being requested\r
- * @param credentials The offered credentials\r
- * @param ip The requesting IP address\r
- * @return True if the IP and credentials are valid for the specified feed.\r
- */\r
- public String isPublishPermitted(String feedid, String credentials, String ip) {\r
- return(config.isPublishPermitted(feedid, credentials, ip));\r
- }\r
- /**\r
- * Check who the user is given the feed ID and the offered credentials.\r
- * @param feedid The ID of the feed specified\r
- * @param credentials The offered credentials\r
- * @return Null if the credentials are invalid or the user if they are valid.\r
- */\r
- public String getAuthUser(String feedid, String credentials) {\r
- return(config.getAuthUser(feedid, credentials));\r
- }\r
- /**\r
- * Check if the publish request should be sent to another node based on the feedid, user, and source IP address.\r
- * @param feedid The ID of the feed specified\r
- * @param user The publishing user\r
- * @param ip The IP address of the publish endpoint\r
- * @return Null if the request should be accepted or the correct hostname if it should be sent to another node.\r
- */\r
- public String getIngressNode(String feedid, String user, String ip) {\r
- return(config.getIngressNode(feedid, user, ip));\r
- }\r
- /**\r
- * Get a provisioned configuration parameter (from the provisioning server configuration)\r
- * @param name The name of the parameter\r
- * @return The value of the parameter or null if it is not defined.\r
- */\r
- public String getProvParam(String name) {\r
- return(config.getProvParam(name));\r
- }\r
- /**\r
- * Get a provisioned configuration parameter (from the provisioning server configuration)\r
- * @param name The name of the parameter\r
- * @param deflt The value to use if the parameter is not defined\r
- * @return The value of the parameter or deflt if it is not defined.\r
- */\r
- public String getProvParam(String name, String deflt) {\r
- name = config.getProvParam(name);\r
- if (name == null) {\r
- name = deflt;\r
- }\r
- return(name);\r
- }\r
- /**\r
- * Generate a publish ID\r
- */\r
- public String getPublishId() {\r
- return(pid.next());\r
- }\r
- /**\r
- * Get all the outbound spooling destinations.\r
- * This will include both subscriptions and nodes.\r
- */\r
- public DestInfo[] getAllDests() {\r
- return(config.getAllDests());\r
- }\r
- /**\r
- * Register a task to run whenever the configuration changes\r
- */\r
- public void registerConfigTask(Runnable task) {\r
- configtasks.addTask(task);\r
- }\r
- /**\r
- * Deregister a task to run whenever the configuration changes\r
- */\r
- public void deregisterConfigTask(Runnable task) {\r
- configtasks.removeTask(task);\r
- }\r
- /**\r
- * Get the URL to deliver a message to.\r
- * @param destinfo The destination information\r
- * @param fileid The file ID\r
- * @return The URL to deliver to\r
- */\r
- public String getDestURL(DestInfo destinfo, String fileid) {\r
- String subid = destinfo.getSubId();\r
- String purl = destinfo.getURL();\r
- if (followredirects && subid != null) {\r
- purl = rdmgr.lookup(subid, purl);\r
- }\r
- return(purl + "/" + fileid);\r
- }\r
- /**\r
- * Is a destination redirected?\r
- */\r
- public boolean isDestRedirected(DestInfo destinfo) {\r
- return(followredirects && rdmgr.isRedirected(destinfo.getSubId()));\r
- }\r
- /**\r
- * Set up redirection on receipt of a 3XX from a target URL\r
- */\r
- public boolean handleRedirection(DestInfo destinfo, String redirto, String fileid) {\r
- fileid = "/" + fileid;\r
- String subid = destinfo.getSubId();\r
- String purl = destinfo.getURL();\r
- if (followredirects && subid != null && redirto.endsWith(fileid)) {\r
- redirto = redirto.substring(0, redirto.length() - fileid.length());\r
- if (!redirto.equals(purl)) {\r
- rdmgr.redirect(subid, purl, redirto);\r
- return(true);\r
- }\r
- }\r
- return(false);\r
- }\r
- /**\r
- * Handle unreachable target URL\r
- */\r
- public void handleUnreachable(DestInfo destinfo) {\r
- String subid = destinfo.getSubId();\r
- if (followredirects && subid != null) {\r
- rdmgr.forget(subid);\r
- }\r
- }\r
- /**\r
- * Get the timeout before retrying after an initial delivery failure\r
- */\r
- public long getInitFailureTimer() {\r
- return(initfailuretimer);\r
- }\r
- /**\r
- * Get the maximum timeout between delivery attempts\r
- */\r
- public long getMaxFailureTimer() {\r
- return(maxfailuretimer);\r
- }\r
- /**\r
- * Get the ratio between consecutive delivery attempts\r
- */\r
- public double getFailureBackoff() {\r
- return(failurebackoff);\r
- }\r
- /**\r
- * Get the expiration timer for deliveries\r
- */\r
- public long getExpirationTimer() {\r
- return(expirationtimer);\r
- }\r
- /**\r
- * Get the maximum number of file delivery attempts before checking\r
- * if another queue has work to be performed.\r
- */\r
- public int getFairFileLimit() {\r
- return(fairfilelimit);\r
- }\r
- /**\r
- * Get the maximum amount of time spent delivering files before\r
- * checking if another queue has work to be performed.\r
- */\r
- public long getFairTimeLimit() {\r
- return(fairtimelimit);\r
- }\r
- /**\r
- * Get the targets for a feed\r
- * @param feedid The feed ID\r
- * @return The targets this feed should be delivered to\r
- */\r
- public Target[] getTargets(String feedid) {\r
- return(config.getTargets(feedid));\r
- }\r
- /**\r
- * Get the spool directory for temporary files\r
- */\r
- public String getSpoolDir() {\r
- return(spooldir + "/f");\r
- }\r
- /**\r
- * Get the base directory for spool directories\r
- */\r
- public String getSpoolBase() {\r
- return(spooldir);\r
- }\r
- /**\r
- * Get the key store type\r
- */\r
- public String getKSType() {\r
- return(kstype);\r
- }\r
- /**\r
- * Get the key store file\r
- */\r
- public String getKSFile() {\r
- return(ksfile);\r
- }\r
- /**\r
- * Get the key store password\r
- */\r
- public String getKSPass() {\r
- return(kspass);\r
- }\r
- /**\r
- * Get the key password\r
- */\r
- public String getKPass() {\r
- return(kpass);\r
- }\r
- /**\r
- * Get the http port\r
- */\r
- public int getHttpPort() {\r
- return(gfport);\r
- }\r
- /**\r
- * Get the https port\r
- */\r
- public int getHttpsPort() {\r
- return(svcport);\r
- }\r
- /**\r
- * Get the externally visible https port\r
- */\r
- public int getExtHttpsPort() {\r
- return(port);\r
- }\r
- /**\r
- * Get the external name of this machine\r
- */\r
- public String getMyName() {\r
- return(myname);\r
- }\r
- /**\r
- * Get the number of threads to use for delivery\r
- */\r
- public int getDeliveryThreads() {\r
- return(deliverythreads);\r
- }\r
- /**\r
- * Get the URL for uploading the event log data\r
- */\r
- public String getEventLogUrl() {\r
- return(eventlogurl);\r
- }\r
- /**\r
- * Get the prefix for the names of event log files\r
- */\r
- public String getEventLogPrefix() {\r
- return(eventlogprefix);\r
- }\r
- /**\r
- * Get the suffix for the names of the event log files\r
- */\r
- public String getEventLogSuffix() {\r
- return(eventlogsuffix);\r
- }\r
- /**\r
- * Get the interval between event log file rollovers\r
- */\r
- public String getEventLogInterval() {\r
- return(eventloginterval);\r
- }\r
- /**\r
- * Should I follow redirects from subscribers?\r
- */\r
- public boolean isFollowRedirects() {\r
- return(followredirects);\r
- }\r
- /**\r
- * Get the directory where the event and node log files live\r
- */\r
- public String getLogDir() {\r
- return(logdir);\r
- }\r
- /**\r
- * How long do I keep log files (in milliseconds)\r
- */\r
- public long getLogRetention() {\r
- return(logretention);\r
- }\r
- /**\r
- * Get the timer\r
- */\r
- public Timer getTimer() {\r
- return(timer);\r
- }\r
- /**\r
- * Get the feed ID for a subscription\r
- * @param subid The subscription ID\r
- * @return The feed ID\r
- */\r
- public String getFeedId(String subid) {\r
- return(config.getFeedId(subid));\r
- }\r
- /**\r
- * Get the authorization string this node uses\r
- * @return The Authorization string for this node\r
- */\r
- public String getMyAuth() {\r
- return(config.getMyAuth());\r
- }\r
- /**\r
- * Get the fraction of free spool disk space where we start throwing away undelivered files. This is FREE_DISK_RED_PERCENT / 100.0. Default is 0.05. Limited by 0.01 <= FreeDiskStart <= 0.5.\r
- */\r
- public double getFreeDiskStart() {\r
- return(fdpstart);\r
- }\r
- /**\r
- * Get the fraction of free spool disk space where we stop throwing away undelivered files. This is FREE_DISK_YELLOW_PERCENT / 100.0. Default is 0.2. Limited by FreeDiskStart <= FreeDiskStop <= 0.5.\r
- */\r
- public double getFreeDiskStop() {\r
- return(fdpstop);\r
- }\r
- /**\r
- * Get the spool directory for a subscription\r
- */\r
- public String getSpoolDir(String subid, String remoteaddr) {\r
- if (provcheck.isFrom(remoteaddr)) {\r
- String sdir = config.getSpoolDir(subid);\r
- if (sdir != null) {\r
- logger.info("NODE0310 Received subscription reset request for subscription " + subid + " from provisioning server " + remoteaddr);\r
- } else {\r
- logger.info("NODE0311 Received subscription reset request for unknown subscription " + subid + " from provisioning server " + remoteaddr);\r
- }\r
- return(sdir);\r
- } else {\r
- logger.info("NODE0312 Received subscription reset request from unexpected server " + remoteaddr);\r
- return(null);\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.net.*;
+import java.util.*;
+import java.io.*;
+
+import org.apache.log4j.Logger;
+import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+
+/**
+ * Maintain the configuration of a Data Router node
+ * <p>
+ * The NodeConfigManager is the single point of contact for servlet, delivery, event logging, and log retention subsystems to access configuration information. (Log4J has its own configuration mechanism).
+ * <p>
+ * There are two basic sets of configuration data: the static local configuration data,
+ * stored in a local configuration file (created as part of installation by SWM), and the
+ * dynamic global configuration data fetched from the data router provisioning server.
+ */
+public class NodeConfigManager implements DeliveryQueueHelper {
+ private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.NodeConfigManager");
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeConfigManager");
+ private static NodeConfigManager base = new NodeConfigManager();
+
+ private Timer timer = new Timer("Node Configuration Timer", true);
+ private long maxfailuretimer;
+ private long initfailuretimer;
+ private long expirationtimer;
+ private double failurebackoff;
+ private long fairtimelimit;
+ private int fairfilelimit;
+ private double fdpstart;
+ private double fdpstop;
+ private int deliverythreads;
+ private String provurl;
+ private String provhost;
+ private IsFrom provcheck;
+ private int gfport;
+ private int svcport;
+ private int port;
+ private String spooldir;
+ private String logdir;
+ private long logretention;
+ private String redirfile;
+ private String kstype;
+ private String ksfile;
+ private String kspass;
+ private String kpass;
+ private String tstype;
+ private String tsfile;
+ private String tspass;
+ private String myname;
+ private RedirManager rdmgr;
+ private RateLimitedOperation pfetcher;
+ private NodeConfig config;
+ private File quiesce;
+ private PublishId pid;
+ private String nak;
+ private TaskList configtasks = new TaskList();
+ private String eventlogurl;
+ private String eventlogprefix;
+ private String eventlogsuffix;
+ private String eventloginterval;
+ private boolean followredirects;
+
+
+ /**
+ * Get the default node configuration manager
+ */
+ public static NodeConfigManager getInstance() {
+ return (base);
+ }
+
+ /**
+ * Initialize the configuration of a Data Router node
+ */
+ private NodeConfigManager() {
+ Properties p = new Properties();
+ try {
+ p.load(new FileInputStream(System.getProperty("org.onap.dmaap.datarouter.node.ConfigFile", "/opt/app/datartr/etc/node.properties")));
+ } catch (Exception e) {
+
+ NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");
+ eelflogger.error(EelfMsgs.MESSAGE_PROPERTIES_LOAD_ERROR);
+ logger.error("NODE0301 Unable to load local configuration file " + System.getProperty("org.onap.dmaap.datarouter.node.ConfigFile", "/opt/app/datartr/etc/node.properties"), e);
+ }
+ provurl = p.getProperty("ProvisioningURL", "https://feeds-drtr.web.att.com/internal/prov");
+ try {
+ provhost = (new URL(provurl)).getHost();
+ } catch (Exception e) {
+ NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");
+ eelflogger.error(EelfMsgs.MESSAGE_BAD_PROV_URL, provurl);
+ logger.error("NODE0302 Bad provisioning server URL " + provurl);
+ System.exit(1);
+ }
+ logger.info("NODE0303 Provisioning server is " + provhost);
+ eventlogurl = p.getProperty("LogUploadURL", "https://feeds-drtr.web.att.com/internal/logs");
+ provcheck = new IsFrom(provhost);
+ gfport = Integer.parseInt(p.getProperty("IntHttpPort", "8080"));
+ svcport = Integer.parseInt(p.getProperty("IntHttpsPort", "8443"));
+ port = Integer.parseInt(p.getProperty("ExtHttpsPort", "443"));
+ long minpfinterval = Long.parseLong(p.getProperty("MinProvFetchInterval", "10000"));
+ long minrsinterval = Long.parseLong(p.getProperty("MinRedirSaveInterval", "10000"));
+ spooldir = p.getProperty("SpoolDir", "spool");
+ File fdir = new File(spooldir + "/f");
+ fdir.mkdirs();
+        File[] junkfiles = fdir.listFiles();
+        if (junkfiles != null) {
+            for (File junk : junkfiles) {
+                if (junk.isFile()) {
+                    junk.delete();
+                }
+            }
+        }
+ logdir = p.getProperty("LogDir", "logs");
+ (new File(logdir)).mkdirs();
+ logretention = Long.parseLong(p.getProperty("LogRetention", "30")) * 86400000L;
+ eventlogprefix = logdir + "/events";
+ eventlogsuffix = ".log";
+        redirfile = p.getProperty("RedirectionFile", "etc/redirections.dat");
+ kstype = p.getProperty("KeyStoreType", "jks");
+ ksfile = p.getProperty("KeyStoreFile", "etc/keystore");
+ kspass = p.getProperty("KeyStorePassword", "changeme");
+ kpass = p.getProperty("KeyPassword", "changeme");
+ tstype = p.getProperty("TrustStoreType", "jks");
+ tsfile = p.getProperty("TrustStoreFile");
+ tspass = p.getProperty("TrustStorePassword", "changeme");
+ if (tsfile != null && tsfile.length() > 0) {
+ System.setProperty("javax.net.ssl.trustStoreType", tstype);
+ System.setProperty("javax.net.ssl.trustStore", tsfile);
+ System.setProperty("javax.net.ssl.trustStorePassword", tspass);
+ }
+ nak = p.getProperty("NodeAuthKey", "Node123!");
+ quiesce = new File(p.getProperty("QuiesceFile", "etc/SHUTDOWN"));
+ myname = NodeUtils.getCanonicalName(kstype, ksfile, kspass);
+ if (myname == null) {
+ NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");
+ eelflogger.error(EelfMsgs.MESSAGE_KEYSTORE_FETCH_ERROR, ksfile);
+ logger.error("NODE0309 Unable to fetch canonical name from keystore file " + ksfile);
+ System.exit(1);
+ }
+ logger.info("NODE0304 My certificate says my name is " + myname);
+ pid = new PublishId(myname);
+ rdmgr = new RedirManager(redirfile, minrsinterval, timer);
+ pfetcher = new RateLimitedOperation(minpfinterval, timer) {
+ public void run() {
+ fetchconfig();
+ }
+ };
+ logger.info("NODE0305 Attempting to fetch configuration at " + provurl);
+ pfetcher.request();
+ }
+
+ private void localconfig() {
+ followredirects = Boolean.parseBoolean(getProvParam("FOLLOW_REDIRECTS", "false"));
+ eventloginterval = getProvParam("LOGROLL_INTERVAL", "5m");
+ initfailuretimer = 10000;
+ maxfailuretimer = 3600000;
+ expirationtimer = 86400000;
+ failurebackoff = 2.0;
+ deliverythreads = 40;
+ fairfilelimit = 100;
+ fairtimelimit = 60000;
+ fdpstart = 0.05;
+ fdpstop = 0.2;
+        try {
+            initfailuretimer = (long) (Double.parseDouble(getProvParam("DELIVERY_INIT_RETRY_INTERVAL")) * 1000);
+        } catch (Exception e) {
+            // optional parameter not provisioned or not a number; keep the default set above
+        }
+        try {
+            maxfailuretimer = (long) (Double.parseDouble(getProvParam("DELIVERY_MAX_RETRY_INTERVAL")) * 1000);
+        } catch (Exception e) {
+            // optional parameter not provisioned or not a number; keep the default set above
+        }
+        try {
+            expirationtimer = (long) (Double.parseDouble(getProvParam("DELIVERY_MAX_AGE")) * 1000);
+        } catch (Exception e) {
+            // optional parameter not provisioned or not a number; keep the default set above
+        }
+        try {
+            failurebackoff = Double.parseDouble(getProvParam("DELIVERY_RETRY_RATIO"));
+        } catch (Exception e) {
+            // optional parameter not provisioned or not a number; keep the default set above
+        }
+        try {
+            deliverythreads = Integer.parseInt(getProvParam("DELIVERY_THREADS"));
+        } catch (Exception e) {
+            // optional parameter not provisioned or not a number; keep the default set above
+        }
+        try {
+            fairfilelimit = Integer.parseInt(getProvParam("FAIR_FILE_LIMIT"));
+        } catch (Exception e) {
+            // optional parameter not provisioned or not a number; keep the default set above
+        }
+        try {
+            fairtimelimit = (long) (Double.parseDouble(getProvParam("FAIR_TIME_LIMIT")) * 1000);
+        } catch (Exception e) {
+            // optional parameter not provisioned or not a number; keep the default set above
+        }
+        try {
+            fdpstart = Double.parseDouble(getProvParam("FREE_DISK_RED_PERCENT")) / 100.0;
+        } catch (Exception e) {
+            // optional parameter not provisioned or not a number; keep the default set above
+        }
+        try {
+            fdpstop = Double.parseDouble(getProvParam("FREE_DISK_YELLOW_PERCENT")) / 100.0;
+        } catch (Exception e) {
+            // optional parameter not provisioned or not a number; keep the default set above
+        }
+ if (fdpstart < 0.01) {
+ fdpstart = 0.01;
+ }
+ if (fdpstart > 0.5) {
+ fdpstart = 0.5;
+ }
+ if (fdpstop < fdpstart) {
+ fdpstop = fdpstart;
+ }
+ if (fdpstop > 0.5) {
+ fdpstop = 0.5;
+ }
+ }
+
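+    /*
+     * Worked example of the override logic above (sketch; the provisioned value is
+     * hypothetical): if the provisioning server supplies DELIVERY_INIT_RETRY_INTERVAL=30,
+     * the init failure timer becomes 30 * 1000 = 30000 ms, replacing the 10000 ms default.
+     * If the parameter is absent or unparseable, the catch block leaves the default in place.
+     */
+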
+ private void fetchconfig() {
+ try {
+ System.out.println("provurl:: " + provurl);
+ Reader r = new InputStreamReader((new URL(provurl)).openStream());
+ config = new NodeConfig(new ProvData(r), myname, spooldir, port, nak);
+ localconfig();
+ configtasks.startRun();
+ Runnable rr;
+ while ((rr = configtasks.next()) != null) {
+                try {
+                    rr.run();
+                } catch (Exception e) {
+                    // a failing config task should not prevent the remaining tasks from running
+                }
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ NodeUtils.setIpAndFqdnForEelf("fetchconfigs");
+ eelflogger.error(EelfMsgs.MESSAGE_CONF_FAILED, e.toString());
+ logger.error("NODE0306 Configuration failed " + e.toString() + " - try again later", e);
+ pfetcher.request();
+ }
+ }
+
+ /**
+     * Process a gofetch request from a particular IP address. If the IP address is not
+     * one of the addresses we would fetch the provisioning data from, ignore the request.
+     * If the data has been fetched very recently (default 10 seconds), wait a while
+     * before fetching again.
+ */
+ public synchronized void gofetch(String remoteaddr) {
+ if (provcheck.isFrom(remoteaddr)) {
+ logger.info("NODE0307 Received configuration fetch request from provisioning server " + remoteaddr);
+ pfetcher.request();
+ } else {
+ logger.info("NODE0308 Received configuration fetch request from unexpected server " + remoteaddr);
+ }
+ }
+
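+    /*
+     * This method is driven by the node servlet: a GET of /internal/fetchProv from the
+     * provisioning server ends up calling (see NodeServlet.doGet):
+     *
+     *   config.gofetch(req.getRemoteAddr());
+     *
+     * Requests from any other address are logged (NODE0308) and ignored.
+     */
+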
+ /**
+ * Am I configured?
+ */
+ public boolean isConfigured() {
+ return (config != null);
+ }
+
+ /**
+ * Am I shut down?
+ */
+ public boolean isShutdown() {
+ return (quiesce.exists());
+ }
+
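+    /*
+     * Operational sketch: shutdown is signalled simply by creating the quiesce file,
+     * e.g. with the default QuiesceFile setting (path relative to the node's working
+     * directory):
+     *
+     *   touch etc/SHUTDOWN
+     */
+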
+ /**
+ * Given a routing string, get the targets.
+ *
+ * @param routing Target string
+ * @return array of targets
+ */
+ public Target[] parseRouting(String routing) {
+ return (config.parseRouting(routing));
+ }
+
+ /**
+ * Given a set of credentials and an IP address, is this request from another node?
+ *
+ * @param credentials Credentials offered by the supposed node
+ * @param ip IP address the request came from
+ * @return If the credentials and IP address are recognized, true, otherwise false.
+ */
+ public boolean isAnotherNode(String credentials, String ip) {
+ return (config.isAnotherNode(credentials, ip));
+ }
+
+ /**
+ * Check whether publication is allowed.
+ *
+ * @param feedid The ID of the feed being requested
+ * @param credentials The offered credentials
+ * @param ip The requesting IP address
+     * @return Null if the IP and credentials are valid for the specified feed, otherwise a reason string explaining why publication is not permitted.
+ */
+ public String isPublishPermitted(String feedid, String credentials, String ip) {
+ return (config.isPublishPermitted(feedid, credentials, ip));
+ }
+
+ /**
+ * Check who the user is given the feed ID and the offered credentials.
+ *
+ * @param feedid The ID of the feed specified
+ * @param credentials The offered credentials
+ * @return Null if the credentials are invalid or the user if they are valid.
+ */
+ public String getAuthUser(String feedid, String credentials) {
+ return (config.getAuthUser(feedid, credentials));
+ }
+
+ /**
+ * Check if the publish request should be sent to another node based on the feedid, user, and source IP address.
+ *
+ * @param feedid The ID of the feed specified
+ * @param user The publishing user
+ * @param ip The IP address of the publish endpoint
+ * @return Null if the request should be accepted or the correct hostname if it should be sent to another node.
+ */
+ public String getIngressNode(String feedid, String user, String ip) {
+ return (config.getIngressNode(feedid, user, ip));
+ }
+
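+    /*
+     * A non-null result makes the node servlet redirect the publisher instead of
+     * accepting the file locally (condensed sketch of the logic in NodeServlet.common):
+     *
+     *   String newnode = config.getIngressNode(feedid, user, ip);
+     *   if (newnode != null) {
+     *       resp.sendRedirect("https://" + newnode + port + "/publish/" + feedid + "/" + fileid);
+     *   }
+     */
+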
+ /**
+ * Get a provisioned configuration parameter (from the provisioning server configuration)
+ *
+ * @param name The name of the parameter
+ * @return The value of the parameter or null if it is not defined.
+ */
+ public String getProvParam(String name) {
+ return (config.getProvParam(name));
+ }
+
+ /**
+ * Get a provisioned configuration parameter (from the provisioning server configuration)
+ *
+ * @param name The name of the parameter
+ * @param deflt The value to use if the parameter is not defined
+ * @return The value of the parameter or deflt if it is not defined.
+ */
+ public String getProvParam(String name, String deflt) {
+ name = config.getProvParam(name);
+ if (name == null) {
+ name = deflt;
+ }
+ return (name);
+ }
+
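+    /*
+     * Example usage (sketch): localconfig() above reads optional parameters this way,
+     * falling back to a built-in default when the provisioning data omits the value:
+     *
+     *   eventloginterval = getProvParam("LOGROLL_INTERVAL", "5m");
+     */
+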
+ /**
+ * Generate a publish ID
+ */
+ public String getPublishId() {
+ return (pid.next());
+ }
+
+ /**
+ * Get all the outbound spooling destinations.
+ * This will include both subscriptions and nodes.
+ */
+ public DestInfo[] getAllDests() {
+ return (config.getAllDests());
+ }
+
+ /**
+ * Register a task to run whenever the configuration changes
+ */
+ public void registerConfigTask(Runnable task) {
+ configtasks.addTask(task);
+ }
+
+ /**
+ * Deregister a task to run whenever the configuration changes
+ */
+ public void deregisterConfigTask(Runnable task) {
+ configtasks.removeTask(task);
+ }
+
+ /**
+ * Get the URL to deliver a message to.
+ *
+ * @param destinfo The destination information
+ * @param fileid The file ID
+ * @return The URL to deliver to
+ */
+ public String getDestURL(DestInfo destinfo, String fileid) {
+ String subid = destinfo.getSubId();
+ String purl = destinfo.getURL();
+ if (followredirects && subid != null) {
+ purl = rdmgr.lookup(subid, purl);
+ }
+ return (purl + "/" + fileid);
+ }
+
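+    /*
+     * Example (sketch; the subscriber URL and file ID are hypothetical): for a
+     * subscription delivering to https://subscriber.example.com/delivery and a file ID
+     * of "1234.5678.9", the delivery URL is
+     *
+     *   https://subscriber.example.com/delivery/1234.5678.9
+     *
+     * unless a redirection has been recorded for the subscription, in which case the
+     * redirected base URL from RedirManager is used instead.
+     */
+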
+ /**
+ * Is a destination redirected?
+ */
+ public boolean isDestRedirected(DestInfo destinfo) {
+ return (followredirects && rdmgr.isRedirected(destinfo.getSubId()));
+ }
+
+ /**
+ * Set up redirection on receipt of a 3XX from a target URL
+ */
+ public boolean handleRedirection(DestInfo destinfo, String redirto, String fileid) {
+ fileid = "/" + fileid;
+ String subid = destinfo.getSubId();
+ String purl = destinfo.getURL();
+ if (followredirects && subid != null && redirto.endsWith(fileid)) {
+ redirto = redirto.substring(0, redirto.length() - fileid.length());
+ if (!redirto.equals(purl)) {
+ rdmgr.redirect(subid, purl, redirto);
+ return (true);
+ }
+ }
+ return (false);
+ }
+
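+    /*
+     * Worked example (sketch; URLs are hypothetical): if a subscription's configured URL
+     * is https://sub.example.com/delivery and a delivery of file "abc" receives a 3XX
+     * pointing at https://sub2.example.com/delivery/abc, the trailing "/abc" is stripped
+     * and https://sub2.example.com/delivery is recorded as the redirect target, so later
+     * deliveries for that subscription go to the new base URL.
+     */
+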
+ /**
+ * Handle unreachable target URL
+ */
+ public void handleUnreachable(DestInfo destinfo) {
+ String subid = destinfo.getSubId();
+ if (followredirects && subid != null) {
+ rdmgr.forget(subid);
+ }
+ }
+
+ /**
+ * Get the timeout before retrying after an initial delivery failure
+ */
+ public long getInitFailureTimer() {
+ return (initfailuretimer);
+ }
+
+ /**
+ * Get the maximum timeout between delivery attempts
+ */
+ public long getMaxFailureTimer() {
+ return (maxfailuretimer);
+ }
+
+ /**
+ * Get the ratio between consecutive delivery attempts
+ */
+ public double getFailureBackoff() {
+ return (failurebackoff);
+ }
+
+ /**
+ * Get the expiration timer for deliveries
+ */
+ public long getExpirationTimer() {
+ return (expirationtimer);
+ }
+
+ /**
+ * Get the maximum number of file delivery attempts before checking
+ * if another queue has work to be performed.
+ */
+ public int getFairFileLimit() {
+ return (fairfilelimit);
+ }
+
+ /**
+ * Get the maximum amount of time spent delivering files before
+ * checking if another queue has work to be performed.
+ */
+ public long getFairTimeLimit() {
+ return (fairtimelimit);
+ }
+
+ /**
+ * Get the targets for a feed
+ *
+ * @param feedid The feed ID
+ * @return The targets this feed should be delivered to
+ */
+ public Target[] getTargets(String feedid) {
+ return (config.getTargets(feedid));
+ }
+
+ /**
+ * Get the spool directory for temporary files
+ */
+ public String getSpoolDir() {
+ return (spooldir + "/f");
+ }
+
+ /**
+ * Get the base directory for spool directories
+ */
+ public String getSpoolBase() {
+ return (spooldir);
+ }
+
+ /**
+ * Get the key store type
+ */
+ public String getKSType() {
+ return (kstype);
+ }
+
+ /**
+ * Get the key store file
+ */
+ public String getKSFile() {
+ return (ksfile);
+ }
+
+ /**
+ * Get the key store password
+ */
+ public String getKSPass() {
+ return (kspass);
+ }
+
+ /**
+ * Get the key password
+ */
+ public String getKPass() {
+ return (kpass);
+ }
+
+ /**
+ * Get the http port
+ */
+ public int getHttpPort() {
+ return (gfport);
+ }
+
+ /**
+ * Get the https port
+ */
+ public int getHttpsPort() {
+ return (svcport);
+ }
+
+ /**
+ * Get the externally visible https port
+ */
+ public int getExtHttpsPort() {
+ return (port);
+ }
+
+ /**
+ * Get the external name of this machine
+ */
+ public String getMyName() {
+ return (myname);
+ }
+
+ /**
+ * Get the number of threads to use for delivery
+ */
+ public int getDeliveryThreads() {
+ return (deliverythreads);
+ }
+
+ /**
+ * Get the URL for uploading the event log data
+ */
+ public String getEventLogUrl() {
+ return (eventlogurl);
+ }
+
+ /**
+ * Get the prefix for the names of event log files
+ */
+ public String getEventLogPrefix() {
+ return (eventlogprefix);
+ }
+
+ /**
+ * Get the suffix for the names of the event log files
+ */
+ public String getEventLogSuffix() {
+ return (eventlogsuffix);
+ }
+
+ /**
+ * Get the interval between event log file rollovers
+ */
+ public String getEventLogInterval() {
+ return (eventloginterval);
+ }
+
+ /**
+ * Should I follow redirects from subscribers?
+ */
+ public boolean isFollowRedirects() {
+ return (followredirects);
+ }
+
+ /**
+ * Get the directory where the event and node log files live
+ */
+ public String getLogDir() {
+ return (logdir);
+ }
+
+ /**
+ * How long do I keep log files (in milliseconds)
+ */
+ public long getLogRetention() {
+ return (logretention);
+ }
+
+ /**
+ * Get the timer
+ */
+ public Timer getTimer() {
+ return (timer);
+ }
+
+ /**
+ * Get the feed ID for a subscription
+ *
+ * @param subid The subscription ID
+ * @return The feed ID
+ */
+ public String getFeedId(String subid) {
+ return (config.getFeedId(subid));
+ }
+
+ /**
+ * Get the authorization string this node uses
+ *
+ * @return The Authorization string for this node
+ */
+ public String getMyAuth() {
+ return (config.getMyAuth());
+ }
+
+ /**
+ * Get the fraction of free spool disk space where we start throwing away undelivered files. This is FREE_DISK_RED_PERCENT / 100.0. Default is 0.05. Limited by 0.01 <= FreeDiskStart <= 0.5.
+ */
+ public double getFreeDiskStart() {
+ return (fdpstart);
+ }
+
+ /**
+ * Get the fraction of free spool disk space where we stop throwing away undelivered files. This is FREE_DISK_YELLOW_PERCENT / 100.0. Default is 0.2. Limited by FreeDiskStart <= FreeDiskStop <= 0.5.
+ */
+ public double getFreeDiskStop() {
+ return (fdpstop);
+ }
+
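+    /*
+     * Worked example of the clamping done in localconfig() (values are hypothetical):
+     * FREE_DISK_RED_PERCENT=5 gives FreeDiskStart = 0.05; a value of 0.5 would give
+     * 0.005 and be raised to the 0.01 floor; a value of 80 would give 0.8 and be capped
+     * at 0.5. FreeDiskStop is then forced to be at least FreeDiskStart and at most 0.5.
+     */
+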
+ /**
+ * Get the spool directory for a subscription
+ */
+ public String getSpoolDir(String subid, String remoteaddr) {
+ if (provcheck.isFrom(remoteaddr)) {
+ String sdir = config.getSpoolDir(subid);
+ if (sdir != null) {
+ logger.info("NODE0310 Received subscription reset request for subscription " + subid + " from provisioning server " + remoteaddr);
+ } else {
+ logger.info("NODE0311 Received subscription reset request for unknown subscription " + subid + " from provisioning server " + remoteaddr);
+ }
+ return (sdir);
+ } else {
+ logger.info("NODE0312 Received subscription reset request from unexpected server " + remoteaddr);
+ return (null);
+ }
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import org.eclipse.jetty.servlet.*;\r
-import org.eclipse.jetty.util.ssl.*;\r
-import org.eclipse.jetty.server.*;\r
-import org.eclipse.jetty.server.nio.*;\r
-import org.eclipse.jetty.server.ssl.*;\r
-import org.apache.log4j.Logger;\r
-\r
-/**\r
- * The main starting point for the Data Router node\r
- */\r
-public class NodeMain {\r
- private NodeMain() {}\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeMain");\r
- private static class wfconfig implements Runnable {\r
- private NodeConfigManager ncm;\r
- public wfconfig(NodeConfigManager ncm) {\r
- this.ncm = ncm;\r
- }\r
- public synchronized void run() {\r
- notify();\r
- }\r
- public synchronized void waitforconfig() {\r
- ncm.registerConfigTask(this);\r
- while (!ncm.isConfigured()) {\r
- logger.info("NODE0003 Waiting for Node Configuration");\r
- try {\r
- wait();\r
- } catch (Exception e) {\r
- }\r
- }\r
- ncm.deregisterConfigTask(this);\r
- logger.info("NODE0004 Node Configuration Data Received");\r
- }\r
- }\r
- private static Delivery d;\r
- private static NodeConfigManager ncm;\r
- /**\r
- * Reset the retry timer for a subscription\r
- */\r
- public static void resetQueue(String subid, String ip) {\r
- d.resetQueue(ncm.getSpoolDir(subid, ip));\r
- }\r
- /**\r
- * Start the data router.\r
- * <p>\r
- * The location of the node configuration file can be set using the\r
- * org.onap.dmaap.datarouter.node.ConfigFile system property. By\r
- * default, it is "etc/node.properties".\r
- */\r
- public static void main(String[] args) throws Exception {\r
- logger.info("NODE0001 Data Router Node Starting");\r
- IsFrom.setDNSCache();\r
- ncm = NodeConfigManager.getInstance();\r
- logger.info("NODE0002 I am " + ncm.getMyName());\r
- (new wfconfig(ncm)).waitforconfig();\r
- d = new Delivery(ncm);\r
- LogManager lm = new LogManager(ncm);\r
- Server server = new Server();\r
- SelectChannelConnector http = new SelectChannelConnector();\r
- http.setPort(ncm.getHttpPort());\r
- http.setMaxIdleTime(2000);\r
- http.setRequestHeaderSize(2048);\r
- SslSelectChannelConnector https = new SslSelectChannelConnector();\r
- https.setPort(ncm.getHttpsPort());\r
- https.setMaxIdleTime(30000);\r
- https.setRequestHeaderSize(8192);\r
- SslContextFactory cf = https.getSslContextFactory();\r
- \r
- /**Skip SSLv3 Fixes*/\r
- cf.addExcludeProtocols("SSLv3");\r
- logger.info("Excluded protocols node-"+cf.getExcludeProtocols());\r
- /**End of SSLv3 Fixes*/\r
-\r
- cf.setKeyStoreType(ncm.getKSType());\r
- cf.setKeyStorePath(ncm.getKSFile());\r
- cf.setKeyStorePassword(ncm.getKSPass());\r
- cf.setKeyManagerPassword(ncm.getKPass());\r
- server.setConnectors(new Connector[] { http, https });\r
- ServletContextHandler ctxt = new ServletContextHandler(0);\r
- ctxt.setContextPath("/");\r
- server.setHandler(ctxt);\r
- ctxt.addServlet(new ServletHolder(new NodeServlet()), "/*");\r
- logger.info("NODE0005 Data Router Node Activating Service");\r
- server.start();\r
- server.join();\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import org.eclipse.jetty.servlet.*;
+import org.eclipse.jetty.util.ssl.*;
+import org.eclipse.jetty.server.*;
+import org.eclipse.jetty.server.nio.*;
+import org.eclipse.jetty.server.ssl.*;
+import org.apache.log4j.Logger;
+
+/**
+ * The main starting point for the Data Router node
+ */
+public class NodeMain {
+ private NodeMain() {
+ }
+
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeMain");
+
+ private static class wfconfig implements Runnable {
+ private NodeConfigManager ncm;
+
+ public wfconfig(NodeConfigManager ncm) {
+ this.ncm = ncm;
+ }
+
+ public synchronized void run() {
+ notify();
+ }
+
+ public synchronized void waitforconfig() {
+ ncm.registerConfigTask(this);
+ while (!ncm.isConfigured()) {
+ logger.info("NODE0003 Waiting for Node Configuration");
+                try {
+                    wait();
+                } catch (Exception e) {
+                    // spurious wakeup or interrupt; loop and re-check isConfigured()
+                }
+ }
+ ncm.deregisterConfigTask(this);
+ logger.info("NODE0004 Node Configuration Data Received");
+ }
+ }
+
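+    /*
+     * How the handshake above works (sketch): waitforconfig() registers this instance as
+     * a configuration task and blocks in wait(); when NodeConfigManager completes a
+     * successful fetchconfig() it runs every registered task, so run() is invoked and its
+     * notify() wakes the waiting thread, which re-checks isConfigured() and deregisters.
+     */
+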
+ private static Delivery d;
+ private static NodeConfigManager ncm;
+
+ /**
+ * Reset the retry timer for a subscription
+ */
+ public static void resetQueue(String subid, String ip) {
+ d.resetQueue(ncm.getSpoolDir(subid, ip));
+ }
+
+ /**
+ * Start the data router.
+ * <p>
+ * The location of the node configuration file can be set using the
+ * org.onap.dmaap.datarouter.node.ConfigFile system property. By
+ * default, it is "etc/node.properties".
+ */
+ public static void main(String[] args) throws Exception {
+ logger.info("NODE0001 Data Router Node Starting");
+ IsFrom.setDNSCache();
+ ncm = NodeConfigManager.getInstance();
+ logger.info("NODE0002 I am " + ncm.getMyName());
+ (new wfconfig(ncm)).waitforconfig();
+ d = new Delivery(ncm);
+ LogManager lm = new LogManager(ncm);
+ Server server = new Server();
+ SelectChannelConnector http = new SelectChannelConnector();
+ http.setPort(ncm.getHttpPort());
+ http.setMaxIdleTime(2000);
+ http.setRequestHeaderSize(2048);
+ SslSelectChannelConnector https = new SslSelectChannelConnector();
+ https.setPort(ncm.getHttpsPort());
+ https.setMaxIdleTime(30000);
+ https.setRequestHeaderSize(8192);
+ SslContextFactory cf = https.getSslContextFactory();
+
+        // SSLv3 fix: exclude the SSLv3 protocol from the HTTPS connector
+        cf.addExcludeProtocols("SSLv3");
+        logger.info("Excluded protocols node-" + cf.getExcludeProtocols());
+
+ cf.setKeyStoreType(ncm.getKSType());
+ cf.setKeyStorePath(ncm.getKSFile());
+ cf.setKeyStorePassword(ncm.getKSPass());
+ cf.setKeyManagerPassword(ncm.getKPass());
+ server.setConnectors(new Connector[]{http, https});
+ ServletContextHandler ctxt = new ServletContextHandler(0);
+ ctxt.setContextPath("/");
+ server.setHandler(ctxt);
+ ctxt.addServlet(new ServletHolder(new NodeServlet()), "/*");
+ logger.info("NODE0005 Data Router Node Activating Service");
+ server.start();
+ server.join();
+ }
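+
+    /*
+     * Example launch (sketch; the install layout and class path are deployment-specific):
+     *
+     *   java -Dorg.onap.dmaap.datarouter.node.ConfigFile=/opt/app/datartr/etc/node.properties \
+     *       org.onap.dmaap.datarouter.node.NodeMain
+     */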
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import javax.servlet.*;\r
-import javax.servlet.http.*;\r
-import java.util.*;\r
-import java.util.regex.*;\r
-import java.io.*;\r
-import java.nio.file.*;\r
-import org.apache.log4j.Logger;\r
-import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;\r
-\r
-import com.att.eelf.configuration.EELFLogger;\r
-import com.att.eelf.configuration.EELFManager;\r
-\r
-import java.net.*;\r
-\r
-/**\r
- * Servlet for handling all http and https requests to the data router node\r
- * <p>\r
- * Handled requests are:\r
- * <br>\r
- * GET http://<i>node</i>/internal/fetchProv - fetch the provisioning data\r
- * <br>\r
- * PUT/DELETE https://<i>node</i>/internal/publish/<i>fileid</i> - n2n transfer\r
- * <br>\r
- * PUT/DELETE https://<i>node</i>/publish/<i>feedid</i>/<i>fileid</i> - publsh request\r
- */\r
-public class NodeServlet extends HttpServlet {\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeServlet");\r
- private static NodeConfigManager config;\r
- private static Pattern MetaDataPattern;\r
- private static SubnetMatcher internalsubnet = new SubnetMatcher("135.207.136.128/25");\r
- //Adding EELF Logger Rally:US664892 \r
- private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.NodeServlet");\r
-\r
- static {\r
- try {\r
- String ws = "\\s*";\r
- // assume that \\ and \" have been replaced by X\r
- String string = "\"[^\"]*\"";\r
- //String string = "\"(?:[^\"\\\\]|\\\\.)*\"";\r
- String number = "[+-]?(?:\\.\\d+|(?:0|[1-9]\\d*)(?:\\.\\d*)?)(?:[eE][+-]?\\d+)?";\r
- String value = "(?:" + string + "|" + number + "|null|true|false)";\r
- String item = string + ws + ":" + ws + value + ws;\r
- String object = ws + "\\{" + ws + "(?:" + item + "(?:" + "," + ws + item + ")*)?\\}" + ws;\r
- MetaDataPattern = Pattern.compile(object, Pattern.DOTALL);\r
- } catch (Exception e) {\r
- }\r
- }\r
- /**\r
- * Get the NodeConfigurationManager\r
- */\r
- public void init() {\r
- config = NodeConfigManager.getInstance();\r
- logger.info("NODE0101 Node Servlet Configured");\r
- }\r
- private boolean down(HttpServletResponse resp) throws IOException {\r
- if (config.isShutdown() || !config.isConfigured()) {\r
- resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE);\r
- logger.info("NODE0102 Rejecting request: Service is being quiesced");\r
- return(true);\r
- }\r
- return(false);\r
- }\r
- /**\r
- * Handle a GET for /internal/fetchProv\r
- */\r
- protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {\r
- NodeUtils.setIpAndFqdnForEelf("doGet");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"),getIdFromPath(req)+"");\r
- if (down(resp)) {\r
- return;\r
- }\r
- String path = req.getPathInfo();\r
- String qs = req.getQueryString();\r
- String ip = req.getRemoteAddr();\r
- if (qs != null) {\r
- path = path + "?" + qs;\r
- }\r
- if ("/internal/fetchProv".equals(path)) {\r
- config.gofetch(ip);\r
- resp.setStatus(HttpServletResponse.SC_NO_CONTENT);\r
- return;\r
- } else if (path.startsWith("/internal/resetSubscription/")) {\r
- String subid = path.substring(28);\r
- if (subid.length() != 0 && subid.indexOf('/') == -1) {\r
- NodeMain.resetQueue(subid, ip);\r
- resp.setStatus(HttpServletResponse.SC_NO_CONTENT);\r
- return;\r
- }\r
- }\r
- if (internalsubnet.matches(NodeUtils.getInetAddress(ip))) {\r
- if (path.startsWith("/internal/logs/")) {\r
- String f = path.substring(15);\r
- File fn = new File(config.getLogDir() + "/" + f);\r
- if (f.indexOf('/') != -1 || !fn.isFile()) {\r
- logger.info("NODE0103 Rejecting invalid GET of " + path + " from " + ip);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND);\r
- return;\r
- }\r
- byte[] buf = new byte[65536];\r
- resp.setContentType("text/plain");\r
- resp.setContentLength((int)fn.length());\r
- resp.setStatus(200);\r
- InputStream is = new FileInputStream(fn);\r
- OutputStream os = resp.getOutputStream();\r
- int i;\r
- while ((i = is.read(buf)) > 0) {\r
- os.write(buf, 0, i);\r
- }\r
- is.close();\r
- return;\r
- }\r
- if (path.startsWith("/internal/rtt/")) {\r
- String xip = path.substring(14);\r
- long st = System.currentTimeMillis();\r
- String status = " unknown";\r
- try {\r
- Socket s = new Socket(xip, 443);\r
- s.close();\r
- status = " connected";\r
- } catch (Exception e) {\r
- status = " error " + e.toString();\r
- }\r
- long dur = System.currentTimeMillis() - st;\r
- resp.setContentType("text/plain");\r
- resp.setStatus(200);\r
- byte[] buf = (dur + status + "\n").getBytes();\r
- resp.setContentLength(buf.length);\r
- resp.getOutputStream().write(buf);\r
- return;\r
- }\r
- }\r
- logger.info("NODE0103 Rejecting invalid GET of " + path + " from " + ip);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND);\r
- return;\r
- }\r
- /**\r
- * Handle all PUT requests\r
- */\r
- protected void doPut(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {\r
- NodeUtils.setIpAndFqdnForEelf("doPut");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"),getIdFromPath(req)+"");\r
- common(req, resp, true);\r
- }\r
- /**\r
- * Handle all DELETE requests\r
- */\r
- protected void doDelete(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {\r
- NodeUtils.setIpAndFqdnForEelf("doDelete");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"),getIdFromPath(req)+"");\r
- common(req, resp, false);\r
- }\r
- private void common(HttpServletRequest req, HttpServletResponse resp, boolean isput) throws ServletException, IOException {\r
- if (down(resp)) {\r
- return;\r
- }\r
- if (!req.isSecure()) {\r
- logger.info("NODE0104 Rejecting insecure PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, "https required on publish requests");\r
- return;\r
- }\r
- String fileid = req.getPathInfo();\r
- if (fileid == null) {\r
- logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>.");\r
- return;\r
- }\r
- String feedid = null;\r
- String user = null;\r
- String credentials = req.getHeader("Authorization");\r
- if (credentials == null) {\r
- logger.info("NODE0106 Rejecting unauthenticated PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Authorization header required");\r
- return;\r
- }\r
- String ip = req.getRemoteAddr();\r
- String lip = req.getLocalAddr();\r
- String pubid = null;\r
- String xpubid = null;\r
- String rcvd = NodeUtils.logts(System.currentTimeMillis()) + ";from=" + ip + ";by=" + lip;\r
- Target[] targets = null;\r
- if (fileid.startsWith("/publish/")) {\r
- fileid = fileid.substring(9);\r
- int i = fileid.indexOf('/');\r
- if (i == -1 || i == fileid.length() - 1) {\r
- logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>. Possible missing fileid.");\r
- return;\r
- }\r
- feedid = fileid.substring(0, i);\r
- fileid = fileid.substring(i + 1);\r
- pubid = config.getPublishId();\r
- xpubid = req.getHeader("X-ATT-DR-PUBLISH-ID");\r
- targets = config.getTargets(feedid);\r
- } else if (fileid.startsWith("/internal/publish/")) {\r
- if (!config.isAnotherNode(credentials, ip)) {\r
- logger.info("NODE0107 Rejecting unauthorized node-to-node transfer attempt from " + ip);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN);\r
- return;\r
- }\r
- fileid = fileid.substring(18);\r
- pubid = req.getHeader("X-ATT-DR-PUBLISH-ID");\r
- targets = config.parseRouting(req.getHeader("X-ATT-DR-ROUTING"));\r
- } else {\r
- logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>.");\r
- return;\r
- }\r
- if (fileid.indexOf('/') != -1) {\r
- logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>.");\r
- return;\r
- }\r
- String qs = req.getQueryString();\r
- if (qs != null) {\r
- fileid = fileid + "?" + qs;\r
- }\r
- String hp = config.getMyName();\r
- int xp = config.getExtHttpsPort();\r
- if (xp != 443) {\r
- hp = hp + ":" + xp;\r
- }\r
- String logurl = "https://" + hp + "/internal/publish/" + fileid;\r
- if (feedid != null) {\r
- logurl = "https://" + hp + "/publish/" + feedid + "/" + fileid;\r
- String reason = config.isPublishPermitted(feedid, credentials, ip);\r
- if (reason != null) {\r
- logger.info("NODE0111 Rejecting unauthorized publish attempt to feed " + feedid + " fileid " + fileid + " from " + ip + " reason " + reason);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN,reason);\r
- return;\r
- }\r
- user = config.getAuthUser(feedid, credentials);\r
- String newnode = config.getIngressNode(feedid, user, ip);\r
- if (newnode != null) {\r
- String port = "";\r
- int iport = config.getExtHttpsPort();\r
- if (iport != 443) {\r
- port = ":" + iport;\r
- }\r
- String redirto = "https://" + newnode + port + "/publish/" + feedid + "/" + fileid;\r
- logger.info("NODE0108 Redirecting publish attempt for feed " + feedid + " user " + user + " ip " + ip + " to " + redirto);\r
- resp.sendRedirect(redirto);\r
- return;\r
- }\r
- resp.setHeader("X-ATT-DR-PUBLISH-ID", pubid);\r
- }\r
- String fbase = config.getSpoolDir() + "/" + pubid;\r
- File data = new File(fbase);\r
- File meta = new File(fbase + ".M");\r
- OutputStream dos = null;\r
- Writer mw = null;\r
- InputStream is = null;\r
- try {\r
- StringBuffer mx = new StringBuffer();\r
- mx.append(req.getMethod()).append('\t').append(fileid).append('\n');\r
- Enumeration hnames = req.getHeaderNames();\r
- String ctype = null;\r
- while (hnames.hasMoreElements()) {\r
- String hn = (String)hnames.nextElement();\r
- String hnlc = hn.toLowerCase();\r
- if ((isput && ("content-type".equals(hnlc) ||\r
- "content-language".equals(hnlc) ||\r
- "content-md5".equals(hnlc) ||\r
- "content-range".equals(hnlc))) ||\r
- "x-att-dr-meta".equals(hnlc) ||\r
- (feedid == null && "x-att-dr-received".equals(hnlc)) ||\r
- (hnlc.startsWith("x-") && !hnlc.startsWith("x-att-dr-"))) {\r
- Enumeration hvals = req.getHeaders(hn);\r
- while (hvals.hasMoreElements()) {\r
- String hv = (String)hvals.nextElement();\r
- if ("content-type".equals(hnlc)) {\r
- ctype = hv;\r
- }\r
- if ("x-att-dr-meta".equals(hnlc)) {\r
- if (hv.length() > 4096) {\r
- logger.info("NODE0109 Rejecting publish attempt with metadata too long for feed " + feedid + " user " + user + " ip " + ip);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Metadata too long");\r
- return;\r
- }\r
- if (!MetaDataPattern.matcher(hv.replaceAll("\\\\.", "X")).matches()) {\r
- logger.info("NODE0109 Rejecting publish attempt with malformed metadata for feed " + feedid + " user " + user + " ip " + ip);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Malformed metadata");\r
- return;\r
- }\r
- }\r
- mx.append(hn).append('\t').append(hv).append('\n');\r
- }\r
- }\r
- }\r
- mx.append("X-ATT-DR-RECEIVED\t").append(rcvd).append('\n');\r
- String metadata = mx.toString();\r
- byte[] buf = new byte[1024 * 1024];\r
- int i;\r
- try {\r
- is = req.getInputStream();\r
- dos = new FileOutputStream(data);\r
- while ((i = is.read(buf)) > 0) {\r
- dos.write(buf, 0, i);\r
- }\r
- is.close();\r
- is = null;\r
- dos.close();\r
- dos = null;\r
- } catch (IOException ioe) {\r
- long exlen = -1;\r
- try {\r
- exlen = Long.parseLong(req.getHeader("Content-Length"));\r
- } catch (Exception e) {\r
- }\r
- StatusLog.logPubFail(pubid, feedid, logurl, req.getMethod(), ctype, exlen, data.length(), ip, user, ioe.getMessage());\r
- throw ioe;\r
- }\r
- Path dpath = Paths.get(fbase);\r
- for (Target t: targets) {\r
- DestInfo di = t.getDestInfo();\r
- if (di == null) {\r
- // TODO: unknown destination\r
- continue;\r
- }\r
- String dbase = di.getSpool() + "/" + pubid;\r
- Files.createLink(Paths.get(dbase), dpath);\r
- mw = new FileWriter(meta);\r
- mw.write(metadata);\r
- if (di.getSubId() == null) {\r
- mw.write("X-ATT-DR-ROUTING\t" + t.getRouting() + "\n");\r
- }\r
- mw.close();\r
- meta.renameTo(new File(dbase + ".M"));\r
- }\r
- resp.setStatus(HttpServletResponse.SC_NO_CONTENT);\r
- resp.getOutputStream().close();\r
- StatusLog.logPub(pubid, feedid, logurl, req.getMethod(), ctype, data.length(), ip, user, HttpServletResponse.SC_NO_CONTENT);\r
- } catch (IOException ioe) {\r
- logger.info("NODE0110 IO Exception receiving publish attempt for feed " + feedid + " user " + user + " ip " + ip + " " + ioe.toString(), ioe);\r
- throw ioe;\r
- } finally {\r
- if (is != null) { try { is.close(); } catch (Exception e) {}}\r
- if (dos != null) { try { dos.close(); } catch (Exception e) {}}\r
- if (mw != null) { try { mw.close(); } catch (Exception e) {}}\r
- try { data.delete(); } catch (Exception e) {}\r
- try { meta.delete(); } catch (Exception e) {}\r
- }\r
- }\r
- \r
- private int getIdFromPath(HttpServletRequest req) {\r
- String path = req.getPathInfo();\r
- if (path == null || path.length() < 2)\r
- return -1;\r
- try {\r
- return Integer.parseInt(path.substring(1));\r
- } catch (NumberFormatException e) {\r
- return -1;\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import javax.servlet.*;
+import javax.servlet.http.*;
+import java.util.*;
+import java.util.regex.*;
+import java.io.*;
+import java.nio.file.*;
+
+import org.apache.log4j.Logger;
+import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+import java.net.*;
+
+/**
+ * Servlet for handling all http and https requests to the data router node
+ * <p>
+ * Handled requests are:
+ * <br>
+ * GET http://<i>node</i>/internal/fetchProv - fetch the provisioning data
+ * <br>
+ * PUT/DELETE https://<i>node</i>/internal/publish/<i>fileid</i> - n2n transfer
+ * <br>
+ * PUT/DELETE https://<i>node</i>/publish/<i>feedid</i>/<i>fileid</i> - publish request
+ */
+public class NodeServlet extends HttpServlet {
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeServlet");
+ private static NodeConfigManager config;
+ private static Pattern MetaDataPattern;
+ private static SubnetMatcher internalsubnet = new SubnetMatcher("135.207.136.128/25");
+ //Adding EELF Logger Rally:US664892
+ private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.NodeServlet");
+
+ static {
+ try {
+ String ws = "\\s*";
+ // assume that \\ and \" have been replaced by X
+ String string = "\"[^\"]*\"";
+ //String string = "\"(?:[^\"\\\\]|\\\\.)*\"";
+ String number = "[+-]?(?:\\.\\d+|(?:0|[1-9]\\d*)(?:\\.\\d*)?)(?:[eE][+-]?\\d+)?";
+ String value = "(?:" + string + "|" + number + "|null|true|false)";
+ String item = string + ws + ":" + ws + value + ws;
+ String object = ws + "\\{" + ws + "(?:" + item + "(?:" + "," + ws + item + ")*)?\\}" + ws;
+ MetaDataPattern = Pattern.compile(object, Pattern.DOTALL);
+ } catch (Exception e) {
+ }
+ }
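+
+ // Illustrative note (not part of the original source): the pattern above is intended to
+ // accept a single flat JSON object of string, number, boolean or null values once any
+ // backslash-escaped characters in the header value have been masked (see the
+ // replaceAll("\\\\.", "X") call in common()). A hypothetical X-ATT-DR-META value such as
+ //     {"compress":"gzip","retries":3,"critical":true,"note":null}
+ // would match, while nested objects or arrays would be rejected as malformed metadata.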
+
+ /**
+ * Get the NodeConfigManager instance
+ */
+ public void init() {
+ config = NodeConfigManager.getInstance();
+ logger.info("NODE0101 Node Servlet Configured");
+ }
+
+ private boolean down(HttpServletResponse resp) throws IOException {
+ if (config.isShutdown() || !config.isConfigured()) {
+ resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
+ logger.info("NODE0102 Rejecting request: Service is being quiesced");
+ return (true);
+ }
+ return (false);
+ }
+
+ /**
+ * Handle GET requests for the internal endpoints (fetchProv, resetSubscription, logs, rtt)
+ */
+ protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+ NodeUtils.setIpAndFqdnForEelf("doGet");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"), getIdFromPath(req) + "");
+ if (down(resp)) {
+ return;
+ }
+ String path = req.getPathInfo();
+ String qs = req.getQueryString();
+ String ip = req.getRemoteAddr();
+ if (qs != null) {
+ path = path + "?" + qs;
+ }
+ if ("/internal/fetchProv".equals(path)) {
+ config.gofetch(ip);
+ resp.setStatus(HttpServletResponse.SC_NO_CONTENT);
+ return;
+ } else if (path.startsWith("/internal/resetSubscription/")) {
+ String subid = path.substring(28);
+ if (subid.length() != 0 && subid.indexOf('/') == -1) {
+ NodeMain.resetQueue(subid, ip);
+ resp.setStatus(HttpServletResponse.SC_NO_CONTENT);
+ return;
+ }
+ }
+ if (internalsubnet.matches(NodeUtils.getInetAddress(ip))) {
+ if (path.startsWith("/internal/logs/")) {
+ String f = path.substring(15);
+ File fn = new File(config.getLogDir() + "/" + f);
+ if (f.indexOf('/') != -1 || !fn.isFile()) {
+ logger.info("NODE0103 Rejecting invalid GET of " + path + " from " + ip);
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND);
+ return;
+ }
+ byte[] buf = new byte[65536];
+ resp.setContentType("text/plain");
+ resp.setContentLength((int) fn.length());
+ resp.setStatus(200);
+ InputStream is = new FileInputStream(fn);
+ OutputStream os = resp.getOutputStream();
+ int i;
+ while ((i = is.read(buf)) > 0) {
+ os.write(buf, 0, i);
+ }
+ is.close();
+ return;
+ }
+ if (path.startsWith("/internal/rtt/")) {
+ String xip = path.substring(14);
+ long st = System.currentTimeMillis();
+ String status = " unknown";
+ try {
+ Socket s = new Socket(xip, 443);
+ s.close();
+ status = " connected";
+ } catch (Exception e) {
+ status = " error " + e.toString();
+ }
+ long dur = System.currentTimeMillis() - st;
+ resp.setContentType("text/plain");
+ resp.setStatus(200);
+ byte[] buf = (dur + status + "\n").getBytes();
+ resp.setContentLength(buf.length);
+ resp.getOutputStream().write(buf);
+ return;
+ }
+ }
+ logger.info("NODE0103 Rejecting invalid GET of " + path + " from " + ip);
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND);
+ return;
+ }
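+
+ // Illustrative examples (assumed, not taken from the original source) of the internal GET
+ // requests handled above, against a hypothetical node "dr-node1":
+ //     GET http://dr-node1/internal/fetchProv              -> trigger a provisioning poll (204)
+ //     GET http://dr-node1/internal/resetSubscription/5    -> reset the delivery queue for subscription 5 (204)
+ //     GET http://dr-node1/internal/logs/events-000001.log -> return a node log file (internal subnet only)
+ //     GET http://dr-node1/internal/rtt/192.0.2.10         -> time a TCP connect to port 443 (internal subnet only)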
+
+ /**
+ * Handle all PUT requests
+ */
+ protected void doPut(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+ NodeUtils.setIpAndFqdnForEelf("doPut");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"), getIdFromPath(req) + "");
+ common(req, resp, true);
+ }
+
+ /**
+ * Handle all DELETE requests
+ */
+ protected void doDelete(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+ NodeUtils.setIpAndFqdnForEelf("doDelete");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"), getIdFromPath(req) + "");
+ common(req, resp, false);
+ }
+
+ private void common(HttpServletRequest req, HttpServletResponse resp, boolean isput) throws ServletException, IOException {
+ if (down(resp)) {
+ return;
+ }
+ if (!req.isSecure()) {
+ logger.info("NODE0104 Rejecting insecure PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, "https required on publish requests");
+ return;
+ }
+ String fileid = req.getPathInfo();
+ if (fileid == null) {
+ logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>.");
+ return;
+ }
+ String feedid = null;
+ String user = null;
+ String credentials = req.getHeader("Authorization");
+ if (credentials == null) {
+ logger.info("NODE0106 Rejecting unauthenticated PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Authorization header required");
+ return;
+ }
+ String ip = req.getRemoteAddr();
+ String lip = req.getLocalAddr();
+ String pubid = null;
+ String xpubid = null;
+ String rcvd = NodeUtils.logts(System.currentTimeMillis()) + ";from=" + ip + ";by=" + lip;
+ Target[] targets = null;
+ if (fileid.startsWith("/publish/")) {
+ fileid = fileid.substring(9);
+ int i = fileid.indexOf('/');
+ if (i == -1 || i == fileid.length() - 1) {
+ logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>. Possible missing fileid.");
+ return;
+ }
+ feedid = fileid.substring(0, i);
+ fileid = fileid.substring(i + 1);
+ pubid = config.getPublishId();
+ xpubid = req.getHeader("X-ATT-DR-PUBLISH-ID");
+ targets = config.getTargets(feedid);
+ } else if (fileid.startsWith("/internal/publish/")) {
+ if (!config.isAnotherNode(credentials, ip)) {
+ logger.info("NODE0107 Rejecting unauthorized node-to-node transfer attempt from " + ip);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN);
+ return;
+ }
+ fileid = fileid.substring(18);
+ pubid = req.getHeader("X-ATT-DR-PUBLISH-ID");
+ targets = config.parseRouting(req.getHeader("X-ATT-DR-ROUTING"));
+ } else {
+ logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>.");
+ return;
+ }
+ if (fileid.indexOf('/') != -1) {
+ logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>.");
+ return;
+ }
+ String qs = req.getQueryString();
+ if (qs != null) {
+ fileid = fileid + "?" + qs;
+ }
+ String hp = config.getMyName();
+ int xp = config.getExtHttpsPort();
+ if (xp != 443) {
+ hp = hp + ":" + xp;
+ }
+ String logurl = "https://" + hp + "/internal/publish/" + fileid;
+ if (feedid != null) {
+ logurl = "https://" + hp + "/publish/" + feedid + "/" + fileid;
+ String reason = config.isPublishPermitted(feedid, credentials, ip);
+ if (reason != null) {
+ logger.info("NODE0111 Rejecting unauthorized publish attempt to feed " + feedid + " fileid " + fileid + " from " + ip + " reason " + reason);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, reason);
+ return;
+ }
+ user = config.getAuthUser(feedid, credentials);
+ String newnode = config.getIngressNode(feedid, user, ip);
+ if (newnode != null) {
+ String port = "";
+ int iport = config.getExtHttpsPort();
+ if (iport != 443) {
+ port = ":" + iport;
+ }
+ String redirto = "https://" + newnode + port + "/publish/" + feedid + "/" + fileid;
+ logger.info("NODE0108 Redirecting publish attempt for feed " + feedid + " user " + user + " ip " + ip + " to " + redirto);
+ resp.sendRedirect(redirto);
+ return;
+ }
+ resp.setHeader("X-ATT-DR-PUBLISH-ID", pubid);
+ }
+ String fbase = config.getSpoolDir() + "/" + pubid;
+ File data = new File(fbase);
+ File meta = new File(fbase + ".M");
+ OutputStream dos = null;
+ Writer mw = null;
+ InputStream is = null;
+ try {
+ StringBuffer mx = new StringBuffer();
+ mx.append(req.getMethod()).append('\t').append(fileid).append('\n');
+ Enumeration hnames = req.getHeaderNames();
+ String ctype = null;
+ while (hnames.hasMoreElements()) {
+ String hn = (String) hnames.nextElement();
+ String hnlc = hn.toLowerCase();
+ if ((isput && ("content-type".equals(hnlc) ||
+ "content-language".equals(hnlc) ||
+ "content-md5".equals(hnlc) ||
+ "content-range".equals(hnlc))) ||
+ "x-att-dr-meta".equals(hnlc) ||
+ (feedid == null && "x-att-dr-received".equals(hnlc)) ||
+ (hnlc.startsWith("x-") && !hnlc.startsWith("x-att-dr-"))) {
+ Enumeration hvals = req.getHeaders(hn);
+ while (hvals.hasMoreElements()) {
+ String hv = (String) hvals.nextElement();
+ if ("content-type".equals(hnlc)) {
+ ctype = hv;
+ }
+ if ("x-att-dr-meta".equals(hnlc)) {
+ if (hv.length() > 4096) {
+ logger.info("NODE0109 Rejecting publish attempt with metadata too long for feed " + feedid + " user " + user + " ip " + ip);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Metadata too long");
+ return;
+ }
+ if (!MetaDataPattern.matcher(hv.replaceAll("\\\\.", "X")).matches()) {
+ logger.info("NODE0109 Rejecting publish attempt with malformed metadata for feed " + feedid + " user " + user + " ip " + ip);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Malformed metadata");
+ return;
+ }
+ }
+ mx.append(hn).append('\t').append(hv).append('\n');
+ }
+ }
+ }
+ mx.append("X-ATT-DR-RECEIVED\t").append(rcvd).append('\n');
+ String metadata = mx.toString();
+ byte[] buf = new byte[1024 * 1024];
+ int i;
+ try {
+ is = req.getInputStream();
+ dos = new FileOutputStream(data);
+ while ((i = is.read(buf)) > 0) {
+ dos.write(buf, 0, i);
+ }
+ is.close();
+ is = null;
+ dos.close();
+ dos = null;
+ } catch (IOException ioe) {
+ long exlen = -1;
+ try {
+ exlen = Long.parseLong(req.getHeader("Content-Length"));
+ } catch (Exception e) {
+ }
+ StatusLog.logPubFail(pubid, feedid, logurl, req.getMethod(), ctype, exlen, data.length(), ip, user, ioe.getMessage());
+ throw ioe;
+ }
+ Path dpath = Paths.get(fbase);
+ for (Target t : targets) {
+ DestInfo di = t.getDestInfo();
+ if (di == null) {
+ // TODO: unknown destination
+ continue;
+ }
+ String dbase = di.getSpool() + "/" + pubid;
+ Files.createLink(Paths.get(dbase), dpath);
+ mw = new FileWriter(meta);
+ mw.write(metadata);
+ if (di.getSubId() == null) {
+ mw.write("X-ATT-DR-ROUTING\t" + t.getRouting() + "\n");
+ }
+ mw.close();
+ meta.renameTo(new File(dbase + ".M"));
+ }
+ resp.setStatus(HttpServletResponse.SC_NO_CONTENT);
+ resp.getOutputStream().close();
+ StatusLog.logPub(pubid, feedid, logurl, req.getMethod(), ctype, data.length(), ip, user, HttpServletResponse.SC_NO_CONTENT);
+ } catch (IOException ioe) {
+ logger.info("NODE0110 IO Exception receiving publish attempt for feed " + feedid + " user " + user + " ip " + ip + " " + ioe.toString(), ioe);
+ throw ioe;
+ } finally {
+ if (is != null) {
+ try {
+ is.close();
+ } catch (Exception e) {
+ }
+ }
+ if (dos != null) {
+ try {
+ dos.close();
+ } catch (Exception e) {
+ }
+ }
+ if (mw != null) {
+ try {
+ mw.close();
+ } catch (Exception e) {
+ }
+ }
+ try {
+ data.delete();
+ } catch (Exception e) {
+ }
+ try {
+ meta.delete();
+ } catch (Exception e) {
+ }
+ }
+ }
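+
+ // Illustrative sketch (assumed, not from the original source) of the publish flow handled by
+ // common() above, for a hypothetical feed 42 and file "report.xml" on node "dr-node1":
+ //     PUT https://dr-node1/publish/42/report.xml
+ //         Authorization: Basic ...              (required)
+ //         X-ATT-DR-META: {"note":"example"}     (optional flat JSON, at most 4096 characters)
+ // On success the body is spooled, hard-linked into each target's spool directory together
+ // with a ".M" metadata file, and the response is 204 No Content with an
+ // X-ATT-DR-PUBLISH-ID header; otherwise the request is redirected to the feed's ingress
+ // node or rejected with 400/403/404 as in the checks above.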
+
+ private int getIdFromPath(HttpServletRequest req) {
+ String path = req.getPathInfo();
+ if (path == null || path.length() < 2)
+ return -1;
+ try {
+ return Integer.parseInt(path.substring(1));
+ } catch (NumberFormatException e) {
+ return -1;
+ }
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import static com.att.eelf.configuration.Configuration.MDC_SERVER_FQDN;\r
-import static com.att.eelf.configuration.Configuration.MDC_SERVER_IP_ADDRESS;\r
-import static com.att.eelf.configuration.Configuration.MDC_SERVICE_NAME;\r
-\r
-import java.security.*;\r
-import java.io.*;\r
-import java.util.*;\r
-import java.security.cert.*;\r
-import java.net.*;\r
-import java.text.*;\r
-import org.apache.commons.codec.binary.Base64;\r
-import org.apache.log4j.Logger;\r
-import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;\r
-import org.slf4j.MDC;\r
-\r
-import com.att.eelf.configuration.EELFLogger;\r
-import com.att.eelf.configuration.EELFManager;\r
-\r
-/**\r
- * Utility functions for the data router node\r
- */\r
-public class NodeUtils {\r
- private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.NodeUtils");\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeUtils");\r
- private static SimpleDateFormat logdate;\r
- static {\r
- logdate = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");\r
- logdate.setTimeZone(TimeZone.getTimeZone("GMT"));\r
- }\r
- private NodeUtils() {}\r
- /**\r
- * Base64 encode a byte array\r
- * @param raw The bytes to be encoded\r
- * @return The encoded string\r
- */\r
- public static String base64Encode(byte[] raw) {\r
- return(Base64.encodeBase64String(raw));\r
- }\r
- /**\r
- * Given a user and password, generate the credentials\r
- * @param user User name\r
- * @param password User password\r
- * @return Authorization header value\r
- */\r
- public static String getAuthHdr(String user, String password) {\r
- if (user == null || password == null) {\r
- return(null);\r
- }\r
- return("Basic " + base64Encode((user + ":" + password).getBytes()));\r
- }\r
- /**\r
- * Given a node name, generate the credentials\r
- * @param node Node name\r
- */\r
- public static String getNodeAuthHdr(String node, String key) {\r
- try {\r
- MessageDigest md = MessageDigest.getInstance("SHA");\r
- md.update(key.getBytes());\r
- md.update(node.getBytes());\r
- md.update(key.getBytes());\r
- return(getAuthHdr(node, base64Encode(md.digest())));\r
- } catch (Exception e) {\r
- return(null);\r
- }\r
- }\r
- /**\r
- * Given a keystore file and its password, return the value of the CN of the first private key entry with a certificate.\r
- * @param kstype The type of keystore\r
- * @param ksfile The file name of the keystore\r
- * @param kspass The password of the keystore\r
- * @return CN of the certificate subject or null\r
- */\r
- public static String getCanonicalName(String kstype, String ksfile, String kspass) {\r
- try {\r
- KeyStore ks = KeyStore.getInstance(kstype);\r
- ks.load(new FileInputStream(ksfile), kspass.toCharArray());\r
- return(getCanonicalName(ks));\r
- } catch (Exception e) {\r
- setIpAndFqdnForEelf("getCanonicalName");\r
- eelflogger.error(EelfMsgs.MESSAGE_KEYSTORE_LOAD_ERROR, ksfile, e.toString());\r
- logger.error("NODE0401 Error loading my keystore file + " + ksfile + " " + e.toString(), e);\r
- return(null);\r
- }\r
- }\r
- /**\r
- * Given a keystore, return the value of the CN of the first private key entry with a certificate.\r
- * @param ks The KeyStore\r
- * @return CN of the certificate subject or null\r
- */\r
- public static String getCanonicalName(KeyStore ks) {\r
- try {\r
- Enumeration<String> aliases = ks.aliases();\r
- while (aliases.hasMoreElements()) {\r
- String s = aliases.nextElement();\r
- if (ks.entryInstanceOf(s, KeyStore.PrivateKeyEntry.class)) {\r
- X509Certificate c = (X509Certificate)ks.getCertificate(s);\r
- if (c != null) {\r
- String subject = c.getSubjectX500Principal().getName();\r
- String[] parts = subject.split(",");\r
- if (parts.length < 1) {\r
- return(null);\r
- }\r
- subject = parts[0].trim();\r
- if (!subject.startsWith("CN=")) {\r
- return(null);\r
-\r
- }\r
- return(subject.substring(3));\r
- }\r
- }\r
- }\r
- } catch (Exception e) {\r
- logger.error("NODE0402 Error extracting my name from my keystore file " + e.toString(), e);\r
- }\r
- return(null);\r
- }\r
- /**\r
- * Given a string representation of an IP address, get the corresponding byte array\r
- * @param ip The IP address as a string\r
- * @return The IP address as a byte array or null if the address is invalid\r
- */\r
- public static byte[] getInetAddress(String ip) {\r
- try {\r
- return(InetAddress.getByName(ip).getAddress());\r
- } catch (Exception e) {\r
- }\r
- return(null);\r
- }\r
- /**\r
- * Given a uri with parameters, split out the feed ID and file ID\r
- */\r
- public static String[] getFeedAndFileID(String uriandparams) {\r
- int end = uriandparams.length();\r
- int i = uriandparams.indexOf('#');\r
- if (i != -1 && i < end) {\r
- end = i;\r
- }\r
- i = uriandparams.indexOf('?');\r
- if (i != -1 && i < end) {\r
- end = i;\r
- }\r
- end = uriandparams.lastIndexOf('/', end);\r
- if (end < 2) {\r
- return(null);\r
- }\r
- i = uriandparams.lastIndexOf('/', end - 1);\r
- if (i == -1) {\r
- return(null);\r
- }\r
- return(new String[] { uriandparams.substring(i + 1, end - 1), uriandparams.substring(end + 1) });\r
- }\r
- /**\r
- * Escape fields that might contain vertical bar, backslash, or newline by replacing them with backslash p, backslash e and backslash n.\r
- */\r
- public static String loge(String s) {\r
- if (s == null) {\r
- return(s);\r
- }\r
- return(s.replaceAll("\\\\", "\\\\e").replaceAll("\\|", "\\\\p").replaceAll("\n", "\\\\n"));\r
- }\r
- /**\r
- * Undo what loge does.\r
- */\r
- public static String unloge(String s) {\r
- if (s == null) {\r
- return(s);\r
- }\r
- return(s.replaceAll("\\\\p", "\\|").replaceAll("\\\\n", "\n").replaceAll("\\\\e", "\\\\"));\r
- }\r
- /**\r
- * Format a logging timestamp as yyyy-mm-ddThh:mm:ss.mmmZ\r
- */\r
- public static String logts(long when) {\r
- return(logts(new Date(when)));\r
- }\r
- /**\r
- * Format a logging timestamp as yyyy-mm-ddThh:mm:ss.mmmZ\r
- */\r
- public static synchronized String logts(Date when) {\r
- return(logdate.format(when));\r
- }\r
- \r
- /* Method prints method name, server FQDN and IP Address of the machine in EELF logs\r
- * @Method - setIpAndFqdnForEelf - Rally:US664892 \r
- * @Params - method, prints method name in EELF log.\r
- */ \r
- public static void setIpAndFqdnForEelf(String method) {\r
- MDC.clear();\r
- MDC.put(MDC_SERVICE_NAME, method);\r
- try {\r
- MDC.put(MDC_SERVER_FQDN, InetAddress.getLocalHost().getHostName());\r
- MDC.put(MDC_SERVER_IP_ADDRESS, InetAddress.getLocalHost().getHostAddress());\r
- } catch (Exception e) {\r
- e.printStackTrace();\r
- }\r
-\r
- }\r
- \r
-\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import static com.att.eelf.configuration.Configuration.MDC_SERVER_FQDN;
+import static com.att.eelf.configuration.Configuration.MDC_SERVER_IP_ADDRESS;
+import static com.att.eelf.configuration.Configuration.MDC_SERVICE_NAME;
+
+import java.security.*;
+import java.io.*;
+import java.util.*;
+import java.security.cert.*;
+import java.net.*;
+import java.text.*;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.log4j.Logger;
+import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;
+import org.slf4j.MDC;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+/**
+ * Utility functions for the data router node
+ */
+public class NodeUtils {
+ private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.NodeUtils");
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeUtils");
+ private static SimpleDateFormat logdate;
+
+ static {
+ logdate = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
+ logdate.setTimeZone(TimeZone.getTimeZone("GMT"));
+ }
+
+ private NodeUtils() {
+ }
+
+ /**
+ * Base64 encode a byte array
+ *
+ * @param raw The bytes to be encoded
+ * @return The encoded string
+ */
+ public static String base64Encode(byte[] raw) {
+ return (Base64.encodeBase64String(raw));
+ }
+
+ /**
+ * Given a user and password, generate the credentials
+ *
+ * @param user User name
+ * @param password User password
+ * @return Authorization header value
+ */
+ public static String getAuthHdr(String user, String password) {
+ if (user == null || password == null) {
+ return (null);
+ }
+ return ("Basic " + base64Encode((user + ":" + password).getBytes()));
+ }
+
+ /**
+ * Given a node name, generate the credentials
+ *
+ * @param node Node name
+ */
+ public static String getNodeAuthHdr(String node, String key) {
+ try {
+ MessageDigest md = MessageDigest.getInstance("SHA");
+ md.update(key.getBytes());
+ md.update(node.getBytes());
+ md.update(key.getBytes());
+ return (getAuthHdr(node, base64Encode(md.digest())));
+ } catch (Exception e) {
+ return (null);
+ }
+ }
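+
+ // Illustrative sketch (assumption, not from the original source): for a hypothetical node
+ // name "dr-node1" and shared key "secret", the derived password is
+ // base64(SHA-1("secret" + "dr-node1" + "secret")) and the returned value is
+ // "Basic " + base64("dr-node1:" + password), i.e. an ordinary HTTP Basic credential.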
+
+ /**
+ * Given a keystore file and its password, return the value of the CN of the first private key entry with a certificate.
+ *
+ * @param kstype The type of keystore
+ * @param ksfile The file name of the keystore
+ * @param kspass The password of the keystore
+ * @return CN of the certificate subject or null
+ */
+ public static String getCanonicalName(String kstype, String ksfile, String kspass) {
+ try {
+ KeyStore ks = KeyStore.getInstance(kstype);
+ ks.load(new FileInputStream(ksfile), kspass.toCharArray());
+ return (getCanonicalName(ks));
+ } catch (Exception e) {
+ setIpAndFqdnForEelf("getCanonicalName");
+ eelflogger.error(EelfMsgs.MESSAGE_KEYSTORE_LOAD_ERROR, ksfile, e.toString());
+ logger.error("NODE0401 Error loading my keystore file + " + ksfile + " " + e.toString(), e);
+ return (null);
+ }
+ }
+
+ /**
+ * Given a keystore, return the value of the CN of the first private key entry with a certificate.
+ *
+ * @param ks The KeyStore
+ * @return CN of the certificate subject or null
+ */
+ public static String getCanonicalName(KeyStore ks) {
+ try {
+ Enumeration<String> aliases = ks.aliases();
+ while (aliases.hasMoreElements()) {
+ String s = aliases.nextElement();
+ if (ks.entryInstanceOf(s, KeyStore.PrivateKeyEntry.class)) {
+ X509Certificate c = (X509Certificate) ks.getCertificate(s);
+ if (c != null) {
+ String subject = c.getSubjectX500Principal().getName();
+ String[] parts = subject.split(",");
+ if (parts.length < 1) {
+ return (null);
+ }
+ subject = parts[0].trim();
+ if (!subject.startsWith("CN=")) {
+ return (null);
+
+ }
+ return (subject.substring(3));
+ }
+ }
+ }
+ } catch (Exception e) {
+ logger.error("NODE0402 Error extracting my name from my keystore file " + e.toString(), e);
+ }
+ return (null);
+ }
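+
+ // For example (hypothetical subject, not from the original source): a private key entry whose
+ // certificate subject is "CN=dr-node1.example.com,OU=DMAAP,O=ONAP" yields "dr-node1.example.com",
+ // while a subject whose first component is not a CN yields null.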
+
+ /**
+ * Given a string representation of an IP address, get the corresponding byte array
+ *
+ * @param ip The IP address as a string
+ * @return The IP address as a byte array or null if the address is invalid
+ */
+ public static byte[] getInetAddress(String ip) {
+ try {
+ return (InetAddress.getByName(ip).getAddress());
+ } catch (Exception e) {
+ }
+ return (null);
+ }
+
+ /**
+ * Given a uri with parameters, split out the feed ID and file ID
+ */
+ public static String[] getFeedAndFileID(String uriandparams) {
+ int end = uriandparams.length();
+ int i = uriandparams.indexOf('#');
+ if (i != -1 && i < end) {
+ end = i;
+ }
+ i = uriandparams.indexOf('?');
+ if (i != -1 && i < end) {
+ end = i;
+ }
+ end = uriandparams.lastIndexOf('/', end);
+ if (end < 2) {
+ return (null);
+ }
+ i = uriandparams.lastIndexOf('/', end - 1);
+ if (i == -1) {
+ return (null);
+ }
+ return (new String[]{uriandparams.substring(i + 1, end - 1), uriandparams.substring(end + 1)});
+ }
+
+ /**
+ * Escape fields that might contain vertical bar, backslash, or newline by replacing them with backslash p, backslash e and backslash n.
+ */
+ public static String loge(String s) {
+ if (s == null) {
+ return (s);
+ }
+ return (s.replaceAll("\\\\", "\\\\e").replaceAll("\\|", "\\\\p").replaceAll("\n", "\\\\n"));
+ }
+
+ /**
+ * Undo what loge does.
+ */
+ public static String unloge(String s) {
+ if (s == null) {
+ return (s);
+ }
+ return (s.replaceAll("\\\\p", "\\|").replaceAll("\\\\n", "\n").replaceAll("\\\\e", "\\\\"));
+ }
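+
+ // For example (illustrative only): a field containing the characters '|', '\' and newline is
+ // written to the log as "\p", "\e" and "\n" respectively, so the value "a|b" is logged as
+ // "a\pb"; unloge() applies the reverse substitutions so the original field is recovered.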
+
+ /**
+ * Format a logging timestamp as yyyy-mm-ddThh:mm:ss.mmmZ
+ */
+ public static String logts(long when) {
+ return (logts(new Date(when)));
+ }
+
+ /**
+ * Format a logging timestamp as yyyy-mm-ddThh:mm:ss.mmmZ
+ */
+ public static synchronized String logts(Date when) {
+ return (logdate.format(when));
+ }
+
+ /* Sets the method name, server FQDN and IP address of the machine in the EELF MDC
+ * so they appear in EELF logs.
+ * @Method - setIpAndFqdnForEelf - Rally:US664892
+ * @Params - method, the method name to record in the EELF log.
+ */
+ public static void setIpAndFqdnForEelf(String method) {
+ MDC.clear();
+ MDC.put(MDC_SERVICE_NAME, method);
+ try {
+ MDC.put(MDC_SERVER_FQDN, InetAddress.getLocalHost().getHostName());
+ MDC.put(MDC_SERVER_IP_ADDRESS, InetAddress.getLocalHost().getHostAddress());
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+
+ }
+
+
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-\r
-/**\r
- * Given a set of node names and next hops, identify and ignore any cycles and figure out the sequence of next hops to get from this node to any other node\r
- */\r
-\r
-public class PathFinder {\r
- private static class Hop {\r
- public boolean mark;\r
- public boolean bad;\r
- public NodeConfig.ProvHop basis;\r
- }\r
- private Vector<String> errors = new Vector<String>();\r
- private Hashtable<String, String> routes = new Hashtable<String, String>();\r
- /**\r
- * Get list of errors encountered while finding paths\r
- * @return array of error descriptions\r
- */\r
- public String[] getErrors() {\r
- return(errors.toArray(new String[errors.size()]));\r
- }\r
- /**\r
- * Get the route from this node to the specified node\r
- * @param destination node\r
- * @return list of node names separated by and ending with "/"\r
- */\r
- public String getPath(String destination) {\r
- String ret = routes.get(destination);\r
- if (ret == null) {\r
- return("");\r
- }\r
- return(ret);\r
- }\r
- private String plot(String from, String to, Hashtable<String, Hop> info) {\r
- Hop nh = info.get(from);\r
- if (nh == null || nh.bad) {\r
- return(to);\r
- }\r
- if (nh.mark) {\r
- // loop detected;\r
- while (!nh.bad) {\r
- nh.bad = true;\r
- errors.add(nh.basis + " is part of a cycle");\r
- nh = info.get(nh.basis.getVia());\r
- }\r
- return(to);\r
- }\r
- nh.mark = true;\r
- String x = plot(nh.basis.getVia(), to, info);\r
- nh.mark = false;\r
- if (nh.bad) {\r
- return(to);\r
- }\r
- return(nh.basis.getVia() + "/" + x);\r
- }\r
- /**\r
- * Find routes from a specified origin to all of the nodes given a set of specified next hops.\r
- * @param origin where we start\r
- * @param nodes where we can go\r
- * @param hops detours along the way\r
- */\r
- public PathFinder(String origin, String[] nodes, NodeConfig.ProvHop[] hops) {\r
- HashSet<String> known = new HashSet<String>();\r
- Hashtable<String, Hashtable<String, Hop>> ht = new Hashtable<String, Hashtable<String, Hop>>();\r
- for (String n: nodes) {\r
- known.add(n);\r
- ht.put(n, new Hashtable<String, Hop>());\r
- }\r
- for (NodeConfig.ProvHop ph: hops) {\r
- if (!known.contains(ph.getFrom())) {\r
- errors.add(ph + " references unknown from node");\r
- continue;\r
- }\r
- if (!known.contains(ph.getTo())) {\r
- errors.add(ph + " references unknown destination node");\r
- continue;\r
- }\r
- Hashtable<String, Hop> ht2 = ht.get(ph.getTo());\r
- Hop h = ht2.get(ph.getFrom());\r
- if (h != null) {\r
- h.bad = true;\r
- errors.add(ph + " gives duplicate next hop - previous via was " + h.basis.getVia());\r
- continue;\r
- }\r
- h = new Hop();\r
- h.basis = ph;\r
- ht2.put(ph.getFrom(), h);\r
- if (!known.contains(ph.getVia())) {\r
- errors.add(ph + " references unknown via node");\r
- h.bad = true;\r
- continue;\r
- }\r
- if (ph.getVia().equals(ph.getTo())) {\r
- errors.add(ph + " gives destination as via");\r
- h.bad = true;\r
- continue;\r
- }\r
- }\r
- for (String n: known) {\r
- if (n.equals(origin)) {\r
- routes.put(n, "");\r
- }\r
- routes.put(n, plot(origin, n, ht.get(n)) + "/");\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+
+/**
+ * Given a set of node names and next hops, identify and ignore any cycles and figure out the sequence of next hops to get from this node to any other node
+ */
+
+public class PathFinder {
+ private static class Hop {
+ public boolean mark;
+ public boolean bad;
+ public NodeConfig.ProvHop basis;
+ }
+
+ private Vector<String> errors = new Vector<String>();
+ private Hashtable<String, String> routes = new Hashtable<String, String>();
+
+ /**
+ * Get list of errors encountered while finding paths
+ *
+ * @return array of error descriptions
+ */
+ public String[] getErrors() {
+ return (errors.toArray(new String[errors.size()]));
+ }
+
+ /**
+ * Get the route from this node to the specified node
+ *
+ * @param destination node
+ * @return list of node names separated by and ending with "/"
+ */
+ public String getPath(String destination) {
+ String ret = routes.get(destination);
+ if (ret == null) {
+ return ("");
+ }
+ return (ret);
+ }
+
+ private String plot(String from, String to, Hashtable<String, Hop> info) {
+ Hop nh = info.get(from);
+ if (nh == null || nh.bad) {
+ return (to);
+ }
+ if (nh.mark) {
+ // loop detected
+ while (!nh.bad) {
+ nh.bad = true;
+ errors.add(nh.basis + " is part of a cycle");
+ nh = info.get(nh.basis.getVia());
+ }
+ return (to);
+ }
+ nh.mark = true;
+ String x = plot(nh.basis.getVia(), to, info);
+ nh.mark = false;
+ if (nh.bad) {
+ return (to);
+ }
+ return (nh.basis.getVia() + "/" + x);
+ }
+
+ /**
+ * Find routes from a specified origin to all of the nodes given a set of specified next hops.
+ *
+ * @param origin where we start
+ * @param nodes where we can go
+ * @param hops detours along the way
+ */
+ public PathFinder(String origin, String[] nodes, NodeConfig.ProvHop[] hops) {
+ HashSet<String> known = new HashSet<String>();
+ Hashtable<String, Hashtable<String, Hop>> ht = new Hashtable<String, Hashtable<String, Hop>>();
+ for (String n : nodes) {
+ known.add(n);
+ ht.put(n, new Hashtable<String, Hop>());
+ }
+ for (NodeConfig.ProvHop ph : hops) {
+ if (!known.contains(ph.getFrom())) {
+ errors.add(ph + " references unknown from node");
+ continue;
+ }
+ if (!known.contains(ph.getTo())) {
+ errors.add(ph + " references unknown destination node");
+ continue;
+ }
+ Hashtable<String, Hop> ht2 = ht.get(ph.getTo());
+ Hop h = ht2.get(ph.getFrom());
+ if (h != null) {
+ h.bad = true;
+ errors.add(ph + " gives duplicate next hop - previous via was " + h.basis.getVia());
+ continue;
+ }
+ h = new Hop();
+ h.basis = ph;
+ ht2.put(ph.getFrom(), h);
+ if (!known.contains(ph.getVia())) {
+ errors.add(ph + " references unknown via node");
+ h.bad = true;
+ continue;
+ }
+ if (ph.getVia().equals(ph.getTo())) {
+ errors.add(ph + " gives destination as via");
+ h.bad = true;
+ continue;
+ }
+ }
+ for (String n : known) {
+ if (n.equals(origin)) {
+ routes.put(n, "");
+ }
+ routes.put(n, plot(origin, n, ht.get(n)) + "/");
+ }
+ }
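+
+ // Illustrative usage (assumed, not from the original source): for nodes A, B and C with a
+ // single provisioned hop saying traffic from A to C goes via B,
+ //     PathFinder pf = new PathFinder("A", new String[] { "A", "B", "C" },
+ //             new NodeConfig.ProvHop[] { new NodeConfig.ProvHop("A", "C", "B") });
+ // pf.getPath("C") would be expected to return "B/C/" and pf.getPath("B") to return "B/",
+ // while hop entries that form a cycle are reported via getErrors() and ignored.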
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.io.*;\r
-import java.util.*;\r
-import org.json.*;\r
-import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;\r
-import org.apache.log4j.Logger;\r
-\r
-import com.att.eelf.configuration.EELFLogger;\r
-import com.att.eelf.configuration.EELFManager;\r
-\r
-/**\r
- * Parser for provisioning data from the provisioning server.\r
- * <p>\r
- * The ProvData class uses a Reader for the text configuration from the\r
- * provisioning server to construct arrays of raw configuration entries.\r
- */\r
-public class ProvData {\r
- private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.ProvData");\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.ProvData");\r
- private NodeConfig.ProvNode[] pn;\r
- private NodeConfig.ProvParam[] pp;\r
- private NodeConfig.ProvFeed[] pf;\r
- private NodeConfig.ProvFeedUser[] pfu;\r
- private NodeConfig.ProvFeedSubnet[] pfsn;\r
- private NodeConfig.ProvSubscription[] ps;\r
- private NodeConfig.ProvForceIngress[] pfi;\r
- private NodeConfig.ProvForceEgress[] pfe;\r
- private NodeConfig.ProvHop[] ph;\r
- private static String[] gvasa(JSONArray a, int index) {\r
- return(gvasa(a.get(index)));\r
- }\r
- private static String[] gvasa(JSONObject o, String key) {\r
- return(gvasa(o.opt(key)));\r
- }\r
- private static String[] gvasa(Object o) {\r
- if (o instanceof JSONArray) {\r
- JSONArray a = (JSONArray)o;\r
- Vector<String> v = new Vector<String>();\r
- for (int i = 0; i < a.length(); i++) {\r
- String s = gvas(a, i);\r
- if (s != null) {\r
- v.add(s);\r
- }\r
- }\r
- return(v.toArray(new String[v.size()]));\r
- } else {\r
- String s = gvas(o);\r
- if (s == null) {\r
- return(new String[0]);\r
- } else {\r
- return(new String[] { s });\r
- }\r
- }\r
- }\r
- private static String gvas(JSONArray a, int index) {\r
- return(gvas(a.get(index)));\r
- }\r
- private static String gvas(JSONObject o, String key) {\r
- return(gvas(o.opt(key)));\r
- }\r
- private static String gvas(Object o) {\r
- if (o instanceof Boolean || o instanceof Number || o instanceof String) {\r
- return(o.toString());\r
- }\r
- return(null);\r
- }\r
- /**\r
- * Construct raw provisioing data entries from the text (JSON)\r
- * provisioning document received from the provisioning server\r
- * @param r The reader for the JSON text.\r
- */\r
- public ProvData(Reader r) throws IOException {\r
- Vector<NodeConfig.ProvNode> pnv = new Vector<NodeConfig.ProvNode>();\r
- Vector<NodeConfig.ProvParam> ppv = new Vector<NodeConfig.ProvParam>();\r
- Vector<NodeConfig.ProvFeed> pfv = new Vector<NodeConfig.ProvFeed>();\r
- Vector<NodeConfig.ProvFeedUser> pfuv = new Vector<NodeConfig.ProvFeedUser>();\r
- Vector<NodeConfig.ProvFeedSubnet> pfsnv = new Vector<NodeConfig.ProvFeedSubnet>();\r
- Vector<NodeConfig.ProvSubscription> psv = new Vector<NodeConfig.ProvSubscription>();\r
- Vector<NodeConfig.ProvForceIngress> pfiv = new Vector<NodeConfig.ProvForceIngress>();\r
- Vector<NodeConfig.ProvForceEgress> pfev = new Vector<NodeConfig.ProvForceEgress>();\r
- Vector<NodeConfig.ProvHop> phv = new Vector<NodeConfig.ProvHop>();\r
- try {\r
- JSONTokener jtx = new JSONTokener(r);\r
- JSONObject jcfg = new JSONObject(jtx);\r
- char c = jtx.nextClean();\r
- if (c != '\0') {\r
- throw new JSONException("Spurious characters following configuration");\r
- }\r
- r.close();\r
- JSONArray jfeeds = jcfg.optJSONArray("feeds");\r
- if (jfeeds != null) {\r
- for (int fx = 0; fx < jfeeds.length(); fx++) {\r
- JSONObject jfeed = jfeeds.getJSONObject(fx);\r
- String stat = null;\r
- if (jfeed.optBoolean("suspend", false)) {\r
- stat = "Feed is suspended";\r
- }\r
- if (jfeed.optBoolean("deleted", false)) {\r
- stat = "Feed is deleted";\r
- }\r
- String fid = gvas(jfeed, "feedid");\r
- String fname = gvas(jfeed, "name");\r
- String fver = gvas(jfeed, "version");\r
- pfv.add(new NodeConfig.ProvFeed(fid, fname + "//" + fver, stat));\r
- JSONObject jauth = jfeed.optJSONObject("authorization");\r
- if (jauth == null) {\r
- continue;\r
- }\r
- JSONArray jeids = jauth.optJSONArray("endpoint_ids");\r
- if (jeids != null) {\r
- for (int ux = 0; ux < jeids.length(); ux++) {\r
- JSONObject ju = jeids.getJSONObject(ux);\r
- String login = gvas(ju, "id");\r
- String password = gvas(ju, "password");\r
- pfuv.add(new NodeConfig.ProvFeedUser(fid, login, NodeUtils.getAuthHdr(login, password)));\r
- }\r
- }\r
- JSONArray jeips = jauth.optJSONArray("endpoint_addrs");\r
- if (jeips != null) {\r
- for (int ix = 0; ix < jeips.length(); ix++) {\r
- String sn = gvas(jeips, ix);\r
- pfsnv.add(new NodeConfig.ProvFeedSubnet(fid, sn));\r
- }\r
- }\r
- }\r
- }\r
- JSONArray jsubs = jcfg.optJSONArray("subscriptions");\r
- if (jsubs != null) {\r
- for (int sx = 0; sx < jsubs.length(); sx++) {\r
- JSONObject jsub = jsubs.getJSONObject(sx);\r
- if (jsub.optBoolean("suspend", false)) {\r
- continue;\r
- }\r
- String sid = gvas(jsub, "subid");\r
- String fid = gvas(jsub, "feedid");\r
- JSONObject jdel = jsub.getJSONObject("delivery");\r
- String delurl = gvas(jdel, "url");\r
- String id = gvas(jdel, "user");\r
- String password = gvas(jdel, "password");\r
- boolean monly = jsub.getBoolean("metadataOnly");\r
- boolean use100 = jdel.getBoolean("use100");\r
- psv.add(new NodeConfig.ProvSubscription(sid, fid, delurl, id, NodeUtils.getAuthHdr(id, password), monly, use100));\r
- }\r
- }\r
- JSONObject jparams = jcfg.optJSONObject("parameters");\r
- if (jparams != null) {\r
- for (String pname: JSONObject.getNames(jparams)) {\r
- String pvalue = gvas(jparams, pname);\r
- if (pvalue != null) {\r
- ppv.add(new NodeConfig.ProvParam(pname, pvalue));\r
- }\r
- }\r
- String sfx = gvas(jparams, "PROV_DOMAIN");\r
- JSONArray jnodes = jparams.optJSONArray("NODES");\r
- if (jnodes != null) {\r
- for (int nx = 0; nx < jnodes.length(); nx++) {\r
- String nn = gvas(jnodes, nx);\r
- if (nn.indexOf('.') == -1) {\r
- nn = nn + "." + sfx;\r
- }\r
- pnv.add(new NodeConfig.ProvNode(nn));\r
- }\r
- }\r
- }\r
- JSONArray jingresses = jcfg.optJSONArray("ingress");\r
- if (jingresses != null) {\r
- for (int fx = 0; fx < jingresses.length(); fx++) {\r
- JSONObject jingress = jingresses.getJSONObject(fx);\r
- String fid = gvas(jingress, "feedid");\r
- String subnet = gvas(jingress, "subnet");\r
- String user = gvas(jingress, "user");\r
- String[] nodes = gvasa(jingress, "node");\r
- if (fid == null || "".equals(fid)) {\r
- continue;\r
- }\r
- if ("".equals(subnet)) {\r
- subnet = null;\r
- }\r
- if ("".equals(user)) {\r
- user = null;\r
- }\r
- pfiv.add(new NodeConfig.ProvForceIngress(fid, subnet, user, nodes));\r
- }\r
- }\r
- JSONObject jegresses = jcfg.optJSONObject("egress");\r
- if (jegresses != null && JSONObject.getNames(jegresses) != null) {\r
- for (String esid: JSONObject.getNames(jegresses)) {\r
- String enode = gvas(jegresses, esid);\r
- if (esid != null && enode != null && !"".equals(esid) && !"".equals(enode)) {\r
- pfev.add(new NodeConfig.ProvForceEgress(esid, enode));\r
- }\r
- }\r
- }\r
- JSONArray jhops = jcfg.optJSONArray("routing");\r
- if (jhops != null) {\r
- for (int fx = 0; fx < jhops.length(); fx++) {\r
- JSONObject jhop = jhops.getJSONObject(fx);\r
- String from = gvas(jhop, "from");\r
- String to = gvas(jhop, "to");\r
- String via = gvas(jhop, "via");\r
- if (from == null || to == null || via == null || "".equals(from) || "".equals(to) || "".equals(via)) {\r
- continue;\r
- }\r
- phv.add(new NodeConfig.ProvHop(from, to, via));\r
- }\r
- }\r
- } catch (JSONException jse) {\r
- NodeUtils.setIpAndFqdnForEelf("ProvData");\r
- eelflogger.error(EelfMsgs.MESSAGE_PARSING_ERROR, jse.toString());\r
- logger.error("NODE0201 Error parsing configuration data from provisioning server " + jse.toString(), jse);\r
- throw new IOException(jse.toString(), jse);\r
- }\r
- pn = pnv.toArray(new NodeConfig.ProvNode[pnv.size()]);\r
- pp = ppv.toArray(new NodeConfig.ProvParam[ppv.size()]);\r
- pf = pfv.toArray(new NodeConfig.ProvFeed[pfv.size()]);\r
- pfu = pfuv.toArray(new NodeConfig.ProvFeedUser[pfuv.size()]);\r
- pfsn = pfsnv.toArray(new NodeConfig.ProvFeedSubnet[pfsnv.size()]);\r
- ps = psv.toArray(new NodeConfig.ProvSubscription[psv.size()]);\r
- pfi = pfiv.toArray(new NodeConfig.ProvForceIngress[pfiv.size()]);\r
- pfe = pfev.toArray(new NodeConfig.ProvForceEgress[pfev.size()]);\r
- ph = phv.toArray(new NodeConfig.ProvHop[phv.size()]);\r
- }\r
- /**\r
- * Get the raw node configuration entries\r
- */\r
- public NodeConfig.ProvNode[] getNodes() {\r
- return(pn);\r
- }\r
- /**\r
- * Get the raw parameter configuration entries\r
- */\r
- public NodeConfig.ProvParam[] getParams() {\r
- return(pp);\r
- }\r
- /**\r
- * Ge the raw feed configuration entries\r
- */\r
- public NodeConfig.ProvFeed[] getFeeds() {\r
- return(pf);\r
- }\r
- /**\r
- * Get the raw feed user configuration entries\r
- */\r
- public NodeConfig.ProvFeedUser[] getFeedUsers() {\r
- return(pfu);\r
- }\r
- /**\r
- * Get the raw feed subnet configuration entries\r
- */\r
- public NodeConfig.ProvFeedSubnet[] getFeedSubnets() {\r
- return(pfsn);\r
- }\r
- /**\r
- * Get the raw subscription entries\r
- */\r
- public NodeConfig.ProvSubscription[] getSubscriptions() {\r
- return(ps);\r
- }\r
- /**\r
- * Get the raw forced ingress entries\r
- */\r
- public NodeConfig.ProvForceIngress[] getForceIngress() {\r
- return(pfi);\r
- }\r
- /**\r
- * Get the raw forced egress entries\r
- */\r
- public NodeConfig.ProvForceEgress[] getForceEgress() {\r
- return(pfe);\r
- }\r
- /**\r
- * Get the raw next hop entries\r
- */\r
- public NodeConfig.ProvHop[] getHops() {\r
- return(ph);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.io.*;
+import java.util.*;
+
+import org.json.*;
+import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;
+import org.apache.log4j.Logger;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+/**
+ * Parser for provisioning data from the provisioning server.
+ * <p>
+ * The ProvData class uses a Reader for the text configuration from the
+ * provisioning server to construct arrays of raw configuration entries.
+ */
+public class ProvData {
+ private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.ProvData");
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.ProvData");
+ private NodeConfig.ProvNode[] pn;
+ private NodeConfig.ProvParam[] pp;
+ private NodeConfig.ProvFeed[] pf;
+ private NodeConfig.ProvFeedUser[] pfu;
+ private NodeConfig.ProvFeedSubnet[] pfsn;
+ private NodeConfig.ProvSubscription[] ps;
+ private NodeConfig.ProvForceIngress[] pfi;
+ private NodeConfig.ProvForceEgress[] pfe;
+ private NodeConfig.ProvHop[] ph;
+
+ private static String[] gvasa(JSONArray a, int index) {
+ return (gvasa(a.get(index)));
+ }
+
+ private static String[] gvasa(JSONObject o, String key) {
+ return (gvasa(o.opt(key)));
+ }
+
+ private static String[] gvasa(Object o) {
+ if (o instanceof JSONArray) {
+ JSONArray a = (JSONArray) o;
+ Vector<String> v = new Vector<String>();
+ for (int i = 0; i < a.length(); i++) {
+ String s = gvas(a, i);
+ if (s != null) {
+ v.add(s);
+ }
+ }
+ return (v.toArray(new String[v.size()]));
+ } else {
+ String s = gvas(o);
+ if (s == null) {
+ return (new String[0]);
+ } else {
+ return (new String[]{s});
+ }
+ }
+ }
+
+ private static String gvas(JSONArray a, int index) {
+ return (gvas(a.get(index)));
+ }
+
+ private static String gvas(JSONObject o, String key) {
+ return (gvas(o.opt(key)));
+ }
+
+ private static String gvas(Object o) {
+ if (o instanceof Boolean || o instanceof Number || o instanceof String) {
+ return (o.toString());
+ }
+ return (null);
+ }
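+
+ // Illustrative sketch (assumed shape, hypothetical values) of the provisioning JSON consumed
+ // by the constructor below:
+ //     {
+ //       "feeds": [ { "feedid": "42", "name": "f", "version": "1.0", "suspend": false,
+ //                    "authorization": { "endpoint_ids": [ { "id": "user", "password": "pw" } ],
+ //                                       "endpoint_addrs": [ "192.0.2.0/24" ] } } ],
+ //       "subscriptions": [ { "subid": "7", "feedid": "42", "metadataOnly": false,
+ //                            "delivery": { "url": "https://subscriber.example/", "user": "u",
+ //                                          "password": "p", "use100": true } } ],
+ //       "parameters": { "PROV_DOMAIN": "example.com", "NODES": [ "node1", "node2" ] },
+ //       "ingress": [], "egress": {}, "routing": []
+ //     }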
+
+ /**
+ * Construct raw provisioning data entries from the text (JSON)
+ * provisioning document received from the provisioning server
+ *
+ * @param r The reader for the JSON text.
+ */
+ public ProvData(Reader r) throws IOException {
+ Vector<NodeConfig.ProvNode> pnv = new Vector<NodeConfig.ProvNode>();
+ Vector<NodeConfig.ProvParam> ppv = new Vector<NodeConfig.ProvParam>();
+ Vector<NodeConfig.ProvFeed> pfv = new Vector<NodeConfig.ProvFeed>();
+ Vector<NodeConfig.ProvFeedUser> pfuv = new Vector<NodeConfig.ProvFeedUser>();
+ Vector<NodeConfig.ProvFeedSubnet> pfsnv = new Vector<NodeConfig.ProvFeedSubnet>();
+ Vector<NodeConfig.ProvSubscription> psv = new Vector<NodeConfig.ProvSubscription>();
+ Vector<NodeConfig.ProvForceIngress> pfiv = new Vector<NodeConfig.ProvForceIngress>();
+ Vector<NodeConfig.ProvForceEgress> pfev = new Vector<NodeConfig.ProvForceEgress>();
+ Vector<NodeConfig.ProvHop> phv = new Vector<NodeConfig.ProvHop>();
+ try {
+ JSONTokener jtx = new JSONTokener(r);
+ JSONObject jcfg = new JSONObject(jtx);
+ char c = jtx.nextClean();
+ if (c != '\0') {
+ throw new JSONException("Spurious characters following configuration");
+ }
+ r.close();
+ JSONArray jfeeds = jcfg.optJSONArray("feeds");
+ if (jfeeds != null) {
+ for (int fx = 0; fx < jfeeds.length(); fx++) {
+ JSONObject jfeed = jfeeds.getJSONObject(fx);
+ String stat = null;
+ if (jfeed.optBoolean("suspend", false)) {
+ stat = "Feed is suspended";
+ }
+ if (jfeed.optBoolean("deleted", false)) {
+ stat = "Feed is deleted";
+ }
+ String fid = gvas(jfeed, "feedid");
+ String fname = gvas(jfeed, "name");
+ String fver = gvas(jfeed, "version");
+ pfv.add(new NodeConfig.ProvFeed(fid, fname + "//" + fver, stat));
+ JSONObject jauth = jfeed.optJSONObject("authorization");
+ if (jauth == null) {
+ continue;
+ }
+ JSONArray jeids = jauth.optJSONArray("endpoint_ids");
+ if (jeids != null) {
+ for (int ux = 0; ux < jeids.length(); ux++) {
+ JSONObject ju = jeids.getJSONObject(ux);
+ String login = gvas(ju, "id");
+ String password = gvas(ju, "password");
+ pfuv.add(new NodeConfig.ProvFeedUser(fid, login, NodeUtils.getAuthHdr(login, password)));
+ }
+ }
+ JSONArray jeips = jauth.optJSONArray("endpoint_addrs");
+ if (jeips != null) {
+ for (int ix = 0; ix < jeips.length(); ix++) {
+ String sn = gvas(jeips, ix);
+ pfsnv.add(new NodeConfig.ProvFeedSubnet(fid, sn));
+ }
+ }
+ }
+ }
+ JSONArray jsubs = jcfg.optJSONArray("subscriptions");
+ if (jsubs != null) {
+ for (int sx = 0; sx < jsubs.length(); sx++) {
+ JSONObject jsub = jsubs.getJSONObject(sx);
+ if (jsub.optBoolean("suspend", false)) {
+ continue;
+ }
+ String sid = gvas(jsub, "subid");
+ String fid = gvas(jsub, "feedid");
+ JSONObject jdel = jsub.getJSONObject("delivery");
+ String delurl = gvas(jdel, "url");
+ String id = gvas(jdel, "user");
+ String password = gvas(jdel, "password");
+ boolean monly = jsub.getBoolean("metadataOnly");
+ boolean use100 = jdel.getBoolean("use100");
+ psv.add(new NodeConfig.ProvSubscription(sid, fid, delurl, id, NodeUtils.getAuthHdr(id, password), monly, use100));
+ }
+ }
+ JSONObject jparams = jcfg.optJSONObject("parameters");
+ if (jparams != null) {
+ for (String pname : JSONObject.getNames(jparams)) {
+ String pvalue = gvas(jparams, pname);
+ if (pvalue != null) {
+ ppv.add(new NodeConfig.ProvParam(pname, pvalue));
+ }
+ }
+ String sfx = gvas(jparams, "PROV_DOMAIN");
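+ // node names without a domain part are qualified with the PROV_DOMAIN suffix below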
+ JSONArray jnodes = jparams.optJSONArray("NODES");
+ if (jnodes != null) {
+ for (int nx = 0; nx < jnodes.length(); nx++) {
+ String nn = gvas(jnodes, nx);
+ if (nn.indexOf('.') == -1) {
+ nn = nn + "." + sfx;
+ }
+ pnv.add(new NodeConfig.ProvNode(nn));
+ }
+ }
+ }
+ JSONArray jingresses = jcfg.optJSONArray("ingress");
+ if (jingresses != null) {
+ for (int fx = 0; fx < jingresses.length(); fx++) {
+ JSONObject jingress = jingresses.getJSONObject(fx);
+ String fid = gvas(jingress, "feedid");
+ String subnet = gvas(jingress, "subnet");
+ String user = gvas(jingress, "user");
+ String[] nodes = gvasa(jingress, "node");
+ if (fid == null || "".equals(fid)) {
+ continue;
+ }
+ if ("".equals(subnet)) {
+ subnet = null;
+ }
+ if ("".equals(user)) {
+ user = null;
+ }
+ pfiv.add(new NodeConfig.ProvForceIngress(fid, subnet, user, nodes));
+ }
+ }
+ JSONObject jegresses = jcfg.optJSONObject("egress");
+ if (jegresses != null && JSONObject.getNames(jegresses) != null) {
+ for (String esid : JSONObject.getNames(jegresses)) {
+ String enode = gvas(jegresses, esid);
+ if (esid != null && enode != null && !"".equals(esid) && !"".equals(enode)) {
+ pfev.add(new NodeConfig.ProvForceEgress(esid, enode));
+ }
+ }
+ }
+ JSONArray jhops = jcfg.optJSONArray("routing");
+ if (jhops != null) {
+ for (int fx = 0; fx < jhops.length(); fx++) {
+ JSONObject jhop = jhops.getJSONObject(fx);
+ String from = gvas(jhop, "from");
+ String to = gvas(jhop, "to");
+ String via = gvas(jhop, "via");
+ if (from == null || to == null || via == null || "".equals(from) || "".equals(to) || "".equals(via)) {
+ continue;
+ }
+ phv.add(new NodeConfig.ProvHop(from, to, via));
+ }
+ }
+ } catch (JSONException jse) {
+ NodeUtils.setIpAndFqdnForEelf("ProvData");
+ eelflogger.error(EelfMsgs.MESSAGE_PARSING_ERROR, jse.toString());
+ logger.error("NODE0201 Error parsing configuration data from provisioning server " + jse.toString(), jse);
+ throw new IOException(jse.toString(), jse);
+ }
+ pn = pnv.toArray(new NodeConfig.ProvNode[pnv.size()]);
+ pp = ppv.toArray(new NodeConfig.ProvParam[ppv.size()]);
+ pf = pfv.toArray(new NodeConfig.ProvFeed[pfv.size()]);
+ pfu = pfuv.toArray(new NodeConfig.ProvFeedUser[pfuv.size()]);
+ pfsn = pfsnv.toArray(new NodeConfig.ProvFeedSubnet[pfsnv.size()]);
+ ps = psv.toArray(new NodeConfig.ProvSubscription[psv.size()]);
+ pfi = pfiv.toArray(new NodeConfig.ProvForceIngress[pfiv.size()]);
+ pfe = pfev.toArray(new NodeConfig.ProvForceEgress[pfev.size()]);
+ ph = phv.toArray(new NodeConfig.ProvHop[phv.size()]);
+ }
+
+ /**
+ * Get the raw node configuration entries
+ */
+ public NodeConfig.ProvNode[] getNodes() {
+ return (pn);
+ }
+
+ /**
+ * Get the raw parameter configuration entries
+ */
+ public NodeConfig.ProvParam[] getParams() {
+ return (pp);
+ }
+
+ /**
+ * Get the raw feed configuration entries
+ */
+ public NodeConfig.ProvFeed[] getFeeds() {
+ return (pf);
+ }
+
+ /**
+ * Get the raw feed user configuration entries
+ */
+ public NodeConfig.ProvFeedUser[] getFeedUsers() {
+ return (pfu);
+ }
+
+ /**
+ * Get the raw feed subnet configuration entries
+ */
+ public NodeConfig.ProvFeedSubnet[] getFeedSubnets() {
+ return (pfsn);
+ }
+
+ /**
+ * Get the raw subscription entries
+ */
+ public NodeConfig.ProvSubscription[] getSubscriptions() {
+ return (ps);
+ }
+
+ /**
+ * Get the raw forced ingress entries
+ */
+ public NodeConfig.ProvForceIngress[] getForceIngress() {
+ return (pfi);
+ }
+
+ /**
+ * Get the raw forced egress entries
+ */
+ public NodeConfig.ProvForceEgress[] getForceEgress() {
+ return (pfe);
+ }
+
+ /**
+ * Get the raw next hop entries
+ */
+ public NodeConfig.ProvHop[] getHops() {
+ return (ph);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-/**\r
- * Generate publish IDs\r
- */\r
-public class PublishId {\r
- private long nextuid;\r
- private String myname;\r
-\r
- /**\r
- * Generate publish IDs for the specified name\r
- * @param myname Unique identifier for this publish ID generator (usually fqdn of server)\r
- */\r
- public PublishId(String myname) {\r
- this.myname = myname;\r
- }\r
- /**\r
- * Generate a Data Router Publish ID that uniquely identifies the particular invocation of the Publish API for log correlation purposes.\r
- */\r
- public synchronized String next() {\r
- long now = System.currentTimeMillis();\r
- if (now < nextuid) {\r
- now = nextuid;\r
- }\r
- nextuid = now + 1;\r
- return(now + "." + myname);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+/**
+ * Generate publish IDs
+ */
+public class PublishId {
+ private long nextuid;
+ private String myname;
+
+ /**
+ * Generate publish IDs for the specified name
+ *
+ * @param myname Unique identifier for this publish ID generator (usually fqdn of server)
+ */
+ public PublishId(String myname) {
+ this.myname = myname;
+ }
+
+ /**
+ * Generate a Data Router Publish ID that uniquely identifies the particular invocation of the Publish API for log correlation purposes.
+ */
+ public synchronized String next() {
+ long now = System.currentTimeMillis();
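+ // never reuse or go backwards: if called more than once in the same millisecond, advance past the last issued value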
+ if (now < nextuid) {
+ now = nextuid;
+ }
+ nextuid = now + 1;
+ return (now + "." + myname);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-\r
-/**\r
- * Execute an operation no more frequently than a specified interval\r
- */\r
-\r
-public abstract class RateLimitedOperation implements Runnable {\r
- private boolean marked; // a timer task exists\r
- private boolean executing; // the operation is currently in progress\r
- private boolean remark; // a request was made while the operation was in progress\r
- private Timer timer;\r
- private long last; // when the last operation started\r
- private long mininterval;\r
- /**\r
- * Create a rate limited operation\r
- * @param mininterval The minimum number of milliseconds after the last execution starts before a new execution can begin\r
- * @param timer The timer used to perform deferred executions\r
- */\r
- public RateLimitedOperation(long mininterval, Timer timer) {\r
- this.timer = timer;\r
- this.mininterval = mininterval;\r
- }\r
- private class deferred extends TimerTask {\r
- public void run() {\r
- execute();\r
- }\r
- }\r
- private synchronized void unmark() {\r
- marked = false;\r
- }\r
- private void execute() {\r
- unmark();\r
- request();\r
- }\r
- /**\r
- * Request that the operation be performed by this thread or at a later time by the timer\r
- */\r
- public void request() {\r
- if (premark()) {\r
- return;\r
- }\r
- do {\r
- run();\r
- } while (demark());\r
- }\r
- private synchronized boolean premark() {\r
- if (executing) {\r
- // currently executing - wait until it finishes\r
- remark = true;\r
- return(true);\r
- }\r
- if (marked) {\r
- // timer currently running - will run when it expires\r
- return(true);\r
- }\r
- long now = System.currentTimeMillis();\r
- if (last + mininterval > now) {\r
- // too soon - schedule a timer\r
- marked = true;\r
- timer.schedule(new deferred(), last + mininterval - now);\r
- return(true);\r
- }\r
- last = now;\r
- executing = true;\r
- // start execution\r
- return(false);\r
- }\r
- private synchronized boolean demark() {\r
- executing = false;\r
- if (remark) {\r
- remark = false;\r
- return(!premark());\r
- }\r
- return(false);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+
+/**
+ * Execute an operation no more frequently than a specified interval
+ */
+
+public abstract class RateLimitedOperation implements Runnable {
+ private boolean marked; // a timer task exists
+ private boolean executing; // the operation is currently in progress
+ private boolean remark; // a request was made while the operation was in progress
+ private Timer timer;
+ private long last; // when the last operation started
+ private long mininterval;
+
+ /**
+ * Create a rate limited operation
+ *
+ * @param mininterval The minimum number of milliseconds after the last execution starts before a new execution can begin
+ * @param timer The timer used to perform deferred executions
+ */
+ public RateLimitedOperation(long mininterval, Timer timer) {
+ this.timer = timer;
+ this.mininterval = mininterval;
+ }
+
+ private class deferred extends TimerTask {
+ public void run() {
+ execute();
+ }
+ }
+
+ private synchronized void unmark() {
+ marked = false;
+ }
+
+ private void execute() {
+ unmark();
+ request();
+ }
+
+ /**
+ * Request that the operation be performed by this thread or at a later time by the timer
+ */
+ public void request() {
+ if (premark()) {
+ return;
+ }
+ do {
+ run();
+ } while (demark());
+ }
+
+ private synchronized boolean premark() {
+ if (executing) {
+ // currently executing - wait until it finishes
+ remark = true;
+ return (true);
+ }
+ if (marked) {
+ // timer currently running - will run when it expires
+ return (true);
+ }
+ long now = System.currentTimeMillis();
+ if (last + mininterval > now) {
+ // too soon - schedule a timer
+ marked = true;
+ timer.schedule(new deferred(), last + mininterval - now);
+ return (true);
+ }
+ last = now;
+ executing = true;
+ // start execution
+ return (false);
+ }
+
+ private synchronized boolean demark() {
+ executing = false;
+ if (remark) {
+ remark = false;
+ return (!premark());
+ }
+ return (false);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-import java.io.*;\r
-\r
-/**\r
- * Track redirections of subscriptions\r
- */\r
-public class RedirManager {\r
- private Hashtable<String, String> sid2primary = new Hashtable<String, String>();\r
- private Hashtable<String, String> sid2secondary = new Hashtable<String, String>();\r
- private String redirfile;\r
- RateLimitedOperation op;\r
- /**\r
- * Create a mechanism for maintaining subscription redirections.\r
- * @param redirfile The file to store the redirection information.\r
- * @param mininterval The minimum number of milliseconds between writes to the redirection information file.\r
- * @param timer The timer thread used to run delayed file writes.\r
- */\r
- public RedirManager(String redirfile, long mininterval, Timer timer) {\r
- this.redirfile = redirfile;\r
- op = new RateLimitedOperation(mininterval, timer) {\r
- public void run() {\r
- try {\r
- StringBuffer sb = new StringBuffer();\r
- for (String s: sid2primary.keySet()) {\r
- sb.append(s).append(' ').append(sid2primary.get(s)).append(' ').append(sid2secondary.get(s)).append('\n');\r
- }\r
- OutputStream os = new FileOutputStream(RedirManager.this.redirfile);\r
- os.write(sb.toString().getBytes());\r
- os.close();\r
- } catch (Exception e) {\r
- }\r
- }\r
- };\r
- try {\r
- String s;\r
- BufferedReader br = new BufferedReader(new FileReader(redirfile));\r
- while ((s = br.readLine()) != null) {\r
- s = s.trim();\r
- String[] sx = s.split(" ");\r
- if (s.startsWith("#") || sx.length != 3) {\r
- continue;\r
- }\r
- sid2primary.put(sx[0], sx[1]);\r
- sid2secondary.put(sx[0], sx[2]);\r
- }\r
- br.close();\r
- } catch (Exception e) {\r
- // missing file is normal\r
- }\r
- }\r
- /**\r
- * Set up redirection. If a request is to be sent to subscription ID sid, and that is configured to go to URL primary, instead, go to secondary.\r
- * @param sid The subscription ID to be redirected\r
- * @param primary The URL associated with that subscription ID\r
- * @param secondary The replacement URL to use instead\r
- */\r
- public synchronized void redirect(String sid, String primary, String secondary) {\r
- sid2primary.put(sid, primary);\r
- sid2secondary.put(sid, secondary);\r
- op.request();\r
- }\r
- /**\r
- * Cancel redirection. If a request is to be sent to subscription ID sid, send it to its primary URL.\r
- * @param sid The subscription ID to remove from the table.\r
- */\r
- public synchronized void forget(String sid) {\r
- sid2primary.remove(sid);\r
- sid2secondary.remove(sid);\r
- op.request();\r
- }\r
- /**\r
- * Look up where to send a subscription. If the primary has changed or there is no redirection, use the primary. Otherwise, redirect to the secondary URL.\r
- * @param sid The subscription ID to look up.\r
- * @param primary The configured primary URL.\r
- * @return The destination URL to really use.\r
- */\r
- public synchronized String lookup(String sid, String primary) {\r
- String oprim = sid2primary.get(sid);\r
- if (primary.equals(oprim)) {\r
- return(sid2secondary.get(sid));\r
- } else if (oprim != null) {\r
- forget(sid);\r
- } \r
- return(primary);\r
- }\r
- /**\r
- * Is a subscription redirected?\r
- */\r
- public synchronized boolean isRedirected(String sid) {\r
- return(sid != null && sid2secondary.get(sid) != null);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+import java.io.*;
+
+/**
+ * Track redirections of subscriptions
+ */
+public class RedirManager {
+ private Hashtable<String, String> sid2primary = new Hashtable<String, String>();
+ private Hashtable<String, String> sid2secondary = new Hashtable<String, String>();
+ private String redirfile;
+ RateLimitedOperation op;
+
+ /**
+ * Create a mechanism for maintaining subscription redirections.
+ *
+ * @param redirfile The file to store the redirection information.
+ * @param mininterval The minimum number of milliseconds between writes to the redirection information file.
+ * @param timer The timer thread used to run delayed file writes.
+ */
+ public RedirManager(String redirfile, long mininterval, Timer timer) {
+ this.redirfile = redirfile;
+ op = new RateLimitedOperation(mininterval, timer) {
+ public void run() {
+ try {
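+ // each line of the redirection file: <subscription id> <primary url> <secondary url>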
+ StringBuffer sb = new StringBuffer();
+ for (String s : sid2primary.keySet()) {
+ sb.append(s).append(' ').append(sid2primary.get(s)).append(' ').append(sid2secondary.get(s)).append('\n');
+ }
+ OutputStream os = new FileOutputStream(RedirManager.this.redirfile);
+ os.write(sb.toString().getBytes());
+ os.close();
+ } catch (Exception e) {
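+ // best-effort persistence: a failure to write the redirection file is silently ignored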
+ }
+ }
+ };
+ try {
+ String s;
+ BufferedReader br = new BufferedReader(new FileReader(redirfile));
+ while ((s = br.readLine()) != null) {
+ s = s.trim();
+ String[] sx = s.split(" ");
+ if (s.startsWith("#") || sx.length != 3) {
+ continue;
+ }
+ sid2primary.put(sx[0], sx[1]);
+ sid2secondary.put(sx[0], sx[2]);
+ }
+ br.close();
+ } catch (Exception e) {
+ // missing file is normal
+ }
+ }
+
+ /**
+ * Set up redirection. If a request is to be sent to subscription ID sid and that subscription is configured to go to URL primary, send it to secondary instead.
+ *
+ * @param sid The subscription ID to be redirected
+ * @param primary The URL associated with that subscription ID
+ * @param secondary The replacement URL to use instead
+ */
+ public synchronized void redirect(String sid, String primary, String secondary) {
+ sid2primary.put(sid, primary);
+ sid2secondary.put(sid, secondary);
+ op.request();
+ }
+
+ /**
+ * Cancel redirection. If a request is to be sent to subscription ID sid, send it to its primary URL.
+ *
+ * @param sid The subscription ID to remove from the table.
+ */
+ public synchronized void forget(String sid) {
+ sid2primary.remove(sid);
+ sid2secondary.remove(sid);
+ op.request();
+ }
+
+ /**
+ * Look up where to send a subscription. If the primary has changed or there is no redirection, use the primary. Otherwise, redirect to the secondary URL.
+ *
+ * @param sid The subscription ID to look up.
+ * @param primary The configured primary URL.
+ * @return The destination URL to really use.
+ */
+ public synchronized String lookup(String sid, String primary) {
+ String oprim = sid2primary.get(sid);
+ if (primary.equals(oprim)) {
+ return (sid2secondary.get(sid));
+ } else if (oprim != null) {
+ forget(sid);
+ }
+ return (primary);
+ }
+
+ /**
+ * Is a subscription redirected?
+ */
+ public synchronized boolean isRedirected(String sid) {
+ return (sid != null && sid2secondary.get(sid) != null);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.regex.*;\r
-import java.util.*;\r
-import java.io.*;\r
-import java.nio.file.*;\r
-import java.text.*;\r
-\r
-/**\r
- * Logging for data router delivery events (PUB/DEL/EXP)\r
- */\r
-public class StatusLog {\r
- private static StatusLog instance = new StatusLog();\r
- private HashSet<String> toship = new HashSet<String>();\r
- private SimpleDateFormat filedate;\r
- private String prefix = "logs/events";\r
- private String suffix = ".log";\r
- private String plainfile;\r
- private String curfile;\r
- private long nexttime;\r
- private OutputStream os;\r
- private long intvl;\r
- private NodeConfigManager config = NodeConfigManager.getInstance();\r
- {\r
- try { filedate = new SimpleDateFormat("-yyyyMMddHHmm"); } catch (Exception e) {}\r
- }\r
- /**\r
- * Parse an interval of the form xxhyymzzs and round it to the nearest whole fraction of 24 hours. If no units are specified, assume seconds.\r
- */\r
- public static long parseInterval(String interval, int def) {\r
- try {\r
- Matcher m = Pattern.compile("(?:(\\d+)[Hh])?(?:(\\d+)[Mm])?(?:(\\d+)[Ss]?)?").matcher(interval);\r
- if (m.matches()) {\r
- int dur = 0;\r
- String x = m.group(1);\r
- if (x != null) {\r
- dur += 3600 * Integer.parseInt(x);\r
- }\r
- x = m.group(2);\r
- if (x != null) {\r
- dur += 60 * Integer.parseInt(x);\r
- }\r
- x = m.group(3);\r
- if (x != null) {\r
- dur += Integer.parseInt(x);\r
- }\r
- if (dur < 60) {\r
- dur = 60;\r
- }\r
- int best = 86400;\r
- int dist = best - dur;\r
- if (dur > best) {\r
- dist = dur - best;\r
- }\r
- int base = 1;\r
- for (int i = 0; i < 8; i++) {\r
- int base2 = base;\r
- base *= 2;\r
- for (int j = 0; j < 4; j++) {\r
- int base3 = base2;\r
- base2 *= 3;\r
- for (int k = 0; k < 3; k++) {\r
- int cur = base3;\r
- base3 *= 5;\r
- int ndist = cur - dur;\r
- if (dur > cur) {\r
- ndist = dur - cur;\r
- }\r
- if (ndist < dist) {\r
- best = cur;\r
- dist = ndist;\r
- }\r
- }\r
- }\r
- }\r
- def = best * 1000;\r
- }\r
- } catch (Exception e) {\r
- }\r
- return(def);\r
- }\r
- private synchronized void checkRoll(long now) throws IOException {\r
- if (now >= nexttime) {\r
- if (os != null) {\r
- os.close();\r
- os = null;\r
- }\r
- intvl = parseInterval(config.getEventLogInterval(), 300000);\r
- prefix = config.getEventLogPrefix();\r
- suffix = config.getEventLogSuffix();\r
- nexttime = now - now % intvl + intvl;\r
- curfile = prefix + filedate.format(new Date(nexttime - intvl)) + suffix;\r
- plainfile = prefix + suffix;\r
- notify();\r
- }\r
- }\r
- /**\r
- * Get the name of the current log file\r
- * @return The full path name of the current event log file\r
- */\r
- public static synchronized String getCurLogFile() {\r
- try {\r
- instance.checkRoll(System.currentTimeMillis());\r
- } catch (Exception e) {\r
- }\r
- return(instance.curfile);\r
- }\r
- private synchronized void log(String s) {\r
- try {\r
- long now = System.currentTimeMillis();\r
- checkRoll(now);\r
- if (os == null) {\r
- os = new FileOutputStream(curfile, true);\r
- (new File(plainfile)).delete();\r
- Files.createLink(Paths.get(plainfile), Paths.get(curfile));\r
- }\r
- os.write((NodeUtils.logts(new Date(now)) + '|' + s + '\n').getBytes());\r
- os.flush();\r
- } catch (IOException ioe) {\r
- }\r
- }\r
- /**\r
- * Log a received publication attempt.\r
- * @param pubid The publish ID assigned by the node\r
- * @param feedid The feed id given by the publisher\r
- * @param requrl The URL of the received request\r
- * @param method The method (DELETE or PUT) in the received request\r
- * @param ctype The content type (if method is PUT and clen > 0)\r
- * @param clen The content length (if method is PUT)\r
- * @param srcip The IP address of the publisher\r
- * @param user The identity of the publisher\r
- * @param status The status returned to the publisher\r
- */\r
- public static void logPub(String pubid, String feedid, String requrl, String method, String ctype, long clen, String srcip, String user, int status) {\r
- instance.log("PUB|" + pubid + "|" + feedid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + srcip + "|" + user + "|" + status);\r
- }\r
- /**\r
- * Log a data transfer error receiving a publication attempt\r
- * @param pubid The publish ID assigned by the node\r
- * @param feedid The feed id given by the publisher\r
- * @param requrl The URL of the received request\r
- * @param method The method (DELETE or PUT) in the received request\r
- * @param ctype The content type (if method is PUT and clen > 0)\r
- * @param clen The expected content length (if method is PUT)\r
- * @param rcvd The content length received\r
- * @param srcip The IP address of the publisher\r
- * @param user The identity of the publisher\r
- * @param error The error message from the IO exception\r
- */\r
- public static void logPubFail(String pubid, String feedid, String requrl, String method, String ctype, long clen, long rcvd, String srcip, String user, String error) {\r
- instance.log("PBF|" + pubid + "|" + feedid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + rcvd + "|" + srcip + "|" + user + "|" + error);\r
- }\r
- /**\r
- * Log a delivery attempt.\r
- * @param pubid The publish ID assigned by the node\r
- * @param feedid The feed ID\r
- * @param subid The (space delimited list of) subscription ID\r
- * @param requrl The URL used in the attempt\r
- * @param method The method (DELETE or PUT) in the attempt\r
- * @param ctype The content type (if method is PUT, not metaonly, and clen > 0)\r
- * @param clen The content length (if PUT and not metaonly)\r
- * @param user The identity given to the subscriber\r
- * @param status The status returned by the subscriber or -1 if an exeception occured trying to connect\r
- * @param xpubid The publish ID returned by the subscriber\r
- */\r
- public static void logDel(String pubid, String feedid, String subid, String requrl, String method, String ctype, long clen, String user, int status, String xpubid) {\r
- if (feedid == null) {\r
- return;\r
- }\r
- instance.log("DEL|" + pubid + "|" + feedid + "|" + subid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + user + "|" + status + "|" + xpubid);\r
- }\r
- /**\r
- * Log delivery attempts expired\r
- * @param pubid The publish ID assigned by the node\r
- * @param feedid The feed ID\r
- * @param subid The (space delimited list of) subscription ID\r
- * @param requrl The URL that would be delivered to\r
- * @param method The method (DELETE or PUT) in the request\r
- * @param ctype The content type (if method is PUT, not metaonly, and clen > 0)\r
- * @param clen The content length (if PUT and not metaonly)\r
- * @param reason The reason the attempts were discontinued\r
- * @param attempts The number of attempts made\r
- */\r
- public static void logExp(String pubid, String feedid, String subid, String requrl, String method, String ctype, long clen, String reason, int attempts) {\r
- if (feedid == null) {\r
- return;\r
- }\r
- instance.log("EXP|" + pubid + "|" + feedid + "|" + subid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + reason + "|" + attempts);\r
- }\r
- /**\r
- * Log extra statistics about unsuccessful delivery attempts.\r
- * @param pubid The publish ID assigned by the node\r
- * @param feedid The feed ID\r
- * @param subid The (space delimited list of) subscription ID\r
- * @param clen The content length\r
- * @param sent The # of bytes sent or -1 if subscriber returned an error instead of 100 Continue, otherwise, the number of bytes sent before an error occurred.\r
- */\r
- public static void logDelExtra(String pubid, String feedid, String subid, long clen, long sent) {\r
- if (feedid == null) {\r
- return;\r
- }\r
- instance.log("DLX|" + pubid + "|" + feedid + "|" + subid + "|" + clen + "|" + sent);\r
- }\r
- private StatusLog() {\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.regex.*;
+import java.util.*;
+import java.io.*;
+import java.nio.file.*;
+import java.text.*;
+
+/**
+ * Logging for data router delivery events (PUB/DEL/EXP)
+ */
+public class StatusLog {
+ private static StatusLog instance = new StatusLog();
+ private HashSet<String> toship = new HashSet<String>();
+ private SimpleDateFormat filedate;
+ private String prefix = "logs/events";
+ private String suffix = ".log";
+ private String plainfile;
+ private String curfile;
+ private long nexttime;
+ private OutputStream os;
+ private long intvl;
+ private NodeConfigManager config = NodeConfigManager.getInstance();
+
+ {
+ try {
+ filedate = new SimpleDateFormat("-yyyyMMddHHmm");
+ } catch (Exception e) {
+ }
+ }
+
+ /**
+ * Parse an interval of the form xxhyymzzs and round it to the nearest whole fraction of 24 hours. If no units are specified, assume seconds.
+ */
+ public static long parseInterval(String interval, int def) {
+ try {
+ Matcher m = Pattern.compile("(?:(\\d+)[Hh])?(?:(\\d+)[Mm])?(?:(\\d+)[Ss]?)?").matcher(interval);
+ if (m.matches()) {
+ int dur = 0;
+ String x = m.group(1);
+ if (x != null) {
+ dur += 3600 * Integer.parseInt(x);
+ }
+ x = m.group(2);
+ if (x != null) {
+ dur += 60 * Integer.parseInt(x);
+ }
+ x = m.group(3);
+ if (x != null) {
+ dur += Integer.parseInt(x);
+ }
+ if (dur < 60) {
+ dur = 60;
+ }
+ int best = 86400;
+ int dist = best - dur;
+ if (dur > best) {
+ dist = dur - best;
+ }
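+ // pick the candidate 2^i * 3^j * 5^k seconds (i<8, j<4, k<3), each a divisor of 86400, closest to the requested duration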
+ int base = 1;
+ for (int i = 0; i < 8; i++) {
+ int base2 = base;
+ base *= 2;
+ for (int j = 0; j < 4; j++) {
+ int base3 = base2;
+ base2 *= 3;
+ for (int k = 0; k < 3; k++) {
+ int cur = base3;
+ base3 *= 5;
+ int ndist = cur - dur;
+ if (dur > cur) {
+ ndist = dur - cur;
+ }
+ if (ndist < dist) {
+ best = cur;
+ dist = ndist;
+ }
+ }
+ }
+ }
+ def = best * 1000;
+ }
+ } catch (Exception e) {
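+ // on any parse error, fall back to the supplied default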
+ }
+ return (def);
+ }
+
+ private synchronized void checkRoll(long now) throws IOException {
+ if (now >= nexttime) {
+ if (os != null) {
+ os.close();
+ os = null;
+ }
+ intvl = parseInterval(config.getEventLogInterval(), 300000);
+ prefix = config.getEventLogPrefix();
+ suffix = config.getEventLogSuffix();
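+ // align the next roll to the start of the next interval boundary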
+ nexttime = now - now % intvl + intvl;
+ curfile = prefix + filedate.format(new Date(nexttime - intvl)) + suffix;
+ plainfile = prefix + suffix;
+ notify();
+ }
+ }
+
+ /**
+ * Get the name of the current log file
+ *
+ * @return The full path name of the current event log file
+ */
+ public static synchronized String getCurLogFile() {
+ try {
+ instance.checkRoll(System.currentTimeMillis());
+ } catch (Exception e) {
+ }
+ return (instance.curfile);
+ }
+
+ private synchronized void log(String s) {
+ try {
+ long now = System.currentTimeMillis();
+ checkRoll(now);
+ if (os == null) {
+ os = new FileOutputStream(curfile, true);
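+ // recreate the plain-named hard link so it points at the current (datestamped) log file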
+ (new File(plainfile)).delete();
+ Files.createLink(Paths.get(plainfile), Paths.get(curfile));
+ }
+ os.write((NodeUtils.logts(new Date(now)) + '|' + s + '\n').getBytes());
+ os.flush();
+ } catch (IOException ioe) {
+ }
+ }
+
+ /**
+ * Log a received publication attempt.
+ *
+ * @param pubid The publish ID assigned by the node
+ * @param feedid The feed id given by the publisher
+ * @param requrl The URL of the received request
+ * @param method The method (DELETE or PUT) in the received request
+ * @param ctype The content type (if method is PUT and clen > 0)
+ * @param clen The content length (if method is PUT)
+ * @param srcip The IP address of the publisher
+ * @param user The identity of the publisher
+ * @param status The status returned to the publisher
+ */
+ public static void logPub(String pubid, String feedid, String requrl, String method, String ctype, long clen, String srcip, String user, int status) {
+ instance.log("PUB|" + pubid + "|" + feedid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + srcip + "|" + user + "|" + status);
+ }
+
+ /**
+ * Log a data transfer error receiving a publication attempt
+ *
+ * @param pubid The publish ID assigned by the node
+ * @param feedid The feed id given by the publisher
+ * @param requrl The URL of the received request
+ * @param method The method (DELETE or PUT) in the received request
+ * @param ctype The content type (if method is PUT and clen > 0)
+ * @param clen The expected content length (if method is PUT)
+ * @param rcvd The content length received
+ * @param srcip The IP address of the publisher
+ * @param user The identity of the publisher
+ * @param error The error message from the IO exception
+ */
+ public static void logPubFail(String pubid, String feedid, String requrl, String method, String ctype, long clen, long rcvd, String srcip, String user, String error) {
+ instance.log("PBF|" + pubid + "|" + feedid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + rcvd + "|" + srcip + "|" + user + "|" + error);
+ }
+
+ /**
+ * Log a delivery attempt.
+ *
+ * @param pubid The publish ID assigned by the node
+ * @param feedid The feed ID
+ * @param subid The (space delimited list of) subscription ID
+ * @param requrl The URL used in the attempt
+ * @param method The method (DELETE or PUT) in the attempt
+ * @param ctype The content type (if method is PUT, not metaonly, and clen > 0)
+ * @param clen The content length (if PUT and not metaonly)
+ * @param user The identity given to the subscriber
+ * @param status The status returned by the subscriber, or -1 if an exception occurred while trying to connect
+ * @param xpubid The publish ID returned by the subscriber
+ */
+ public static void logDel(String pubid, String feedid, String subid, String requrl, String method, String ctype, long clen, String user, int status, String xpubid) {
+ if (feedid == null) {
+ return;
+ }
+ instance.log("DEL|" + pubid + "|" + feedid + "|" + subid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + user + "|" + status + "|" + xpubid);
+ }
+
+ /**
+ * Log delivery attempts expired
+ *
+ * @param pubid The publish ID assigned by the node
+ * @param feedid The feed ID
+ * @param subid The (space delimited list of) subscription ID
+ * @param requrl The URL that would be delivered to
+ * @param method The method (DELETE or PUT) in the request
+ * @param ctype The content type (if method is PUT, not metaonly, and clen > 0)
+ * @param clen The content length (if PUT and not metaonly)
+ * @param reason The reason the attempts were discontinued
+ * @param attempts The number of attempts made
+ */
+ public static void logExp(String pubid, String feedid, String subid, String requrl, String method, String ctype, long clen, String reason, int attempts) {
+ if (feedid == null) {
+ return;
+ }
+ instance.log("EXP|" + pubid + "|" + feedid + "|" + subid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + reason + "|" + attempts);
+ }
+
+ /**
+ * Log extra statistics about unsuccessful delivery attempts.
+ *
+ * @param pubid The publish ID assigned by the node
+ * @param feedid The feed ID
+ * @param subid The (space delimited list of) subscription ID
+ * @param clen The content length
+ * @param sent The number of bytes sent, or -1 if the subscriber returned an error instead of 100 Continue; otherwise, the number of bytes sent before an error occurred.
+ */
+ public static void logDelExtra(String pubid, String feedid, String subid, long clen, long sent) {
+ if (feedid == null) {
+ return;
+ }
+ instance.log("DLX|" + pubid + "|" + feedid + "|" + subid + "|" + clen + "|" + sent);
+ }
+
+ private StatusLog() {
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.net.*;\r
-\r
-/**\r
- * Compare IP addresses as byte arrays to a subnet specified as a CIDR\r
- */\r
-public class SubnetMatcher {\r
- private byte[] sn;\r
- private int len;\r
- private int mask;\r
- /**\r
- * Construct a subnet matcher given a CIDR\r
- * @param subnet The CIDR to match\r
- */\r
- public SubnetMatcher(String subnet) {\r
- int i = subnet.lastIndexOf('/');\r
- if (i == -1) {\r
- sn = NodeUtils.getInetAddress(subnet);\r
- len = sn.length;\r
- } else {\r
- len = Integer.parseInt(subnet.substring(i + 1));\r
- sn = NodeUtils.getInetAddress(subnet.substring(0, i));\r
- mask = ((0xff00) >> (len % 8)) & 0xff;\r
- len /= 8;\r
- }\r
- }\r
- /**\r
- * Is the IP address in the CIDR?\r
- * @param addr the IP address as bytes in network byte order\r
- * @return true if the IP address matches.\r
- */\r
- public boolean matches(byte[] addr) {\r
- if (addr.length != sn.length) {\r
- return(false);\r
- }\r
- for (int i = 0; i < len; i++) {\r
- if (addr[i] != sn[i]) {\r
- return(false);\r
- }\r
- }\r
- if (mask != 0 && ((addr[len] ^ sn[len]) & mask) != 0) {\r
- return(false);\r
- }\r
- return(true);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.net.*;
+
+/**
+ * Compare IP addresses as byte arrays to a subnet specified as a CIDR
+ */
+public class SubnetMatcher {
+ private byte[] sn;
+ private int len;
+ private int mask;
+
+ /**
+ * Construct a subnet matcher given a CIDR
+ *
+ * @param subnet The CIDR to match
+ */
+ public SubnetMatcher(String subnet) {
+ int i = subnet.lastIndexOf('/');
+ if (i == -1) {
+ sn = NodeUtils.getInetAddress(subnet);
+ len = sn.length;
+ } else {
+ len = Integer.parseInt(subnet.substring(i + 1));
+ sn = NodeUtils.getInetAddress(subnet.substring(0, i));
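+ // mask covers the high-order (prefix length % 8) bits of the first partial byte; whole bytes are compared directly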
+ mask = ((0xff00) >> (len % 8)) & 0xff;
+ len /= 8;
+ }
+ }
+
+ /**
+ * Is the IP address in the CIDR?
+ *
+ * @param addr the IP address as bytes in network byte order
+ * @return true if the IP address matches.
+ */
+ public boolean matches(byte[] addr) {
+ if (addr.length != sn.length) {
+ return (false);
+ }
+ for (int i = 0; i < len; i++) {
+ if (addr[i] != sn[i]) {
+ return (false);
+ }
+ }
+ if (mask != 0 && ((addr[len] ^ sn[len]) & mask) != 0) {
+ return (false);
+ }
+ return (true);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-/**\r
- * A destination to deliver a message\r
- */\r
-public class Target {\r
- private DestInfo destinfo;\r
- private String routing;\r
- /**\r
- * A destination to deliver a message\r
- * @param destinfo Either info for a subscription ID or info for a node-to-node transfer\r
- * @param routing For a node-to-node transfer, what to do when it gets there.\r
- */\r
- public Target(DestInfo destinfo, String routing) {\r
- this.destinfo = destinfo;\r
- this.routing = routing;\r
- }\r
- /**\r
- * Add additional routing\r
- */\r
- public void addRouting(String routing) {\r
- this.routing = this.routing + " " + routing;\r
- }\r
- /**\r
- * Get the destination information for this target\r
- */\r
- public DestInfo getDestInfo() {\r
- return(destinfo);\r
- }\r
- /**\r
- * Get the next hop information for this target\r
- */\r
- public String getRouting() {\r
- return(routing);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+/**
+ * A destination to deliver a message
+ */
+public class Target {
+ private DestInfo destinfo;
+ private String routing;
+
+ /**
+ * A destination to deliver a message
+ *
+ * @param destinfo Either info for a subscription ID or info for a node-to-node transfer
+ * @param routing For a node-to-node transfer, what to do when it gets there.
+ */
+ public Target(DestInfo destinfo, String routing) {
+ this.destinfo = destinfo;
+ this.routing = routing;
+ }
+
+ /**
+ * Add additional routing
+ */
+ public void addRouting(String routing) {
+ this.routing = this.routing + " " + routing;
+ }
+
+ /**
+ * Get the destination information for this target
+ */
+ public DestInfo getDestInfo() {
+ return (destinfo);
+ }
+
+ /**
+ * Get the next hop information for this target
+ */
+ public String getRouting() {
+ return (routing);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-\r
-/**\r
- * Manage a list of tasks to be executed when an event occurs.\r
- * This makes the following guarantees:\r
- * <ul>\r
- * <li>Tasks can be safely added and removed in the middle of a run.</li>\r
- * <li>No task will be returned more than once during a run.</li>\r
- * <li>No task will be returned when it is not, at that moment, in the list of tasks.</li>\r
- * <li>At the moment when next() returns null, all tasks on the list have been returned during the run.</li>\r
- * <li>Initially and once next() returns null during a run, next() will continue to return null until startRun() is called.\r
- * </ul>\r
- */\r
-public class TaskList {\r
- private Iterator<Runnable> runlist;\r
- private HashSet<Runnable> tasks = new HashSet<Runnable>();\r
- private HashSet<Runnable> togo;\r
- private HashSet<Runnable> sofar;\r
- private HashSet<Runnable> added;\r
- private HashSet<Runnable> removed;\r
- /**\r
- * Construct a new TaskList\r
- */\r
- public TaskList() {\r
- }\r
- /**\r
- * Start executing the sequence of tasks.\r
- */\r
- public synchronized void startRun() {\r
- sofar = new HashSet<Runnable>();\r
- added = new HashSet<Runnable>();\r
- removed = new HashSet<Runnable>();\r
- togo = new HashSet<Runnable>(tasks);\r
- runlist = togo.iterator();\r
- }\r
- /**\r
- * Get the next task to execute\r
- */\r
- public synchronized Runnable next() {\r
- while (runlist != null) {\r
- if (runlist.hasNext()) {\r
- Runnable task = runlist.next();\r
- if (removed.contains(task)) {\r
- continue;\r
- }\r
- if (sofar.contains(task)) {\r
- continue;\r
- }\r
- sofar.add(task);\r
- return(task);\r
- }\r
- if (added.size() != 0) {\r
- togo = added;\r
- added = new HashSet<Runnable>();\r
- removed.clear();\r
- runlist = togo.iterator();\r
- continue;\r
- }\r
- togo = null;\r
- added = null;\r
- removed = null;\r
- sofar = null;\r
- runlist = null;\r
- }\r
- return(null);\r
- }\r
- /**\r
- * Add a task to the list of tasks to run whenever the event occurs.\r
- */\r
- public synchronized void addTask(Runnable task) {\r
- if (runlist != null) {\r
- added.add(task);\r
- removed.remove(task);\r
- }\r
- tasks.add(task);\r
- }\r
- /**\r
- * Remove a task from the list of tasks to run whenever the event occurs.\r
- */\r
- public synchronized void removeTask(Runnable task) {\r
- if (runlist != null) {\r
- removed.add(task);\r
- added.remove(task);\r
- }\r
- tasks.remove(task);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+
+/**
+ * Manage a list of tasks to be executed when an event occurs.
+ * This makes the following guarantees:
+ * <ul>
+ * <li>Tasks can be safely added and removed in the middle of a run.</li>
+ * <li>No task will be returned more than once during a run.</li>
+ * <li>No task will be returned when it is not, at that moment, in the list of tasks.</li>
+ * <li>At the moment when next() returns null, all tasks on the list have been returned during the run.</li>
+ * <li>Initially, and once next() returns null during a run, next() will continue to return null until startRun() is called again.</li>
+ * </ul>
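+ * <p>
+ * Illustrative sketch only (not part of the original source): one way a caller might drive a run.
+ * The variable names below are assumptions.
+ * </p>
+ * <pre>
+ *     TaskList tasks = new TaskList();
+ *     tasks.addTask(() -> System.out.println("task ran"));
+ *     tasks.startRun();
+ *     Runnable task;
+ *     while ((task = tasks.next()) != null) {
+ *         task.run();
+ *     }
+ * </pre>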
+ */
+public class TaskList {
+ private Iterator<Runnable> runlist;
+ private HashSet<Runnable> tasks = new HashSet<Runnable>();
+ private HashSet<Runnable> togo;
+ private HashSet<Runnable> sofar;
+ private HashSet<Runnable> added;
+ private HashSet<Runnable> removed;
+
+ /**
+ * Construct a new TaskList.
+ */
+ public TaskList() {
+ }
+
+ /**
+ * Start executing the sequence of tasks.
+ */
+ public synchronized void startRun() {
+ sofar = new HashSet<Runnable>();
+ added = new HashSet<Runnable>();
+ removed = new HashSet<Runnable>();
+ togo = new HashSet<Runnable>(tasks);
+ runlist = togo.iterator();
+ }
+
+ /**
+ * Get the next task to execute.
+ */
+ public synchronized Runnable next() {
+ while (runlist != null) {
+ if (runlist.hasNext()) {
+ Runnable task = runlist.next();
+ if (removed.contains(task)) {
+ continue;
+ }
+ if (sofar.contains(task)) {
+ continue;
+ }
+ sofar.add(task);
+ return (task);
+ }
+ if (added.size() != 0) {
+ togo = added;
+ added = new HashSet<Runnable>();
+ removed.clear();
+ runlist = togo.iterator();
+ continue;
+ }
+ togo = null;
+ added = null;
+ removed = null;
+ sofar = null;
+ runlist = null;
+ }
+ return (null);
+ }
+
+ /**
+ * Add a task to the list of tasks to run whenever the event occurs.
+ */
+ public synchronized void addTask(Runnable task) {
+ if (runlist != null) {
+ added.add(task);
+ removed.remove(task);
+ }
+ tasks.add(task);
+ }
+
+ /**
+ * Remove a task from the list of tasks to run whenever the event occurs.
+ */
+ public synchronized void removeTask(Runnable task) {
+ if (runlist != null) {
+ removed.add(task);
+ added.remove(task);
+ }
+ tasks.remove(task);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-package org.onap.dmaap.datarouter.node.eelf;\r
-\r
-import ch.qos.logback.classic.spi.ILoggingEvent;\r
-import ch.qos.logback.core.filter.Filter;\r
-import ch.qos.logback.core.spi.FilterReply;\r
-\r
-/*\r
- * When EELF functionality added it default started logging Jetty logs as well which in turn stopped existing functionality of logging jetty statements in node.log\r
- * added code in logback.xml to add jetty statements in node.log.\r
- * This class removes extran EELF statements from node.log since they are being logged in apicalls.log \r
- */\r
-public class EELFFilter extends Filter<ILoggingEvent>{\r
- @Override\r
- public FilterReply decide(ILoggingEvent event) { \r
- if (event.getMessage().contains("EELF")) {\r
- return FilterReply.DENY;\r
- } else {\r
- return FilterReply.ACCEPT;\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+package org.onap.dmaap.datarouter.node.eelf;
+
+import ch.qos.logback.classic.spi.ILoggingEvent;
+import ch.qos.logback.core.filter.Filter;
+import ch.qos.logback.core.spi.FilterReply;
+
+/*
+ * When EELF support was added, it started logging Jetty events by default, which broke the existing
+ * behavior of writing Jetty statements to node.log; logback.xml was updated so that Jetty statements
+ * are written to node.log again.
+ * This class removes extraneous EELF statements from node.log, since they are already logged in apicalls.log.
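+ *
+ * Illustrative sketch only (not part of the original source), assuming logback's
+ * ch.qos.logback.classic.spi.LoggingEvent setters: any message containing "EELF" is denied,
+ * anything else is accepted.
+ *
+ *     LoggingEvent eelfEvent = new LoggingEvent();
+ *     eelfEvent.setMessage("EELF0004I configuration fetched");
+ *     new EELFFilter().decide(eelfEvent);   // returns FilterReply.DENY
+ *
+ *     LoggingEvent jettyEvent = new LoggingEvent();
+ *     jettyEvent.setMessage("jetty server started");
+ *     new EELFFilter().decide(jettyEvent);  // returns FilterReply.ACCEPT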
+ */
+public class EELFFilter extends Filter<ILoggingEvent> {
+ @Override
+ public FilterReply decide(ILoggingEvent event) {
+ if (event.getMessage().contains("EELF")) {
+ return FilterReply.DENY;
+ } else {
+ return FilterReply.ACCEPT;
+ }
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-package org.onap.dmaap.datarouter.node.eelf;\r
-\r
-import com.att.eelf.i18n.EELFResolvableErrorEnum;\r
-import com.att.eelf.i18n.EELFResourceManager;\r
-\r
-public enum EelfMsgs implements EELFResolvableErrorEnum {\r
- \r
- /**\r
- * Application message prints user (accepts one argument)\r
- */\r
- MESSAGE_WITH_BEHALF,\r
-\r
- /**\r
- * Application message prints user and FeedID (accepts two arguments)\r
- */\r
-\r
- MESSAGE_WITH_BEHALF_AND_FEEDID,\r
- \r
- /**\r
- * Application message prints keystore file error in EELF errors log\r
- */\r
-\r
- MESSAGE_KEYSTORE_LOAD_ERROR,\r
- \r
- /**\r
- * Application message prints Error extracting my name from my keystore file\r
- */\r
-\r
- MESSAGE_KEYSORE_NAME_ERROR, \r
- \r
- /**\r
- * Application message prints Error parsing configuration data from provisioning server.\r
- */\r
-\r
-\r
- MESSAGE_PARSING_ERROR, \r
- \r
- /**\r
- * Application message printsConfiguration failed\r
- */\r
-\r
-\r
- MESSAGE_CONF_FAILED, \r
- \r
- /**\r
- * Application message prints Bad provisioning server URL\r
- */\r
-\r
-\r
- MESSAGE_BAD_PROV_URL, \r
- \r
- /**\r
- * Application message prints Unable to fetch canonical name from keystore file\r
- */\r
-\r
-\r
- MESSAGE_KEYSTORE_FETCH_ERROR,\r
- \r
- /**\r
- * Application message prints Unable to load local configuration file.\r
- */\r
-\r
-\r
- MESSAGE_PROPERTIES_LOAD_ERROR;\r
-\r
- \r
- /**\r
- * Static initializer to ensure the resource bundles for this class are loaded...\r
- * Here this application loads messages from three bundles\r
- */\r
- static {\r
- EELFResourceManager.loadMessageBundle("EelfMessages");\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+package org.onap.dmaap.datarouter.node.eelf;
+
+import com.att.eelf.i18n.EELFResolvableErrorEnum;
+import com.att.eelf.i18n.EELFResourceManager;
+
+public enum EelfMsgs implements EELFResolvableErrorEnum {
+
+ /**
+ * Application message prints user (accepts one argument)
+ */
+ MESSAGE_WITH_BEHALF,
+
+ /**
+ * Application message prints user and FeedID (accepts two arguments)
+ */
+
+ MESSAGE_WITH_BEHALF_AND_FEEDID,
+
+ /**
+ * Application message prints keystore file error in EELF errors log
+ */
+
+ MESSAGE_KEYSTORE_LOAD_ERROR,
+
+ /**
+ * Application message prints Error extracting my name from my keystore file
+ */
+
+ MESSAGE_KEYSORE_NAME_ERROR,
+
+ /**
+ * Application message prints Error parsing configuration data from provisioning server.
+ */
+
+
+ MESSAGE_PARSING_ERROR,
+
+ /**
+ * Application message prints Configuration failed
+ */
+
+
+ MESSAGE_CONF_FAILED,
+
+ /**
+ * Application message prints Bad provisioning server URL
+ */
+
+
+ MESSAGE_BAD_PROV_URL,
+
+ /**
+ * Application message prints Unable to fetch canonical name from keystore file
+ */
+
+
+ MESSAGE_KEYSTORE_FETCH_ERROR,
+
+ /**
+ * Application message prints Unable to load local configuration file.
+ */
+
+
+ MESSAGE_PROPERTIES_LOAD_ERROR;
+
+
+ /**
+ * Static initializer to ensure the resource bundle for this class is loaded.
+ * Here the application loads its messages from the EelfMessages bundle.
+ */
+ static {
+ EELFResourceManager.loadMessageBundle("EelfMessages");
+ }
+}
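
Illustrative sketch only, not part of the patch above: a hedged example of resolving one of these
message codes, assuming eelf-core's EELFResourceManager.format(EELFResolvableErrorEnum, String...)
and an EelfMessages bundle entry keyed by the constant name; the class name and argument values are
invented for illustration.

    import com.att.eelf.i18n.EELFResourceManager;
    import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;

    public class EelfMsgsSketch {
        public static void main(String[] args) {
            // Fills the two placeholders of MESSAGE_WITH_BEHALF_AND_FEEDID (user and feed ID)
            // from the EelfMessages bundle loaded by the enum's static initializer.
            String text = EELFResourceManager.format(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, "demo-user", "42");
            System.out.println(text);
        }
    }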
-<!--\r
- ============LICENSE_START==================================================\r
- * org.onap.dmaap\r
- * ===========================================================================\r
- * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * ===========================================================================\r
- * Licensed under the Apache License, Version 2.0 (the "License");\r
- * you may not use this file except in compliance with the License.\r
- * You may obtain a copy of the License at\r
- * \r
- * http://www.apache.org/licenses/LICENSE-2.0\r
- * \r
- * Unless required by applicable law or agreed to in writing, software\r
- * distributed under the License is distributed on an "AS IS" BASIS,\r
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * See the License for the specific language governing permissions and\r
- * limitations under the License.\r
- * ============LICENSE_END====================================================\r
- *\r
- * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- *\r
--->\r
-<configuration scan="true" scanPeriod="3 seconds" debug="true">\r
- <!--<jmxConfigurator /> -->\r
- <!-- directory path for all other type logs -->\r
- <!-- property name="logDir" value="/home/eby/dr2/logs" / -->\r
- <property name="logDir" value="/opt/app/datartr/logs" />\r
- \r
- <!-- directory path for debugging type logs -->\r
- <!-- property name="debugDir" value="/home/eby/dr2/debug-logs" /-->\r
- \r
- <!-- specify the component name \r
- <ECOMP-component-name>::= "MSO" | "DCAE" | "ASDC " | "AAI" |"Policy" | "SDNC" | "AC" -->\r
- <!-- This creates the MSO directory in in the LogDir which is not needed, mentioned last directory of the path-->\r
- <!-- property name="componentName" value="logs"></property -->\r
- \r
- <!-- log file names -->\r
- <property name="generalLogName" value="apicalls" />\r
- <!-- name="securityLogName" value="security" -->\r
- <!-- name="performanceLogName" value="performance" -->\r
- <!-- name="serverLogName" value="server" -->\r
- <!-- name="policyLogName" value="policy"-->\r
- <property name="errorLogName" value="errors" />\r
- <!-- name="metricsLogName" value="metrics" -->\r
- <!-- name="auditLogName" value="audit" -->\r
- <!-- name="debugLogName" value="debug" -->\r
- <property name="jettyAndNodeLogName" value="node"></property> \r
- <property name="defaultPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|%msg%n" />\r
- <property name="jettyAndNodeLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%thread|%.-5level|%msg%n" />\r
- \r
- <property name="debugLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|[%caller{3}]|%msg%n" />\r
- \r
- <property name="logDirectory" value="${logDir}" />\r
- <!-- property name="debugLogDirectory" value="${debugDir}/${componentName}" /-->\r
- \r
- \r
- <!-- Example evaluator filter applied against console appender -->\r
- <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
-\r
- <!-- ============================================================================ -->\r
- <!-- EELF Appenders -->\r
- <!-- ============================================================================ -->\r
-\r
- <!-- The EELFAppender is used to record events to the general application \r
- log -->\r
- \r
- \r
- <appender name="EELF"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${generalLogName}.log</file>\r
- <filter class="ch.qos.logback.classic.filter.LevelFilter">\r
- <level>INFO</level>\r
- <onMatch>ACCEPT</onMatch>\r
- <onMismatch>DENY</onMismatch>\r
- </filter>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${generalLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELF" />\r
- </appender>\r
-\r
- <!-- EELF Security Appender. This appender is used to record security events \r
- to the security log file. Security events are separate from other loggers \r
- in EELF so that security log records can be captured and managed in a secure \r
- way separate from the other logs. This appender is set to never discard any \r
- events. -->\r
- <!--appender name="EELFSecurity"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${securityLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${securityLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELFSecurity" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <discardingThreshold>0</discardingThreshold>\r
- <appender-ref ref="EELFSecurity" />\r
- </appender-->\r
-\r
- <!-- EELF Performance Appender. This appender is used to record performance \r
- records. -->\r
- <!--appender name="EELFPerformance"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${performanceLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${performanceLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <outputPatternAsHeader>true</outputPatternAsHeader>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- <appender name="asyncEELFPerformance" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFPerformance" />\r
- </appender-->\r
-\r
- <!-- EELF Server Appender. This appender is used to record Server related \r
- logging events. The Server logger and appender are specializations of the \r
- EELF application root logger and appender. This can be used to segregate Server \r
- events from other components, or it can be eliminated to record these events \r
- as part of the application root log. -->\r
- <!--appender name="EELFServer"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${serverLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${serverLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- <appender name="asyncEELFServer" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFServer" />\r
- </appender-->\r
-\r
- \r
- <!-- EELF Policy Appender. This appender is used to record Policy engine \r
- related logging events. The Policy logger and appender are specializations \r
- of the EELF application root logger and appender. This can be used to segregate \r
- Policy engine events from other components, or it can be eliminated to record \r
- these events as part of the application root log. -->\r
- <!--appender name="EELFPolicy"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${policyLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${policyLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- <appender name="asyncEELFPolicy" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFPolicy" >\r
- </appender-->\r
- \r
- \r
- <!-- EELF Audit Appender. This appender is used to record audit engine \r
- related logging events. The audit logger and appender are specializations \r
- of the EELF application root logger and appender. This can be used to segregate \r
- Policy engine events from other components, or it can be eliminated to record \r
- these events as part of the application root log. -->\r
- \r
- <!--appender name="EELFAudit"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${auditLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${auditLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFAudit" />\r
- </appender-->\r
-\r
-<!--appender name="EELFMetrics"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${metricsLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${metricsLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder-->\r
- <!-- <pattern>"%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - \r
- %msg%n"</pattern> -->\r
- <!--pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- \r
- <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFMetrics"/>\r
- </appender-->\r
- \r
- <appender name="EELFError"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${errorLogName}.log</file>\r
- <filter class="ch.qos.logback.classic.filter.LevelFilter">\r
- <level>ERROR</level>\r
- <onMatch>ACCEPT</onMatch>\r
- <onMismatch>DENY</onMismatch>\r
- </filter>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${errorLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFError"/>\r
- </appender>\r
- \r
- <!-- ============================================================================ -->\r
- <appender name="jettyAndNodelog"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${jettyAndNodeLogName}.log</file>\r
- <filter class="org.onap.dmaap.datarouter.node.eelf.EELFFilter" />\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${jettyAndNodeLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${jettyAndNodeLoggerPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELFjettyAndNodelog" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="jettyAndNodelog" />\r
- <includeCallerData>true</includeCallerData>\r
- </appender>\r
- \r
- <!-- ============================================================================ -->\r
-\r
-\r
- <!--appender name="EELFDebug"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${debugLogDirectory}/${debugLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${debugLogDirectory}/${debugLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${debugLoggerPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFDebug" />\r
- <includeCallerData>true</includeCallerData>\r
- </appender-->\r
- \r
- \r
- <!-- ============================================================================ -->\r
- <!-- EELF loggers -->\r
- <!-- ============================================================================ -->\r
- <logger name="com.att.eelf" level="info" additivity="false">\r
- <appender-ref ref="asyncEELF" />\r
- </logger>\r
- \r
- <logger name="com.att.eelf.error" level="error" additivity="false">\r
- <appender-ref ref="asyncEELFError" />\r
- </logger>\r
- \r
- <logger name="log4j.logger.org.eclipse.jetty" additivity="false" level="info">\r
- <appender-ref ref="asyncEELFjettyAndNodelog"/>\r
- </logger> \r
- \r
- <!-- logger name="com.att.eelf.security" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFSecurity" /> \r
- </logger>\r
- <logger name="com.att.eelf.perf" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFPerformance" />\r
- </logger>\r
- <logger name="com.att.eelf.server" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFServer" />\r
- </logger>\r
- <logger name="com.att.eelf.policy" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFPolicy" />\r
- </logger>\r
-\r
- <logger name="com.att.eelf.audit" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFAudit" />\r
- </logger>\r
- \r
- <logger name="com.att.eelf.metrics" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFMetrics" />\r
- </logger>\r
- \r
- <logger name="com.att.eelf.debug" level="debug" additivity="false">\r
- <appender-ref ref="asyncEELFDebug" />\r
- </logger-->\r
-\r
- \r
-\r
- \r
- <root level="INFO">\r
- <appender-ref ref="asyncEELF" />\r
- <appender-ref ref="asyncEELFError" />\r
- <appender-ref ref="asyncEELFjettyAndNodelog" />\r
- </root>\r
-\r
-</configuration>\r
+<!--
+ ============LICENSE_START==================================================
+ * org.onap.dmaap
+ * ===========================================================================
+ * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * ===========================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END====================================================
+ *
+ * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ *
+-->
+<configuration scan="true" scanPeriod="3 seconds" debug="true">
+ <!--<jmxConfigurator /> -->
+ <!-- directory path for all other type logs -->
+ <!-- property name="logDir" value="/home/eby/dr2/logs" / -->
+ <property name="logDir" value="/opt/app/datartr/logs" />
+
+ <!-- directory path for debugging type logs -->
+ <!-- property name="debugDir" value="/home/eby/dr2/debug-logs" /-->
+
+ <!-- specify the component name
+ <ECOMP-component-name>::= "MSO" | "DCAE" | "ASDC " | "AAI" |"Policy" | "SDNC" | "AC" -->
+ <!-- This creates the MSO directory in the logDir, which is not needed; it is mentioned as the last directory of the path -->
+ <!-- property name="componentName" value="logs"></property -->
+
+ <!-- log file names -->
+ <property name="generalLogName" value="apicalls" />
+ <!-- name="securityLogName" value="security" -->
+ <!-- name="performanceLogName" value="performance" -->
+ <!-- name="serverLogName" value="server" -->
+ <!-- name="policyLogName" value="policy"-->
+ <property name="errorLogName" value="errors" />
+ <!-- name="metricsLogName" value="metrics" -->
+ <!-- name="auditLogName" value="audit" -->
+ <!-- name="debugLogName" value="debug" -->
+ <property name="jettyAndNodeLogName" value="node"></property>
+ <property name="defaultPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|%msg%n" />
+ <property name="jettyAndNodeLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%thread|%.-5level|%msg%n" />
+
+ <property name="debugLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|[%caller{3}]|%msg%n" />
+
+ <property name="logDirectory" value="${logDir}" />
+ <!-- property name="debugLogDirectory" value="${debugDir}/${componentName}" /-->
+
+
+ <!-- Example evaluator filter applied against console appender -->
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <!-- ============================================================================ -->
+ <!-- EELF Appenders -->
+ <!-- ============================================================================ -->
+
+ <!-- The EELFAppender is used to record events to the general application
+ log -->
+
+
+ <appender name="EELF"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${generalLogName}.log</file>
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>INFO</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${generalLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELF" />
+ </appender>
+
+ <!-- EELF Security Appender. This appender is used to record security events
+ to the security log file. Security events are separate from other loggers
+ in EELF so that security log records can be captured and managed in a secure
+ way separate from the other logs. This appender is set to never discard any
+ events. -->
+ <!--appender name="EELFSecurity"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${securityLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${securityLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFSecurity" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <discardingThreshold>0</discardingThreshold>
+ <appender-ref ref="EELFSecurity" />
+ </appender-->
+
+ <!-- EELF Performance Appender. This appender is used to record performance
+ records. -->
+ <!--appender name="EELFPerformance"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${performanceLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${performanceLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <outputPatternAsHeader>true</outputPatternAsHeader>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFPerformance" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFPerformance" />
+ </appender-->
+
+ <!-- EELF Server Appender. This appender is used to record Server related
+ logging events. The Server logger and appender are specializations of the
+ EELF application root logger and appender. This can be used to segregate Server
+ events from other components, or it can be eliminated to record these events
+ as part of the application root log. -->
+ <!--appender name="EELFServer"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${serverLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${serverLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFServer" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFServer" />
+ </appender-->
+
+
+ <!-- EELF Policy Appender. This appender is used to record Policy engine
+ related logging events. The Policy logger and appender are specializations
+ of the EELF application root logger and appender. This can be used to segregate
+ Policy engine events from other components, or it can be eliminated to record
+ these events as part of the application root log. -->
+ <!--appender name="EELFPolicy"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${policyLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${policyLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFPolicy" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFPolicy" >
+ </appender-->
+
+
+ <!-- EELF Audit Appender. This appender is used to record audit engine
+ related logging events. The audit logger and appender are specializations
+ of the EELF application root logger and appender. This can be used to segregate
+ Policy engine events from other components, or it can be eliminated to record
+ these events as part of the application root log. -->
+
+ <!--appender name="EELFAudit"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${auditLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${auditLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFAudit" />
+ </appender-->
+
+<!--appender name="EELFMetrics"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${metricsLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${metricsLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder-->
+ <!-- <pattern>"%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} -
+ %msg%n"</pattern> -->
+ <!--pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+
+ <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFMetrics"/>
+ </appender-->
+
+ <appender name="EELFError"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${errorLogName}.log</file>
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>ERROR</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${errorLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFError"/>
+ </appender>
+
+ <!-- ============================================================================ -->
+ <appender name="jettyAndNodelog"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${jettyAndNodeLogName}.log</file>
+ <filter class="org.onap.dmaap.datarouter.node.eelf.EELFFilter" />
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${jettyAndNodeLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${jettyAndNodeLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFjettyAndNodelog" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="jettyAndNodelog" />
+ <includeCallerData>true</includeCallerData>
+ </appender>
+
+ <!-- ============================================================================ -->
+
+
+ <!--appender name="EELFDebug"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${debugLogDirectory}/${debugLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${debugLogDirectory}/${debugLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${debugLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFDebug" />
+ <includeCallerData>true</includeCallerData>
+ </appender-->
+
+
+ <!-- ============================================================================ -->
+ <!-- EELF loggers -->
+ <!-- ============================================================================ -->
+ <logger name="com.att.eelf" level="info" additivity="false">
+ <appender-ref ref="asyncEELF" />
+ </logger>
+
+ <logger name="com.att.eelf.error" level="error" additivity="false">
+ <appender-ref ref="asyncEELFError" />
+ </logger>
+
+ <logger name="org.eclipse.jetty" additivity="false" level="info">
+ <appender-ref ref="asyncEELFjettyAndNodelog"/>
+ </logger>
+
+ <!-- logger name="com.att.eelf.security" level="info" additivity="false">
+ <appender-ref ref="asyncEELFSecurity" />
+ </logger>
+ <logger name="com.att.eelf.perf" level="info" additivity="false">
+ <appender-ref ref="asyncEELFPerformance" />
+ </logger>
+ <logger name="com.att.eelf.server" level="info" additivity="false">
+ <appender-ref ref="asyncEELFServer" />
+ </logger>
+ <logger name="com.att.eelf.policy" level="info" additivity="false">
+ <appender-ref ref="asyncEELFPolicy" />
+ </logger>
+
+ <logger name="com.att.eelf.audit" level="info" additivity="false">
+ <appender-ref ref="asyncEELFAudit" />
+ </logger>
+
+ <logger name="com.att.eelf.metrics" level="info" additivity="false">
+ <appender-ref ref="asyncEELFMetrics" />
+ </logger>
+
+ <logger name="com.att.eelf.debug" level="debug" additivity="false">
+ <appender-ref ref="asyncEELFDebug" />
+ </logger-->
+
+
+
+
+ <root level="INFO">
+ <appender-ref ref="asyncEELF" />
+ <appender-ref ref="asyncEELFError" />
+ <appender-ref ref="asyncEELFjettyAndNodelog" />
+ </root>
+
+</configuration>
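
Illustrative sketch only, not part of the patch above: how application logging maps onto the
configuration just shown, assuming the code logs through SLF4J over logback and uses the logger
names declared in the <logger> elements; the class name and messages are invented.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class NodeLoggingSketch {
        private static final Logger apiLog = LoggerFactory.getLogger("com.att.eelf");
        private static final Logger errLog = LoggerFactory.getLogger("com.att.eelf.error");
        private static final Logger nodeLog = LoggerFactory.getLogger(NodeLoggingSketch.class);

        public static void main(String[] args) {
            // "com.att.eelf" is non-additive and feeds asyncEELF, whose LevelFilter accepts only INFO,
            // so this ends up in apicalls.log.
            apiLog.info("api call record");
            // "com.att.eelf.error" is non-additive and feeds asyncEELFError (ERROR only) -> errors.log.
            errLog.error("something failed");
            // Other loggers fall through to the root logger; the jettyAndNodelog appender's EELFFilter
            // accepts this message (it does not contain "EELF"), so it reaches node.log as well as
            // apicalls.log.
            nodeLog.info("regular node message");
        }
    }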
-<?xml version="1.0" encoding="utf-8"?>\r
-<!--\r
- ============LICENSE_START==================================================\r
- * org.onap.dmaap\r
- * ===========================================================================\r
- * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * ===========================================================================\r
- * Licensed under the Apache License, Version 2.0 (the "License");\r
- * you may not use this file except in compliance with the License.\r
- * You may obtain a copy of the License at\r
- * \r
- * http://www.apache.org/licenses/LICENSE-2.0\r
- * \r
- * Unless required by applicable law or agreed to in writing, software\r
- * distributed under the License is distributed on an "AS IS" BASIS,\r
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * See the License for the specific language governing permissions and\r
- * limitations under the License.\r
- * ============LICENSE_END====================================================\r
- *\r
- * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- *\r
--->\r
-<descriptor version="1" xmlns="http://aft.att.com/swm/descriptor">\r
- <platforms>\r
- <platform os="Linux" osVersions="*" architecture="*"/>\r
- </platforms>\r
- <paths>\r
- <path name="/opt/app/datartr" user="datartr" group="datartr" permissions="755,644" recursive="true"/>\r
- <path name="/opt/app/platform/init.d/drtrnode" user="datartr" group="datartr" permissions="755"/>\r
- </paths>\r
- <actions>\r
- <action type="INIT">\r
- <proc stage="POST" user="datartr" group="datartr"/>\r
- </action>\r
- <action type="FALL">\r
- <proc stage="PRE" user="datartr" group="datartr"/>\r
- <proc stage="POST" user="datartr" group="datartr"/>\r
- </action>\r
- <action type="INST">\r
- <proc stage="PRE" user="datartr" group="datartr"/>\r
- <proc stage="POST" user="datartr" group="datartr"/>\r
- </action>\r
- <action type="DINST">\r
- <proc stage="PRE" user="datartr" group="datartr"/>\r
- </action>\r
- </actions>\r
- <dependencies>\r
- <dependencyFilter componentName="com.att.java:jdk8lin" versions="[1.8.0.77-02]" sequence="1"/>\r
- <dependencyFilter componentName="com.att.platform:initd" versions="[1.0.15,)" sequence="2"/>\r
- <dependencyFilter componentName="com.att.dmaap.datarouter:util" versions="[1.0.7,)" sequence="3"/>\r
- </dependencies>\r
-</descriptor>\r
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ ============LICENSE_START==================================================
+ * org.onap.dmaap
+ * ===========================================================================
+ * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * ===========================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END====================================================
+ *
+ * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ *
+-->
+<descriptor version="1" xmlns="http://aft.att.com/swm/descriptor">
+ <platforms>
+ <platform os="Linux" osVersions="*" architecture="*"/>
+ </platforms>
+ <paths>
+ <path name="/opt/app/datartr" user="datartr" group="datartr" permissions="755,644" recursive="true"/>
+ <path name="/opt/app/platform/init.d/drtrnode" user="datartr" group="datartr" permissions="755"/>
+ </paths>
+ <actions>
+ <action type="INIT">
+ <proc stage="POST" user="datartr" group="datartr"/>
+ </action>
+ <action type="FALL">
+ <proc stage="PRE" user="datartr" group="datartr"/>
+ <proc stage="POST" user="datartr" group="datartr"/>
+ </action>
+ <action type="INST">
+ <proc stage="PRE" user="datartr" group="datartr"/>
+ <proc stage="POST" user="datartr" group="datartr"/>
+ </action>
+ <action type="DINST">
+ <proc stage="PRE" user="datartr" group="datartr"/>
+ </action>
+ </actions>
+ <dependencies>
+ <dependencyFilter componentName="com.att.java:jdk8lin" versions="[1.8.0.77-02]" sequence="1"/>
+ <dependencyFilter componentName="com.att.platform:initd" versions="[1.0.15,)" sequence="2"/>
+ <dependencyFilter componentName="com.att.dmaap.datarouter:util" versions="[1.0.7,)" sequence="3"/>
+ </dependencies>
+</descriptor>
do
case "$action" in
'backup')
- cp log4j.properties log4j.properties.save 2>/dev/null
- cp node.properties node.properties.save 2>/dev/null
- cp havecert havecert.save 2>/dev/null
- ;;
+ cp log4j.properties log4j.properties.save 2>/dev/null
+ cp node.properties node.properties.save 2>/dev/null
+ cp havecert havecert.save 2>/dev/null
+ ;;
'stop')
- /opt/app/platform/init.d/drtrnode stop
- ;;
+ /opt/app/platform/init.d/drtrnode stop
+ ;;
'start')
- /opt/app/platform/init.d/drtrnode start || exit 1
- ;;
+ /opt/app/platform/init.d/drtrnode start || exit 1
+ ;;
'config')
- /bin/bash log4j.properties.tmpl >log4j.properties
- /bin/bash node.properties.tmpl >node.properties
- /bin/bash havecert.tmpl >havecert
- echo "$AFTSWM_ACTION_NEW_VERSION" >VERSION.node
- chmod +x havecert
- rm -f /opt/app/platform/rc.d/K90drtrnode /opt/app/platform/rc.d/S10drtrnode
- ln -s ../init.d/drtrnode /opt/app/platform/rc.d/K90drtrnode
- ln -s ../init.d/drtrnode /opt/app/platform/rc.d/S10drtrnode
- ;;
+ /bin/bash log4j.properties.tmpl >log4j.properties
+ /bin/bash node.properties.tmpl >node.properties
+ /bin/bash havecert.tmpl >havecert
+ echo "$AFTSWM_ACTION_NEW_VERSION" >VERSION.node
+ chmod +x havecert
+ rm -f /opt/app/platform/rc.d/K90drtrnode /opt/app/platform/rc.d/S10drtrnode
+ ln -s ../init.d/drtrnode /opt/app/platform/rc.d/K90drtrnode
+ ln -s ../init.d/drtrnode /opt/app/platform/rc.d/S10drtrnode
+ ;;
'restore')
- cp log4j.properties.save log4j.properties 2>/dev/null
- cp node.properties.save node.properties 2>/dev/null
- cp havecert.save havecert 2>/dev/null
- ;;
+ cp log4j.properties.save log4j.properties 2>/dev/null
+ cp node.properties.save node.properties 2>/dev/null
+ cp havecert.save havecert 2>/dev/null
+ ;;
'clean')
- rm -f log4j.properties node.properties havecert log4j.properties.save node.properties.save havecert.save SHUTDOWN redirections.dat VERSION.node
- rm -f /opt/app/platform/rc.d/K90drtrnode /opt/app/platform/rc.d/S10drtrnode
- ;;
+ rm -f log4j.properties node.properties havecert log4j.properties.save node.properties.save havecert.save SHUTDOWN redirections.dat VERSION.node
+ rm -f /opt/app/platform/rc.d/K90drtrnode /opt/app/platform/rc.d/S10drtrnode
+ ;;
*)
- exit 1
- ;;
+ exit 1
+ ;;
esac
done
exit 0
export TZ
PATH=/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/java/jdk/jdk180/bin
export PATH
-CLASSPATH=`echo /opt/app/datartr/etc /opt/app/datartr/lib/*.jar | tr ' ' ':'`
+CLASSPATH=`echo /opt/app/datartr/etc /opt/app/datartr/lib/*.jar | tr ' ' ':'`
export CLASSPATH
pids() {
- ps -ef | grep java | grep node.NodeMain | sed -e 's/[^ ]* *//' -e 's/ .*//'
+ ps -ef | grep java | grep node.NodeMain | sed -e 's/[^ ]* *//' -e 's/ .*//'
}
start() {
- ID=`id -n -u`
- GRP=`id -n -g`
- if [ "$ID" != "root" ]
- then
- echo drtrnode must be started as user datartr not $ID
- exit 1
- fi
- if [ "$GRP" != "datartr" ]
- then
- echo drtrnode must be started as group datartr not $GRP
- exit 1
- fi
- cd /opt/app/datartr
- if etc/havecert
- then
- echo >/dev/null
- else
- echo No certificate file available. Cannot start
- exit 0
- fi
- PIDS=`pids`
- if [ "$PIDS" != "" ]
- then
- echo drtrnode already running
- exit 0
- fi
+ ID=`id -n -u`
+ GRP=`id -n -g`
+ if [ "$ID" != "datartr" ]
+ then
+ echo drtrnode must be started as user datartr not $ID
+ exit 1
+ fi
+ if [ "$GRP" != "datartr" ]
+ then
+ echo drtrnode must be started as group datartr not $GRP
+ exit 1
+ fi
+ cd /opt/app/datartr
+ if etc/havecert
+ then
+ echo >/dev/null
+ else
+ echo No certificate file available. Cannot start
+ exit 0
+ fi
+ PIDS=`pids`
+ if [ "$PIDS" != "" ]
+ then
+ echo drtrnode already running
+ exit 0
+ fi
- mkdir -p /opt/app/datartr/spool/s
- chmod 755 /opt/app/datartr/spool/s
+ mkdir -p /opt/app/datartr/spool/s
+ chmod 755 /opt/app/datartr/spool/s
- rm -f /opt/app/datartr/etc/SHUTDOWN
- nohup java org.onap.dmaap.datarouter.node.NodeMain </dev/null >/dev/null 2>&1 &
- sleep 5
- PIDS=`pids`
- if [ "$PIDS" = "" ]
- then
- echo drtrnode startup failed
- else
- echo drtrnode started
- fi
+ rm -f /opt/app/datartr/etc/SHUTDOWN
+ nohup java org.onap.dmaap.datarouter.node.NodeMain </dev/null >/dev/null 2>&1 &
+ sleep 5
+ PIDS=`pids`
+ if [ "$PIDS" = "" ]
+ then
+ echo drtrnode startup failed
+ else
+ echo drtrnode started
+ fi
}
stop() {
- ID=`id -n -u`
- GRP=`id -n -g`
- if [ "$ID" != "datartr" ]
- then
- echo drtrnode must be stopped as user datartr not $ID
- exit 1
- fi
- if [ "$GRP" != "datartr" ]
- then
- echo drtrnode must be stopped as group datartr not $GRP
- exit 1
- fi
- touch /opt/app/datartr/etc/SHUTDOWN
- PIDS=`pids`
- if [ "$PIDS" != "" ]
- then
- sleep 5
- kill -9 $PIDS
- sleep 5
- echo drtrnode stopped
- else
- echo drtrnode not running
- fi
+ ID=`id -n -u`
+ GRP=`id -n -g`
+ if [ "$ID" != "datartr" ]
+ then
+ echo drtrnode must be stopped as user datartr not $ID
+ exit 1
+ fi
+ if [ "$GRP" != "datartr" ]
+ then
+ echo drtrnode must be stopped as group datartr not $GRP
+ exit 1
+ fi
+ touch /opt/app/datartr/etc/SHUTDOWN
+ PIDS=`pids`
+ if [ "$PIDS" != "" ]
+ then
+ sleep 5
+ kill -9 $PIDS
+ sleep 5
+ echo drtrnode stopped
+ else
+ echo drtrnode not running
+ fi
}
status() {
- PIDS=`pids`
- if [ "$PIDS" != "" ]
- then
- echo drtrnode running
- else
- echo drtrnode not running
- fi
+ PIDS=`pids`
+ if [ "$PIDS" != "" ]
+ then
+ echo drtrnode running
+ else
+ echo drtrnode not running
+ fi
}
case "$1" in
'start')
- start
- ;;
+ start
+ ;;
'stop')
- stop
- ;;
+ stop
+ ;;
'restart')
- stop
- sleep 20
- start
- ;;
+ stop
+ sleep 20
+ start
+ ;;
'status')
- status
- ;;
+ status
+ ;;
*)
- echo "Usage: $0 { start | stop | restart }"
- exit 1
- ;;
+ echo "Usage: $0 { start | stop | restart | status }"
+ exit 1
+ ;;
esac
exit 0
cd /opt/app/datartr;
if [ -f ${DRTR_NODE_KSTOREFILE:-etc/keystore} ]
then
- exit 0
+ exit 0
fi
echo `date '+%F %T,000'` WARN Certificate file "${DRTR_NODE_KSTOREFILE:-etc/keystore}" is missing >>${DRTR_NODE_LOGS:-logs}/node.log
exit 1
This component is for the Data Router Node software.
The following pre-requisite components should already be present:
- com.att.aft.swm:swm-cli
- com.att.aft.swm:swm-node
- - SWM Variables: AFTSWM_AUTOLINK_PARENTS=/opt/app:/opt/app/workload,/opt/app/aft:/opt/app/workload/aft
- com.att.platform:uam-auto
- com.att.java:jdk8lin
- com.att.platform:initd
- com.att.platform:port-fwd
- - SWM Variables: PLATFORM_PORT_FWD=80,8080|443,8443
- com.att.dmaap.datarouter:util
+ com.att.aft.swm:swm-cli
+ com.att.aft.swm:swm-node
+ - SWM Variables: AFTSWM_AUTOLINK_PARENTS=/opt/app:/opt/app/workload,/opt/app/aft:/opt/app/workload/aft
+ com.att.platform:uam-auto
+ com.att.java:jdk8lin
+ com.att.platform:initd
+ com.att.platform:port-fwd
+ - SWM Variables: PLATFORM_PORT_FWD=80,8080|443,8443
+ com.att.dmaap.datarouter:util
In a non-production environment, the URL for fetching provisioning data from
the provisioning server must be overridden. This can be done by setting a SWM
variable prior to installing this component. The production (default) value for
this variable is:
- DRTR_PROV_INTURL=https://feeds-drtr.web.att.com/internal/prov
+ DRTR_PROV_INTURL=https://feeds-drtr.web.att.com/internal/prov
Similarly, the URL for uploading event logs to the log server must be overridden. This can also be done by setting a SWM variable. The production (default) value is:
- DRTR_LOG_URL=https://feeds-drtr.web.att.com/internal/logs
+ DRTR_LOG_URL=https://feeds-drtr.web.att.com/internal/logs
Other SWM variables that can be set are:
DRTR_NODE_INTHTTPPORT (default 8080)
- The TCP/IP port number the component should listen on for "go fetch"
- requests from the provisioning server
+ The TCP/IP port number the component should listen on for "go fetch"
+ requests from the provisioning server
DRTR_NODE_INTHTTPSPORT (default 8443)
- The TCP/IP port number the component should listen on for publish
- requests from feed publishers and other nodes
+ The TCP/IP port number the component should listen on for publish
+ requests from feed publishers and other nodes
DRTR_NODE_EXTHTTPSPORT (default 443)
- The TCP/IP port number the component should use for node-to-node
- transfers and for sending redirect requests back to publishers
+ The TCP/IP port number the component should use for node-to-node
+ transfers and for sending redirect requests back to publishers
DRTR_NODE_SPOOL (default /opt/app/datartr/spool)
- The directory where data files should be saved while in transit
+ The directory where data files should be saved while in transit
DRTR_NODE_LOGS (default /opt/app/datartr/logs)
- The directory where log files should be kept
+ The directory where log files should be kept
DRTR_NODE_LOG_RETENTION (default 30)
- How long a log file is kept before being deleted
+ How long a log file is kept before being deleted
DRTR_NODE_KSTOREFILE (default /opt/app/datartr/etc/keystore)
- The java keystore file containing the server certificate and private key
- for this server
+ The java keystore file containing the server certificate and private key
+ for this server
DRTR_NODE_KSTOREPASS (default changeit)
- The password for the keystore file
+ The password for the keystore file
DRTR_NODE_PVTKEYPASS (default changeit)
- The password for the private key in the keystore file
+ The password for the private key in the keystore file
DRTR_NODE_TSTOREFILE (by default, use the truststore from the Java JDK)
- The java keystore file containing the trusted certificate authority
- certificates
+ The java keystore file containing the trusted certificate authority
+ certificates
DRTR_NODE_TSTOREPASS (default changeit)
- The password for the trust store file. Only applies if a trust store
- file is specified.
+ The password for the trust store file. Only applies if a trust store
+ file is specified.