-<!--\r
- ============LICENSE_START==================================================\r
- * org.onap.dmaap\r
- * ===========================================================================\r
- * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * ===========================================================================\r
- * Licensed under the Apache License, Version 2.0 (the "License");\r
- * you may not use this file except in compliance with the License.\r
- * You may obtain a copy of the License at\r
- *\r
- * http://www.apache.org/licenses/LICENSE-2.0\r
- *\r
- * Unless required by applicable law or agreed to in writing, software\r
- * distributed under the License is distributed on an "AS IS" BASIS,\r
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * See the License for the specific language governing permissions and\r
- * limitations under the License.\r
- * ============LICENSE_END====================================================\r
- *\r
- * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- *\r
--->\r
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">\r
- <modelVersion>4.0.0</modelVersion>\r
- <parent>\r
- <groupId>org.onap.dmaap.datarouter</groupId>\r
- <artifactId>parent</artifactId>\r
- <version>1.0.1-SNAPSHOT</version>\r
- <relativePath>../pom.xml</relativePath>\r
- </parent>\r
- <artifactId>datarouter-node</artifactId>\r
- <packaging>jar</packaging>\r
- <name>datarouter-node</name>\r
- <url>https://github.com/att/DMAAP_DATAROUTER</url>\r
- <properties>\r
- <sonar.skip>false</sonar.skip>\r
- <sonar.jacoco.reportMissing.force.zero>true</sonar.jacoco.reportMissing.force.zero>\r
- <sitePath>/content/sites/site/${project.groupId}/${project.artifactId}/${project.version}</sitePath>\r
- <docker.location>${basedir}/target/${artifactId}</docker.location>\r
- <datarouter.node.image.name>onap/dmaap/datarouter-node</datarouter.node.image.name>\r
- </properties>\r
- <dependencies>\r
- <dependency>\r
- <groupId>junit</groupId>\r
- <artifactId>junit</artifactId>\r
- <version>3.8.1</version>\r
- <scope>test</scope>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.json</groupId>\r
- <artifactId>json</artifactId>\r
- <version>20160810</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>javax.mail</groupId>\r
- <artifactId>javax.mail-api</artifactId>\r
- <version>1.5.1</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>com.att.eelf</groupId>\r
- <artifactId>eelf-core</artifactId>\r
- <version>0.0.1</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>javax.servlet</groupId>\r
- <artifactId>servlet-api</artifactId>\r
- <version>2.5</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>com.thoughtworks.xstream</groupId>\r
- <artifactId>xstream</artifactId>\r
- <version>1.4.7</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>ch.qos.logback</groupId>\r
- <artifactId>logback-classic</artifactId>\r
- <version>1.2.0</version>\r
- <scope>compile</scope>\r
- </dependency>\r
- <dependency>\r
- <groupId>ch.qos.logback</groupId>\r
- <artifactId>logback-core</artifactId>\r
- <version>1.2.0</version>\r
- <scope>compile</scope>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-server</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-continuation</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-util</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-deploy</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-servlet</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-servlets</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-http</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-security</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-websocket</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.eclipse.jetty</groupId>\r
- <artifactId>jetty-io</artifactId>\r
- <version>7.6.14.v20131031</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.apache.commons</groupId>\r
- <artifactId>commons-io</artifactId>\r
- <version>1.3.2</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>commons-lang</groupId>\r
- <artifactId>commons-lang</artifactId>\r
- <version>2.4</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>commons-io</groupId>\r
- <artifactId>commons-io</artifactId>\r
- <version>2.1</version>\r
- <scope>compile</scope>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.apache.httpcomponents</groupId>\r
- <artifactId>httpcore</artifactId>\r
- <version>4.4</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>commons-codec</groupId>\r
- <artifactId>commons-codec</artifactId>\r
- <version>1.6</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.mozilla</groupId>\r
- <artifactId>rhino</artifactId>\r
- <version>1.7R3</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.apache.james</groupId>\r
- <artifactId>apache-mime4j-core</artifactId>\r
- <version>0.7</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.apache.httpcomponents</groupId>\r
- <artifactId>httpclient</artifactId>\r
- <version>4.5.3</version>\r
- </dependency>\r
- <dependency>\r
- <groupId>org.sonatype.http-testing-harness</groupId>\r
- <artifactId>junit-runner</artifactId>\r
- <version>0.11</version>\r
- <exclusions>\r
- <exclusion>\r
- <groupId>org.databene</groupId>\r
- <artifactId>contiperf</artifactId>\r
- </exclusion>\r
- </exclusions>\r
- </dependency>\r
- <dependency>\r
- <groupId>log4j</groupId>\r
- <artifactId>log4j</artifactId>\r
- <version>1.2.17</version>\r
- <scope>compile</scope>\r
- </dependency>\r
- </dependencies>\r
- <profiles>\r
- <profile>\r
- <id>docker</id>\r
- <properties>\r
- <skipDockerBuild>false</skipDockerBuild>\r
- <skipDockerTag>false</skipDockerTag>\r
- <skipTests>true</skipTests>\r
- </properties>\r
- <build>\r
- <plugins>\r
- <plugin>\r
- <groupId>com.spotify</groupId>\r
- <artifactId>docker-maven-plugin</artifactId>\r
- <version>1.0.0</version>\r
- <configuration>\r
- <imageName>${onap.nexus.dockerregistry.daily}/${datarouter.node.image.name}</imageName>\r
- <dockerDirectory>${docker.location}</dockerDirectory>\r
- <serverId>${onap.nexus.dockerregistry.daily}</serverId>\r
- <skipDockerBuild>false</skipDockerBuild>\r
- <imageTags>\r
- <imageTag>${project.version}</imageTag>\r
- <imageTag>latest</imageTag>\r
- </imageTags>\r
- <forceTags>true</forceTags>\r
- <resources>\r
- <resource>\r
- <targetPath>/</targetPath>\r
- <directory>${project.basedir}</directory>\r
- <excludes>\r
- <exclude>target/**/*</exclude>\r
- <exclude>pom.xml</exclude>\r
- </excludes>\r
- </resource>\r
-\r
- <resource>\r
- <targetPath>/</targetPath>\r
- <directory>${project.build.directory}</directory>\r
- <include>**/**</include>\r
- </resource>\r
- </resources>\r
- </configuration>\r
- </plugin>\r
- </plugins>\r
- </build>\r
- </profile>\r
- </profiles>\r
-\r
- <build>\r
- <finalName>datarouter-node</finalName>\r
- <resources>\r
- <resource>\r
- <directory>src/main/resources</directory>\r
- <filtering>true</filtering>\r
- <includes>\r
- <include>**/*.properties</include>\r
- </includes>\r
- </resource>\r
- <resource>\r
- <directory>src/main/resources</directory>\r
- <filtering>true</filtering>\r
- <includes>\r
- <include>**/EelfMessages.properties</include>\r
- </includes>\r
- </resource>\r
- <resource>\r
- <directory>src/main/resources</directory>\r
- <filtering>true</filtering>\r
- <includes>\r
- <include>**/log4j.properties</include>\r
- </includes>\r
- </resource>\r
- </resources>\r
- <plugins>\r
- <plugin>\r
- <groupId>org.apache.maven.plugins</groupId>\r
- <artifactId>maven-compiler-plugin</artifactId>\r
- <configuration>\r
- <source>1.8</source>\r
- <target>1.8</target>\r
- </configuration>\r
- <version>3.6.0</version>\r
- </plugin>\r
- <plugin>\r
- <artifactId>maven-assembly-plugin</artifactId>\r
- <version>2.4</version>\r
- <configuration>\r
- <descriptorRefs>\r
- <descriptorRef>jar-with-dependencies</descriptorRef>\r
- </descriptorRefs>\r
- <outputDirectory>${basedir}/target/opt/app/datartr/lib</outputDirectory>\r
- <archive>\r
- <manifest>\r
- <addClasspath>true</addClasspath>\r
- <mainClass>org.onap.dmaap.datarouter.node.NodeMain</mainClass>\r
- </manifest>\r
- </archive>\r
- </configuration>\r
- <executions>\r
- <execution>\r
- <id>make-assembly</id>\r
- <!-- this is used for inheritance merges -->\r
- <phase>package</phase>\r
- <!-- bind to the packaging phase -->\r
- <goals>\r
- <goal>single</goal>\r
- </goals>\r
- </execution>\r
- </executions>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.apache.maven.plugins</groupId>\r
- <artifactId>maven-resources-plugin</artifactId>\r
- <version>2.7</version>\r
- <executions>\r
- <execution>\r
- <id>copy-docker-file</id>\r
- <phase>package</phase>\r
- <goals>\r
- <goal>copy-resources</goal>\r
- </goals>\r
- <configuration>\r
- <outputDirectory>${docker.location}</outputDirectory>\r
- <overwrite>true</overwrite>\r
- <resources>\r
- <resource>\r
- <directory>${basedir}/src/main/resources/docker</directory>\r
- <filtering>true</filtering>\r
- <includes>\r
- <include>**/*</include>\r
- </includes>\r
- </resource>\r
- </resources>\r
- </configuration>\r
- </execution>\r
- <execution>\r
- <id>copy-resources</id>\r
- <phase>validate</phase>\r
- <goals>\r
- <goal>copy-resources</goal>\r
- </goals>\r
- <configuration>\r
- <outputDirectory>${basedir}/target/opt/app/datartr/etc</outputDirectory>\r
- <resources>\r
- <resource>\r
- <directory>${basedir}/src/main/resources</directory>\r
- <includes>\r
- <include>misc/**</include>\r
- <include>**/**</include>\r
- </includes>\r
- </resource>\r
- </resources>\r
- </configuration>\r
- </execution>\r
- <execution>\r
- <id>copy-resources-1</id>\r
- <phase>validate</phase>\r
- <goals>\r
- <goal>copy-resources</goal>\r
- </goals>\r
- <configuration>\r
- <outputDirectory>${basedir}/target/opt/app/datartr/self_signed</outputDirectory>\r
- <resources>\r
- <resource>\r
- <directory>${basedir}/self_signed</directory>\r
- <includes>\r
- <include>misc/**</include>\r
- <include>**/**</include>\r
- </includes>\r
- </resource>\r
- </resources>\r
- </configuration>\r
- </execution>\r
- </executions>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.apache.maven.plugins</groupId>\r
- <artifactId>maven-dependency-plugin</artifactId>\r
- <version>2.10</version>\r
- <executions>\r
- <execution>\r
- <id>copy-dependencies</id>\r
- <phase>package</phase>\r
- <goals>\r
- <goal>copy-dependencies</goal>\r
- </goals>\r
- <configuration>\r
- <outputDirectory>${project.build.directory}/opt/app/datartr/lib</outputDirectory>\r
- <overWriteReleases>false</overWriteReleases>\r
- <overWriteSnapshots>false</overWriteSnapshots>\r
- <overWriteIfNewer>true</overWriteIfNewer>\r
- </configuration>\r
- </execution>\r
- </executions>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.apache.maven.plugins</groupId>\r
- <artifactId>maven-javadoc-plugin</artifactId>\r
- <configuration>\r
- <failOnError>false</failOnError>\r
- </configuration>\r
- <executions>\r
- <execution>\r
- <id>attach-javadocs</id>\r
- <goals>\r
- <goal>jar</goal>\r
- </goals>\r
- </execution>\r
- </executions>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.apache.maven.plugins</groupId>\r
- <artifactId>maven-source-plugin</artifactId>\r
- <version>2.2.1</version>\r
- <executions>\r
- <execution>\r
- <id>attach-sources</id>\r
- <goals>\r
- <goal>jar-no-fork</goal>\r
- </goals>\r
- </execution>\r
- </executions>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.codehaus.mojo</groupId>\r
- <artifactId>cobertura-maven-plugin</artifactId>\r
- <version>2.7</version>\r
- <configuration>\r
- <formats>\r
- <format>html</format>\r
- <format>xml</format>\r
- </formats>\r
- <check/>\r
- </configuration>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.sonatype.plugins</groupId>\r
- <artifactId>nexus-staging-maven-plugin</artifactId>\r
- <version>1.6.7</version>\r
- <extensions>true</extensions>\r
- <configuration>\r
- <nexusUrl>${onap.nexus.url}</nexusUrl>\r
- <stagingProfileId>176c31dfe190a</stagingProfileId>\r
- <serverId>ecomp-staging</serverId>\r
- </configuration>\r
- </plugin>\r
- <plugin>\r
- <groupId>org.jacoco</groupId>\r
- <artifactId>jacoco-maven-plugin</artifactId>\r
- <version>${jacoco.version}</version>\r
- <configuration>\r
- <excludes>\r
- <exclude>**/gen/**</exclude>\r
- <exclude>**/generated-sources/**</exclude>\r
- <exclude>**/yang-gen/**</exclude>\r
- <exclude>**/pax/**</exclude>\r
- </excludes>\r
- </configuration>\r
- <executions>\r
- <execution>\r
- <id>pre-unit-test</id>\r
- <goals>\r
- <goal>prepare-agent</goal>\r
- </goals>\r
- <configuration>\r
- <destFile>${project.build.directory}/code-coverage/jacoco-ut.exec</destFile>\r
- <propertyName>surefireArgLine</propertyName>\r
- </configuration>\r
- </execution>\r
- <execution>\r
- <id>post-unit-test</id>\r
- <phase>test</phase>\r
- <goals>\r
- <goal>report</goal>\r
- </goals>\r
- <configuration>\r
- <dataFile>${project.build.directory}/code-coverage/jacoco-ut.exec</dataFile>\r
- <outputDirectory>${project.reporting.outputDirectory}/jacoco-ut</outputDirectory>\r
- </configuration>\r
- </execution>\r
- <execution>\r
- <id>pre-integration-test</id>\r
- <phase>pre-integration-test</phase>\r
- <goals>\r
- <goal>prepare-agent</goal>\r
- </goals>\r
- <configuration>\r
- <destFile>${project.build.directory}/code-coverage/jacoco-it.exec</destFile>\r
- <propertyName>failsafeArgLine</propertyName>\r
- </configuration>\r
- </execution>\r
- <execution>\r
- <id>post-integration-test</id>\r
- <phase>post-integration-test</phase>\r
- <goals>\r
- <goal>report</goal>\r
- </goals>\r
- <configuration>\r
- <dataFile>${project.build.directory}/code-coverage/jacoco-it.exec</dataFile>\r
- <outputDirectory>${project.reporting.outputDirectory}/jacoco-it</outputDirectory>\r
- </configuration>\r
- </execution>\r
- </executions>\r
- </plugin>\r
- </plugins>\r
- </build>\r
-</project>\r
+<!--
+ ============LICENSE_START==================================================
+ * org.onap.dmaap
+ * ===========================================================================
+ * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * ===========================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END====================================================
+ *
+ * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ *
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.onap.dmaap.datarouter</groupId>
+ <artifactId>parent</artifactId>
+ <version>1.0.1-SNAPSHOT</version>
+ <relativePath>../pom.xml</relativePath>
+ </parent>
+ <artifactId>datarouter-node</artifactId>
+ <packaging>jar</packaging>
+ <name>datarouter-node</name>
+ <url>https://github.com/att/DMAAP_DATAROUTER</url>
+ <properties>
+ <sonar.skip>false</sonar.skip>
+ <sonar.jacoco.reportMissing.force.zero>true</sonar.jacoco.reportMissing.force.zero>
+ <sitePath>/content/sites/site/${project.groupId}/${project.artifactId}/${project.version}</sitePath>
+ <docker.location>${basedir}/target/${artifactId}</docker.location>
+ <datarouter.node.image.name>onap/dmaap/datarouter-node</datarouter.node.image.name>
+ </properties>
+ <dependencies>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>3.8.1</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.json</groupId>
+ <artifactId>json</artifactId>
+ <version>20160810</version>
+ </dependency>
+ <dependency>
+ <groupId>javax.mail</groupId>
+ <artifactId>javax.mail-api</artifactId>
+ <version>1.5.1</version>
+ </dependency>
+ <dependency>
+ <groupId>com.att.eelf</groupId>
+ <artifactId>eelf-core</artifactId>
+ <version>0.0.1</version>
+ </dependency>
+ <dependency>
+ <groupId>javax.servlet</groupId>
+ <artifactId>servlet-api</artifactId>
+ <version>2.5</version>
+ </dependency>
+ <dependency>
+ <groupId>com.thoughtworks.xstream</groupId>
+ <artifactId>xstream</artifactId>
+ <version>1.4.7</version>
+ </dependency>
+ <dependency>
+ <groupId>ch.qos.logback</groupId>
+ <artifactId>logback-classic</artifactId>
+ <version>1.2.0</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>ch.qos.logback</groupId>
+ <artifactId>logback-core</artifactId>
+ <version>1.2.0</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-server</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-continuation</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-util</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-deploy</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-servlet</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-servlets</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-http</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-security</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-websocket</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-io</artifactId>
+ <version>7.6.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-io</artifactId>
+ <version>1.3.2</version>
+ </dependency>
+ <dependency>
+ <groupId>commons-lang</groupId>
+ <artifactId>commons-lang</artifactId>
+ <version>2.4</version>
+ </dependency>
+ <dependency>
+ <groupId>commons-io</groupId>
+ <artifactId>commons-io</artifactId>
+ <version>2.1</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpcore</artifactId>
+ <version>4.4</version>
+ </dependency>
+ <dependency>
+ <groupId>commons-codec</groupId>
+ <artifactId>commons-codec</artifactId>
+ <version>1.6</version>
+ </dependency>
+ <dependency>
+ <groupId>org.mozilla</groupId>
+ <artifactId>rhino</artifactId>
+ <version>1.7R3</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.james</groupId>
+ <artifactId>apache-mime4j-core</artifactId>
+ <version>0.7</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpclient</artifactId>
+ <version>4.5.3</version>
+ </dependency>
+ <dependency>
+ <groupId>org.sonatype.http-testing-harness</groupId>
+ <artifactId>junit-runner</artifactId>
+ <version>0.11</version>
+ <exclusions>
+ <exclusion>
+ <groupId>org.databene</groupId>
+ <artifactId>contiperf</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ <version>1.2.17</version>
+ <scope>compile</scope>
+ </dependency>
+ </dependencies>
+ <profiles>
+ <profile>
+ <id>docker</id>
+ <properties>
+ <skipDockerBuild>false</skipDockerBuild>
+ <skipDockerTag>false</skipDockerTag>
+ <skipTests>true</skipTests>
+ </properties>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>com.spotify</groupId>
+ <artifactId>docker-maven-plugin</artifactId>
+ <version>1.0.0</version>
+ <configuration>
+ <imageName>${onap.nexus.dockerregistry.daily}/${datarouter.node.image.name}</imageName>
+ <dockerDirectory>${docker.location}</dockerDirectory>
+ <serverId>${onap.nexus.dockerregistry.daily}</serverId>
+ <skipDockerBuild>false</skipDockerBuild>
+ <imageTags>
+ <imageTag>${project.version}</imageTag>
+ <imageTag>latest</imageTag>
+ </imageTags>
+ <forceTags>true</forceTags>
+ <resources>
+ <resource>
+ <targetPath>/</targetPath>
+ <directory>${project.basedir}</directory>
+ <excludes>
+ <exclude>target/**/*</exclude>
+ <exclude>pom.xml</exclude>
+ </excludes>
+ </resource>
+
+ <resource>
+ <targetPath>/</targetPath>
+ <directory>${project.build.directory}</directory>
+ <include>**/**</include>
+ </resource>
+ </resources>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
+
+ <build>
+ <finalName>datarouter-node</finalName>
+ <resources>
+ <resource>
+ <directory>src/main/resources</directory>
+ <filtering>true</filtering>
+ <includes>
+ <include>**/*.properties</include>
+ </includes>
+ </resource>
+ <resource>
+ <directory>src/main/resources</directory>
+ <filtering>true</filtering>
+ <includes>
+ <include>**/EelfMessages.properties</include>
+ </includes>
+ </resource>
+ <resource>
+ <directory>src/main/resources</directory>
+ <filtering>true</filtering>
+ <includes>
+ <include>**/log4j.properties</include>
+ </includes>
+ </resource>
+ </resources>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <configuration>
+ <source>1.8</source>
+ <target>1.8</target>
+ </configuration>
+ <version>3.6.0</version>
+ </plugin>
+ <plugin>
+ <artifactId>maven-assembly-plugin</artifactId>
+ <version>2.4</version>
+ <configuration>
+ <descriptorRefs>
+ <descriptorRef>jar-with-dependencies</descriptorRef>
+ </descriptorRefs>
+ <outputDirectory>${basedir}/target/opt/app/datartr/lib</outputDirectory>
+ <archive>
+ <manifest>
+ <addClasspath>true</addClasspath>
+ <mainClass>org.onap.dmaap.datarouter.node.NodeMain</mainClass>
+ </manifest>
+ </archive>
+ </configuration>
+ <executions>
+ <execution>
+ <id>make-assembly</id>
+ <!-- this is used for inheritance merges -->
+ <phase>package</phase>
+ <!-- bind to the packaging phase -->
+ <goals>
+ <goal>single</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-resources-plugin</artifactId>
+ <version>2.7</version>
+ <executions>
+ <execution>
+ <id>copy-docker-file</id>
+ <phase>package</phase>
+ <goals>
+ <goal>copy-resources</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${docker.location}</outputDirectory>
+ <overwrite>true</overwrite>
+ <resources>
+ <resource>
+ <directory>${basedir}/src/main/resources/docker</directory>
+ <filtering>true</filtering>
+ <includes>
+ <include>**/*</include>
+ </includes>
+ </resource>
+ </resources>
+ </configuration>
+ </execution>
+ <execution>
+ <id>copy-resources</id>
+ <phase>validate</phase>
+ <goals>
+ <goal>copy-resources</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${basedir}/target/opt/app/datartr/etc</outputDirectory>
+ <resources>
+ <resource>
+ <directory>${basedir}/src/main/resources</directory>
+ <includes>
+ <include>misc/**</include>
+ <include>**/**</include>
+ </includes>
+ </resource>
+ </resources>
+ </configuration>
+ </execution>
+ <execution>
+ <id>copy-resources-1</id>
+ <phase>validate</phase>
+ <goals>
+ <goal>copy-resources</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${basedir}/target/opt/app/datartr/self_signed</outputDirectory>
+ <resources>
+ <resource>
+ <directory>${basedir}/self_signed</directory>
+ <includes>
+ <include>misc/**</include>
+ <include>**/**</include>
+ </includes>
+ </resource>
+ </resources>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-dependency-plugin</artifactId>
+ <version>2.10</version>
+ <executions>
+ <execution>
+ <id>copy-dependencies</id>
+ <phase>package</phase>
+ <goals>
+ <goal>copy-dependencies</goal>
+ </goals>
+ <configuration>
+ <outputDirectory>${project.build.directory}/opt/app/datartr/lib</outputDirectory>
+ <overWriteReleases>false</overWriteReleases>
+ <overWriteSnapshots>false</overWriteSnapshots>
+ <overWriteIfNewer>true</overWriteIfNewer>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <configuration>
+ <failOnError>false</failOnError>
+ </configuration>
+ <executions>
+ <execution>
+ <id>attach-javadocs</id>
+ <goals>
+ <goal>jar</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-source-plugin</artifactId>
+ <version>2.2.1</version>
+ <executions>
+ <execution>
+ <id>attach-sources</id>
+ <goals>
+ <goal>jar-no-fork</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>cobertura-maven-plugin</artifactId>
+ <version>2.7</version>
+ <configuration>
+ <formats>
+ <format>html</format>
+ <format>xml</format>
+ </formats>
+ <check/>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.sonatype.plugins</groupId>
+ <artifactId>nexus-staging-maven-plugin</artifactId>
+ <version>1.6.7</version>
+ <extensions>true</extensions>
+ <configuration>
+ <nexusUrl>${onap.nexus.url}</nexusUrl>
+ <stagingProfileId>176c31dfe190a</stagingProfileId>
+ <serverId>ecomp-staging</serverId>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.jacoco</groupId>
+ <artifactId>jacoco-maven-plugin</artifactId>
+ <version>${jacoco.version}</version>
+ <configuration>
+ <excludes>
+ <exclude>**/gen/**</exclude>
+ <exclude>**/generated-sources/**</exclude>
+ <exclude>**/yang-gen/**</exclude>
+ <exclude>**/pax/**</exclude>
+ </excludes>
+ </configuration>
+ <executions>
+ <execution>
+ <id>pre-unit-test</id>
+ <goals>
+ <goal>prepare-agent</goal>
+ </goals>
+ <configuration>
+ <destFile>${project.build.directory}/code-coverage/jacoco-ut.exec</destFile>
+ <propertyName>surefireArgLine</propertyName>
+ </configuration>
+ </execution>
+ <execution>
+ <id>post-unit-test</id>
+ <phase>test</phase>
+ <goals>
+ <goal>report</goal>
+ </goals>
+ <configuration>
+ <dataFile>${project.build.directory}/code-coverage/jacoco-ut.exec</dataFile>
+ <outputDirectory>${project.reporting.outputDirectory}/jacoco-ut</outputDirectory>
+ </configuration>
+ </execution>
+ <execution>
+ <id>pre-integration-test</id>
+ <phase>pre-integration-test</phase>
+ <goals>
+ <goal>prepare-agent</goal>
+ </goals>
+ <configuration>
+ <destFile>${project.build.directory}/code-coverage/jacoco-it.exec</destFile>
+ <propertyName>failsafeArgLine</propertyName>
+ </configuration>
+ </execution>
+ <execution>
+ <id>post-integration-test</id>
+ <phase>post-integration-test</phase>
+ <goals>
+ <goal>report</goal>
+ </goals>
+ <configuration>
+ <dataFile>${project.build.directory}/code-coverage/jacoco-it.exec</dataFile>
+ <outputDirectory>${project.reporting.outputDirectory}/jacoco-it</outputDirectory>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-import java.io.*;\r
-import org.apache.log4j.Logger;\r
-\r
-/**\r
- * Main control point for delivering files to destinations.\r
- * <p>\r
- * The Delivery class manages assignment of delivery threads to delivery\r
- * queues and creation and destruction of delivery queues as\r
- * configuration changes. DeliveryQueues are assigned threads based on a\r
- * modified round-robin approach giving priority to queues with more work\r
- * as measured by both bytes to deliver and files to deliver and lower\r
- * priority to queues that already have delivery threads working.\r
- * A delivery thread continues to work for a delivery queue as long as\r
- * that queue has more files to deliver.\r
- */\r
-public class Delivery {\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.Delivery");\r
- private static class DelItem implements Comparable<DelItem> {\r
- private String pubid;\r
- private String spool;\r
- public int compareTo(DelItem x) {\r
- int i = pubid.compareTo(x.pubid);\r
- if (i == 0) {\r
- i = spool.compareTo(x.spool);\r
- }\r
- return(i);\r
- }\r
- public String getPublishId() {\r
- return(pubid);\r
- }\r
- public String getSpool() {\r
- return(spool);\r
- }\r
- public DelItem(String pubid, String spool) {\r
- this.pubid = pubid;\r
- this.spool = spool;\r
- }\r
- }\r
- private double fdstart;\r
- private double fdstop;\r
- private int threads;\r
- private int curthreads;\r
- private NodeConfigManager config;\r
- private Hashtable<String, DeliveryQueue> dqs = new Hashtable<String, DeliveryQueue>();\r
- private DeliveryQueue[] queues = new DeliveryQueue[0];\r
- private int qpos = 0;\r
- private long nextcheck;\r
- private Runnable cmon = new Runnable() {\r
- public void run() {\r
- checkconfig();\r
- }\r
- };\r
- /**\r
- * Constructs a new Delivery system using the specified configuration manager.\r
- * @param config The configuration manager for this delivery system.\r
- */\r
- public Delivery(NodeConfigManager config) {\r
- this.config = config;\r
- config.registerConfigTask(cmon);\r
- checkconfig();\r
- }\r
- private void cleardir(String dir) {\r
- if (dqs.get(dir) != null) {\r
- return;\r
- }\r
- File fdir = new File(dir);\r
- for (File junk: fdir.listFiles()) {\r
- if (junk.isFile()) {\r
- junk.delete();\r
- }\r
- }\r
- fdir.delete();\r
- }\r
- private void freeDiskCheck() {\r
- File spoolfile = new File(config.getSpoolBase());\r
- long tspace = spoolfile.getTotalSpace();\r
- long start = (long)(tspace * fdstart);\r
- long stop = (long)(tspace * fdstop);\r
- long cur = spoolfile.getUsableSpace();\r
- if (cur >= start) {\r
- return;\r
- }\r
- Vector<DelItem> cv = new Vector<DelItem>();\r
- for (String sdir: dqs.keySet()) {\r
- for (String meta: (new File(sdir)).list()) {\r
- if (!meta.endsWith(".M") || meta.charAt(0) == '.') {\r
- continue;\r
- }\r
- cv.add(new DelItem(meta.substring(0, meta.length() - 2), sdir));\r
- }\r
- }\r
- DelItem[] items = cv.toArray(new DelItem[cv.size()]);\r
- Arrays.sort(items);\r
- logger.info("NODE0501 Free disk space below red threshold. current=" + cur + " red=" + start + " total=" + tspace);\r
- for (DelItem item: items) {\r
- long amount = dqs.get(item.getSpool()).cancelTask(item.getPublishId());\r
- logger.info("NODE0502 Attempting to discard " + item.getSpool() + "/" + item.getPublishId() + " to free up disk");\r
- if (amount > 0) {\r
- cur += amount;\r
- if (cur >= stop) {\r
- cur = spoolfile.getUsableSpace();\r
- }\r
- if (cur >= stop) {\r
- logger.info("NODE0503 Free disk space at or above yellow threshold. current=" + cur + " yellow=" + stop + " total=" + tspace);\r
- return;\r
- }\r
- }\r
- }\r
- cur = spoolfile.getUsableSpace();\r
- if (cur >= stop) {\r
- logger.info("NODE0503 Free disk space at or above yellow threshold. current=" + cur + " yellow=" + stop + " total=" + tspace);\r
- return;\r
- }\r
- logger.warn("NODE0504 Unable to recover sufficient disk space to reach green status. current=" + cur + " yellow=" + stop + " total=" + tspace);\r
- }\r
- private void cleardirs() {\r
- String basedir = config.getSpoolBase();\r
- String nbase = basedir + "/n";\r
- for (String nodedir: (new File(nbase)).list()) {\r
- if (!nodedir.startsWith(".")) {\r
- cleardir(nbase + "/" + nodedir);\r
- }\r
- }\r
- String sxbase = basedir + "/s";\r
- for (String sxdir: (new File(sxbase)).list()) {\r
- if (sxdir.startsWith(".")) {\r
- continue;\r
- }\r
- File sxf = new File(sxbase + "/" + sxdir);\r
- for (String sdir: sxf.list()) {\r
- if (!sdir.startsWith(".")) {\r
- cleardir(sxbase + "/" + sxdir + "/" + sdir);\r
- }\r
- }\r
- sxf.delete(); // won't if anything still in it\r
- }\r
- }\r
- private synchronized void checkconfig() {\r
- if (!config.isConfigured()) {\r
- return;\r
- }\r
- fdstart = config.getFreeDiskStart();\r
- fdstop = config.getFreeDiskStop();\r
- threads = config.getDeliveryThreads();\r
- if (threads < 1) {\r
- threads = 1;\r
- }\r
- DestInfo[] alldis = config.getAllDests();\r
- DeliveryQueue[] nqs = new DeliveryQueue[alldis.length];\r
- qpos = 0;\r
- Hashtable<String, DeliveryQueue> ndqs = new Hashtable<String, DeliveryQueue>();\r
- for (DestInfo di: alldis) {\r
- String spl = di.getSpool();\r
- DeliveryQueue dq = dqs.get(spl);\r
- if (dq == null) {\r
- dq = new DeliveryQueue(config, di);\r
- } else {\r
- dq.config(di);\r
- }\r
- ndqs.put(spl, dq);\r
- nqs[qpos++] = dq;\r
- }\r
- queues = nqs;\r
- dqs = ndqs;\r
- cleardirs();\r
- while (curthreads < threads) {\r
- curthreads++;\r
- (new Thread() {\r
- {\r
- setName("Delivery Thread");\r
- }\r
- public void run() {\r
- dodelivery();\r
- }\r
- }).start();\r
- }\r
- nextcheck = 0;\r
- notify();\r
- }\r
- private void dodelivery() {\r
- DeliveryQueue dq;\r
- while ((dq = getNextQueue()) != null) {\r
- dq.run();\r
- }\r
- }\r
- private synchronized DeliveryQueue getNextQueue() {\r
- while (true) {\r
- if (curthreads > threads) {\r
- curthreads--;\r
- return(null);\r
- }\r
- if (qpos < queues.length) {\r
- DeliveryQueue dq = queues[qpos++];\r
- if (dq.isSkipSet()) {\r
- continue;\r
- }\r
- nextcheck = 0;\r
- notify();\r
- return(dq);\r
- }\r
- long now = System.currentTimeMillis();\r
- if (now < nextcheck) {\r
- try {\r
- wait(nextcheck + 500 - now);\r
- } catch (Exception e) {\r
- }\r
- now = System.currentTimeMillis();\r
- }\r
- if (now >= nextcheck) {\r
- nextcheck = now + 5000;\r
- qpos = 0;\r
- freeDiskCheck();\r
- }\r
- }\r
- }\r
- /**\r
- * Reset the retry timer for a delivery queue\r
- */\r
- public synchronized void resetQueue(String spool) {\r
- if (spool != null) {\r
- DeliveryQueue dq = dqs.get(spool);\r
- if (dq != null) {\r
- dq.resetQueue();\r
- }\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+import java.io.*;
+
+import org.apache.log4j.Logger;
+
+/**
+ * Main control point for delivering files to destinations.
+ * <p>
+ * The Delivery class manages assignment of delivery threads to delivery
+ * queues and creation and destruction of delivery queues as
+ * configuration changes. DeliveryQueues are assigned threads based on a
+ * modified round-robin approach giving priority to queues with more work
+ * as measured by both bytes to deliver and files to deliver and lower
+ * priority to queues that already have delivery threads working.
+ * A delivery thread continues to work for a delivery queue as long as
+ * that queue has more files to deliver.
+ */
+public class Delivery {
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.Delivery");
+
+ private static class DelItem implements Comparable<DelItem> {
+ private String pubid;
+ private String spool;
+
+ public int compareTo(DelItem x) {
+ int i = pubid.compareTo(x.pubid);
+ if (i == 0) {
+ i = spool.compareTo(x.spool);
+ }
+ return (i);
+ }
+
+ public String getPublishId() {
+ return (pubid);
+ }
+
+ public String getSpool() {
+ return (spool);
+ }
+
+ public DelItem(String pubid, String spool) {
+ this.pubid = pubid;
+ this.spool = spool;
+ }
+ }
+
+ private double fdstart;
+ private double fdstop;
+ private int threads;
+ private int curthreads;
+ private NodeConfigManager config;
+ private Hashtable<String, DeliveryQueue> dqs = new Hashtable<String, DeliveryQueue>();
+ private DeliveryQueue[] queues = new DeliveryQueue[0];
+ private int qpos = 0;
+ private long nextcheck;
+ private Runnable cmon = new Runnable() {
+ public void run() {
+ checkconfig();
+ }
+ };
+
+ /**
+ * Constructs a new Delivery system using the specified configuration manager.
+ *
+ * @param config The configuration manager for this delivery system.
+ */
+ public Delivery(NodeConfigManager config) {
+ this.config = config;
+ config.registerConfigTask(cmon);
+ checkconfig();
+ }
+
+ private void cleardir(String dir) {
+ if (dqs.get(dir) != null) {
+ return;
+ }
+ File fdir = new File(dir);
+ for (File junk : fdir.listFiles()) {
+ if (junk.isFile()) {
+ junk.delete();
+ }
+ }
+ fdir.delete();
+ }
+
+ private void freeDiskCheck() {
+ File spoolfile = new File(config.getSpoolBase());
+ long tspace = spoolfile.getTotalSpace();
+ long start = (long) (tspace * fdstart);
+ long stop = (long) (tspace * fdstop);
+ long cur = spoolfile.getUsableSpace();
+ if (cur >= start) {
+ return;
+ }
+ Vector<DelItem> cv = new Vector<DelItem>();
+ for (String sdir : dqs.keySet()) {
+ for (String meta : (new File(sdir)).list()) {
+ if (!meta.endsWith(".M") || meta.charAt(0) == '.') {
+ continue;
+ }
+ cv.add(new DelItem(meta.substring(0, meta.length() - 2), sdir));
+ }
+ }
+ DelItem[] items = cv.toArray(new DelItem[cv.size()]);
+ Arrays.sort(items);
+ logger.info("NODE0501 Free disk space below red threshold. current=" + cur + " red=" + start + " total=" + tspace);
+ for (DelItem item : items) {
+ long amount = dqs.get(item.getSpool()).cancelTask(item.getPublishId());
+ logger.info("NODE0502 Attempting to discard " + item.getSpool() + "/" + item.getPublishId() + " to free up disk");
+ if (amount > 0) {
+ cur += amount;
+ if (cur >= stop) {
+ cur = spoolfile.getUsableSpace();
+ }
+ if (cur >= stop) {
+ logger.info("NODE0503 Free disk space at or above yellow threshold. current=" + cur + " yellow=" + stop + " total=" + tspace);
+ return;
+ }
+ }
+ }
+ cur = spoolfile.getUsableSpace();
+ if (cur >= stop) {
+ logger.info("NODE0503 Free disk space at or above yellow threshold. current=" + cur + " yellow=" + stop + " total=" + tspace);
+ return;
+ }
+ logger.warn("NODE0504 Unable to recover sufficient disk space to reach green status. current=" + cur + " yellow=" + stop + " total=" + tspace);
+ }
+
+ private void cleardirs() {
+ String basedir = config.getSpoolBase();
+ String nbase = basedir + "/n";
+ for (String nodedir : (new File(nbase)).list()) {
+ if (!nodedir.startsWith(".")) {
+ cleardir(nbase + "/" + nodedir);
+ }
+ }
+ String sxbase = basedir + "/s";
+ for (String sxdir : (new File(sxbase)).list()) {
+ if (sxdir.startsWith(".")) {
+ continue;
+ }
+ File sxf = new File(sxbase + "/" + sxdir);
+ for (String sdir : sxf.list()) {
+ if (!sdir.startsWith(".")) {
+ cleardir(sxbase + "/" + sxdir + "/" + sdir);
+ }
+ }
+ sxf.delete(); // File.delete() fails (returns false) if the directory still contains entries
+ }
+ }
+
+ private synchronized void checkconfig() {
+ if (!config.isConfigured()) {
+ return;
+ }
+ fdstart = config.getFreeDiskStart();
+ fdstop = config.getFreeDiskStop();
+ threads = config.getDeliveryThreads();
+ if (threads < 1) {
+ threads = 1;
+ }
+ DestInfo[] alldis = config.getAllDests();
+ DeliveryQueue[] nqs = new DeliveryQueue[alldis.length];
+ qpos = 0;
+ Hashtable<String, DeliveryQueue> ndqs = new Hashtable<String, DeliveryQueue>();
+ for (DestInfo di : alldis) {
+ String spl = di.getSpool();
+ DeliveryQueue dq = dqs.get(spl);
+ if (dq == null) {
+ dq = new DeliveryQueue(config, di);
+ } else {
+ dq.config(di);
+ }
+ ndqs.put(spl, dq);
+ nqs[qpos++] = dq;
+ }
+ queues = nqs;
+ dqs = ndqs;
+ cleardirs();
+ while (curthreads < threads) {
+ curthreads++;
+ (new Thread() {
+ {
+ setName("Delivery Thread");
+ }
+
+ public void run() {
+ dodelivery();
+ }
+ }).start();
+ }
+ nextcheck = 0;
+ notify();
+ }
+
+ private void dodelivery() {
+ DeliveryQueue dq;
+ while ((dq = getNextQueue()) != null) {
+ dq.run();
+ }
+ }
+
+ private synchronized DeliveryQueue getNextQueue() {
+ while (true) {
+ if (curthreads > threads) {
+ curthreads--;
+ return (null);
+ }
+ if (qpos < queues.length) {
+ DeliveryQueue dq = queues[qpos++];
+ if (dq.isSkipSet()) {
+ continue;
+ }
+ nextcheck = 0;
+ notify();
+ return (dq);
+ }
+ long now = System.currentTimeMillis();
+ if (now < nextcheck) {
+ try {
+ wait(nextcheck + 500 - now);
+ } catch (Exception e) {
+ }
+ now = System.currentTimeMillis();
+ }
+ if (now >= nextcheck) {
+ nextcheck = now + 5000;
+ qpos = 0;
+ freeDiskCheck();
+ }
+ }
+ }
+
+ /**
+ * Reset the retry timer for a delivery queue
+ */
+ public synchronized void resetQueue(String spool) {
+ if (spool != null) {
+ DeliveryQueue dq = dqs.get(spool);
+ if (dq != null) {
+ dq.resetQueue();
+ }
+ }
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.io.*;\r
-import java.util.*;\r
-\r
-/**\r
- * Mechanism for monitoring and controlling delivery of files to a destination.\r
- * <p>\r
- * The DeliveryQueue class maintains lists of DeliveryTasks for a single\r
- * destination (a subscription or another data router node) and assigns\r
- * delivery threads to try to deliver them. It also maintains a delivery\r
- * status that causes it to back off on delivery attempts after a failure.\r
- * <p>\r
- * If the most recent delivery result was a failure, then no more attempts\r
- * will be made for a period of time. Initially, and on the first failure\r
- * following a success, this delay will be DeliveryQueueHelper.getInitFailureTimer() (milliseconds).\r
- * If, after this delay, additional failures occur, each failure will\r
- * multiply the delay by DeliveryQueueHelper.getFailureBackoff() up to a\r
- * maximum delay specified by DeliveryQueueHelper.getMaxFailureTimer().\r
- * Note that this behavior applies to the delivery queue as a whole and not\r
- * to individual files in the queue. If multiple files are being\r
- * delivered and one fails, the delay will be started. If a second\r
- * delivery fails while the delay was active, it will not change the delay\r
- * or change the duration of any subsequent delay.\r
- * If, however, it succeeds, it will cancel the delay.\r
- * <p>\r
- * The queue maintains 3 collections of files to deliver: A todo list of\r
- * files that will be attempted, a working set of files that are being\r
- * attempted, and a retry set of files that were attempted and failed.\r
- * Whenever the todo list is empty and needs to be refilled, a scan of the\r
- * spool directory is made and the file names sorted. Any files in the working set are ignored.\r
- * If a DeliveryTask for the file is in the retry set, then that delivery\r
- * task is placed on the todo list. Otherwise, a new DeliveryTask for the\r
- * file is created and placed on the todo list.\r
- * If, when a DeliveryTask is about to be removed from the todo list, its\r
- * age exceeds DeliveryQueueHelper.getExpirationTimer(), then it is instead\r
- * marked as expired.\r
- * <p>\r
- * A delivery queue also maintains a skip flag. This flag is true if the\r
- * failure timer is active or if no files are found in a directory scan.\r
- */\r
-public class DeliveryQueue implements Runnable, DeliveryTaskHelper {\r
- private DeliveryQueueHelper dqh;\r
- private DestInfo di;\r
- private Hashtable<String, DeliveryTask> working = new Hashtable<String, DeliveryTask>();\r
- private Hashtable<String, DeliveryTask> retry = new Hashtable<String, DeliveryTask>();\r
- private int todoindex;\r
- private boolean failed;\r
- private long failduration;\r
- private long resumetime;\r
- File dir;\r
- private Vector<DeliveryTask> todo = new Vector<DeliveryTask>();\r
- /**\r
- * Try to cancel a delivery task.\r
- * @return The length of the task in bytes or 0 if the task cannot be cancelled.\r
- */\r
- public synchronized long cancelTask(String pubid) {\r
- if (working.get(pubid) != null) {\r
- return(0);\r
- }\r
- DeliveryTask dt = retry.get(pubid);\r
- if (dt == null) {\r
- for (int i = todoindex; i < todo.size(); i++) {\r
- DeliveryTask xdt = todo.get(i);\r
- if (xdt.getPublishId().equals(pubid)) {\r
- dt = xdt;\r
- break;\r
- }\r
- }\r
- }\r
- if (dt == null) {\r
- dt = new DeliveryTask(this, pubid);\r
- if (dt.getFileId() == null) {\r
- return(0);\r
- }\r
- }\r
- if (dt.isCleaned()) {\r
- return(0);\r
- }\r
- StatusLog.logExp(dt.getPublishId(), dt.getFeedId(), dt.getSubId(), dt.getURL(), dt.getMethod(), dt.getCType(), dt.getLength(), "diskFull", dt.getAttempts());\r
- dt.clean();\r
- return(dt.getLength());\r
- }\r
- /**\r
- * Mark that a delivery task has succeeded.\r
- */\r
- public synchronized void markSuccess(DeliveryTask task) {\r
- working.remove(task.getPublishId());\r
- task.clean();\r
- failed = false;\r
- failduration = 0;\r
- }\r
- /**\r
- * Mark that a delivery task has expired.\r
- */\r
- public synchronized void markExpired(DeliveryTask task) {\r
- task.clean();\r
- }\r
- /**\r
- * Mark that a delivery task has failed permanently.\r
- */\r
- public synchronized void markFailNoRetry(DeliveryTask task) {\r
- working.remove(task.getPublishId());\r
- task.clean();\r
- failed = false;\r
- failduration = 0;\r
- }\r
- private void fdupdate() {\r
- if (!failed) {\r
- failed = true;\r
- if (failduration == 0) {\r
- failduration = dqh.getInitFailureTimer();\r
- }\r
- resumetime = System.currentTimeMillis() + failduration;\r
- long maxdur = dqh.getMaxFailureTimer();\r
- failduration = (long)(failduration * dqh.getFailureBackoff());\r
- if (failduration > maxdur) {\r
- failduration = maxdur;\r
- }\r
- }\r
- }\r
- /**\r
- * Mark that a delivery task has been redirected.\r
- */\r
- public synchronized void markRedirect(DeliveryTask task) {\r
- working.remove(task.getPublishId());\r
- retry.put(task.getPublishId(), task);\r
- }\r
- /**\r
- * Mark that a delivery task has temporarily failed.\r
- */\r
- public synchronized void markFailWithRetry(DeliveryTask task) {\r
- working.remove(task.getPublishId());\r
- retry.put(task.getPublishId(), task);\r
- fdupdate();\r
- }\r
- /**\r
- * Get the next task.\r
- */\r
- public synchronized DeliveryTask getNext() {\r
- DeliveryTask ret = peekNext();\r
- if (ret != null) {\r
- todoindex++;\r
- working.put(ret.getPublishId(), ret);\r
- }\r
- return(ret);\r
- }\r
- /**\r
- * Peek at the next task.\r
- */\r
- public synchronized DeliveryTask peekNext() {\r
- long now = System.currentTimeMillis();\r
- long mindate = now - dqh.getExpirationTimer();\r
- if (failed) {\r
- if (now > resumetime) {\r
- failed = false;\r
- } else {\r
- return(null);\r
- }\r
- }\r
- while (true) {\r
- if (todoindex >= todo.size()) {\r
- todoindex = 0;\r
- todo = new Vector<DeliveryTask>();\r
- String[] files = dir.list();\r
- Arrays.sort(files);\r
- for (String fname: files) {\r
- if (!fname.endsWith(".M")) {\r
- continue;\r
- }\r
- String fname2 = fname.substring(0, fname.length() - 2);\r
- long pidtime = 0;\r
- int dot = fname2.indexOf('.');\r
- if (dot < 1) {\r
- continue;\r
- }\r
- try {\r
- pidtime = Long.parseLong(fname2.substring(0, dot));\r
- } catch (Exception e) {\r
- }\r
- if (pidtime < 1000000000000L) {\r
- continue;\r
- }\r
- if (working.get(fname2) != null) {\r
- continue;\r
- }\r
- DeliveryTask dt = retry.get(fname2);\r
- if (dt == null) {\r
- dt = new DeliveryTask(this, fname2);\r
- }\r
- todo.add(dt);\r
- }\r
- retry = new Hashtable<String, DeliveryTask>();\r
- }\r
- if (todoindex < todo.size()) {\r
- DeliveryTask dt = todo.get(todoindex);\r
- if (dt.isCleaned()) {\r
- todoindex++;\r
- continue;\r
- }\r
- if (dt.getDate() >= mindate) {\r
- return(dt);\r
- }\r
- todoindex++;\r
- reportExpiry(dt);\r
- continue;\r
- }\r
- return(null);\r
- }\r
- }\r
- /**\r
- * Create a delivery queue for a given destination info\r
- */\r
- public DeliveryQueue(DeliveryQueueHelper dqh, DestInfo di) {\r
- this.dqh = dqh;\r
- this.di = di;\r
- dir = new File(di.getSpool());\r
- dir.mkdirs();\r
- }\r
- /**\r
- * Update the destination info for this delivery queue\r
- */\r
- public void config(DestInfo di) {\r
- this.di = di;\r
- }\r
- /**\r
- * Get the dest info\r
- */\r
- public DestInfo getDestInfo() {\r
- return(di);\r
- }\r
- /**\r
- * Get the config manager\r
- */\r
- public DeliveryQueueHelper getConfig() {\r
- return(dqh);\r
- }\r
- /**\r
- * Exceptional condition occurred during delivery\r
- */\r
- public void reportDeliveryExtra(DeliveryTask task, long sent) {\r
- StatusLog.logDelExtra(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getLength(), sent);\r
- }\r
- /**\r
- * Message too old to deliver\r
- */\r
- public void reportExpiry(DeliveryTask task) {\r
- StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "retriesExhausted", task.getAttempts());\r
- markExpired(task);\r
- }\r
- /**\r
- * Completed a delivery attempt\r
- */\r
- public void reportStatus(DeliveryTask task, int status, String xpubid, String location) {\r
- if (status < 300) {\r
- StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, xpubid);\r
- markSuccess(task);\r
- } else if (status < 400 && dqh.isFollowRedirects()) {\r
- StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);\r
- if (dqh.handleRedirection(di, location, task.getFileId())) {\r
- markRedirect(task);\r
- } else {\r
- StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "notRetryable", task.getAttempts());\r
- markFailNoRetry(task);\r
- }\r
- } else if (status < 500) {\r
- StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);\r
- StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "notRetryable", task.getAttempts());\r
- markFailNoRetry(task);\r
- } else {\r
- StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);\r
- markFailWithRetry(task);\r
- }\r
- }\r
- /**\r
- * Delivery failed by reason of an exception\r
- */\r
- public void reportException(DeliveryTask task, Exception exception) {\r
- StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), -1, exception.toString());\r
- dqh.handleUnreachable(di);\r
- markFailWithRetry(task);\r
- }\r
- /**\r
- * Get the feed ID for a subscription\r
- * @param subid The subscription ID\r
- * @return The feed ID\r
- */\r
- public String getFeedId(String subid) {\r
- return(dqh.getFeedId(subid));\r
- }\r
- /**\r
- * Get the URL to deliver a message to given the file ID\r
- */\r
- public String getDestURL(String fileid) {\r
- return(dqh.getDestURL(di, fileid));\r
- }\r
- /**\r
- * Deliver files until there's a failure or there are no more\r
- * files to deliver\r
- */\r
- public void run() {\r
- DeliveryTask t;\r
- long endtime = System.currentTimeMillis() + dqh.getFairTimeLimit();\r
- int filestogo = dqh.getFairFileLimit();\r
- while ((t = getNext()) != null) {\r
- t.run();\r
- if (--filestogo <= 0 || System.currentTimeMillis() > endtime) {\r
- break;\r
- }\r
- }\r
- }\r
- /**\r
- * Is there no work to do for this queue right now?\r
- */\r
- public synchronized boolean isSkipSet() {\r
- return(peekNext() == null);\r
- }\r
- /**\r
- * Reset the retry timer\r
- */\r
- public void resetQueue() {\r
- resumetime = System.currentTimeMillis();\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.io.*;
+import java.util.*;
+
+/**
+ * Mechanism for monitoring and controlling delivery of files to a destination.
+ * <p>
+ * The DeliveryQueue class maintains lists of DeliveryTasks for a single
+ * destination (a subscription or another data router node) and assigns
+ * delivery threads to try to deliver them. It also maintains a delivery
+ * status that causes it to back off on delivery attempts after a failure.
+ * <p>
+ * If the most recent delivery result was a failure, then no more attempts
+ * will be made for a period of time. Initially, and on the first failure
+ * following a success, this delay will be DeliveryQueueHelper.getInitFailureTimer() (milliseconds).
+ * If, after this delay, additional failures occur, each failure will
+ * multiply the delay by DeliveryQueueHelper.getFailureBackoff() up to a
+ * maximum delay specified by DeliveryQueueHelper.getMaxFailureTimer().
+ * Note that this behavior applies to the delivery queue as a whole and not
+ * to individual files in the queue. If multiple files are being
+ * delivered and one fails, the delay will be started. If a second
+ * delivery fails while the delay was active, it will not change the delay
+ * or change the duration of any subsequent delay.
+ * If, however, it succeeds, it will cancel the delay.
+ * <p>
+ * The queue maintains 3 collections of files to deliver: A todo list of
+ * files that will be attempted, a working set of files that are being
+ * attempted, and a retry set of files that were attempted and failed.
+ * Whenever the todo list is empty and needs to be refilled, a scan of the
+ * spool directory is made and the file names sorted. Any files in the working set are ignored.
+ * If a DeliveryTask for the file is in the retry set, then that delivery
+ * task is placed on the todo list. Otherwise, a new DeliveryTask for the
+ * file is created and placed on the todo list.
+ * If, when a DeliveryTask is about to be removed from the todo list, its
+ * age exceeds DeliveryQueueHelper.getExpirationTimer(), then it is instead
+ * marked as expired.
+ * <p>
+ * A delivery queue also maintains a skip flag. This flag is true if the
+ * failure timer is active or if no files are found in a directory scan.
+ */
+public class DeliveryQueue implements Runnable, DeliveryTaskHelper {
+ private DeliveryQueueHelper dqh;
+ private DestInfo di;
+ private Hashtable<String, DeliveryTask> working = new Hashtable<String, DeliveryTask>();
+ private Hashtable<String, DeliveryTask> retry = new Hashtable<String, DeliveryTask>();
+ private int todoindex;
+ private boolean failed;
+ private long failduration;
+ private long resumetime;
+ File dir;
+ private Vector<DeliveryTask> todo = new Vector<DeliveryTask>();
+
+ /**
+ * Try to cancel a delivery task.
+ *
+ * @return The length of the task in bytes or 0 if the task cannot be cancelled.
+ */
+ public synchronized long cancelTask(String pubid) {
+ if (working.get(pubid) != null) {
+ return (0);
+ }
+ DeliveryTask dt = retry.get(pubid);
+ if (dt == null) {
+ for (int i = todoindex; i < todo.size(); i++) {
+ DeliveryTask xdt = todo.get(i);
+ if (xdt.getPublishId().equals(pubid)) {
+ dt = xdt;
+ break;
+ }
+ }
+ }
+ if (dt == null) {
+ dt = new DeliveryTask(this, pubid);
+ if (dt.getFileId() == null) {
+ return (0);
+ }
+ }
+ if (dt.isCleaned()) {
+ return (0);
+ }
+ StatusLog.logExp(dt.getPublishId(), dt.getFeedId(), dt.getSubId(), dt.getURL(), dt.getMethod(), dt.getCType(), dt.getLength(), "diskFull", dt.getAttempts());
+ dt.clean();
+ return (dt.getLength());
+ }
+
+ /**
+ * Mark that a delivery task has succeeded.
+ */
+ public synchronized void markSuccess(DeliveryTask task) {
+ working.remove(task.getPublishId());
+ task.clean();
+ failed = false;
+ failduration = 0;
+ }
+
+ /**
+ * Mark that a delivery task has expired.
+ */
+ public synchronized void markExpired(DeliveryTask task) {
+ task.clean();
+ }
+
+ /**
+ * Mark that a delivery task has failed permanently.
+ */
+ public synchronized void markFailNoRetry(DeliveryTask task) {
+ working.remove(task.getPublishId());
+ task.clean();
+ failed = false;
+ failduration = 0;
+ }
+
+ private void fdupdate() {
+ if (!failed) {
+ failed = true;
+ if (failduration == 0) {
+ failduration = dqh.getInitFailureTimer();
+ }
+ resumetime = System.currentTimeMillis() + failduration;
+ long maxdur = dqh.getMaxFailureTimer();
+ failduration = (long) (failduration * dqh.getFailureBackoff());
+ if (failduration > maxdur) {
+ failduration = maxdur;
+ }
+ }
+ }
+
+ /**
+ * Mark that a delivery task has been redirected.
+ */
+ public synchronized void markRedirect(DeliveryTask task) {
+ working.remove(task.getPublishId());
+ retry.put(task.getPublishId(), task);
+ }
+
+ /**
+ * Mark that a delivery task has temporarily failed.
+ */
+ public synchronized void markFailWithRetry(DeliveryTask task) {
+ working.remove(task.getPublishId());
+ retry.put(task.getPublishId(), task);
+ fdupdate();
+ }
+
+ /**
+ * Get the next task.
+ */
+ public synchronized DeliveryTask getNext() {
+ DeliveryTask ret = peekNext();
+ if (ret != null) {
+ todoindex++;
+ working.put(ret.getPublishId(), ret);
+ }
+ return (ret);
+ }
+
+ /**
+ * Peek at the next task.
+ */
+ public synchronized DeliveryTask peekNext() {
+ long now = System.currentTimeMillis();
+ long mindate = now - dqh.getExpirationTimer();
+ if (failed) {
+ if (now > resumetime) {
+ failed = false;
+ } else {
+ return (null);
+ }
+ }
+ while (true) {
+ if (todoindex >= todo.size()) {
+ todoindex = 0;
+ todo = new Vector<DeliveryTask>();
+ String[] files = dir.list();
+ Arrays.sort(files);
+ for (String fname : files) {
+ if (!fname.endsWith(".M")) {
+ continue;
+ }
+ String fname2 = fname.substring(0, fname.length() - 2);
+ long pidtime = 0;
+ int dot = fname2.indexOf('.');
+ if (dot < 1) {
+ continue;
+ }
+ try {
+ pidtime = Long.parseLong(fname2.substring(0, dot));
+ } catch (Exception e) {
+ }
+ if (pidtime < 1000000000000L) {
+ continue;
+ }
+ if (working.get(fname2) != null) {
+ continue;
+ }
+ DeliveryTask dt = retry.get(fname2);
+ if (dt == null) {
+ dt = new DeliveryTask(this, fname2);
+ }
+ todo.add(dt);
+ }
+ retry = new Hashtable<String, DeliveryTask>();
+ }
+ if (todoindex < todo.size()) {
+ DeliveryTask dt = todo.get(todoindex);
+ if (dt.isCleaned()) {
+ todoindex++;
+ continue;
+ }
+ if (dt.getDate() >= mindate) {
+ return (dt);
+ }
+ todoindex++;
+ reportExpiry(dt);
+ continue;
+ }
+ return (null);
+ }
+ }
+
+ /**
+ * Create a delivery queue for a given destination info
+ */
+ public DeliveryQueue(DeliveryQueueHelper dqh, DestInfo di) {
+ this.dqh = dqh;
+ this.di = di;
+ dir = new File(di.getSpool());
+ dir.mkdirs();
+ }
+
+ /**
+ * Update the destination info for this delivery queue
+ */
+ public void config(DestInfo di) {
+ this.di = di;
+ }
+
+ /**
+ * Get the dest info
+ */
+ public DestInfo getDestInfo() {
+ return (di);
+ }
+
+ /**
+ * Get the config manager
+ */
+ public DeliveryQueueHelper getConfig() {
+ return (dqh);
+ }
+
+ /**
+ * Exceptional condition occurred during delivery
+ */
+ public void reportDeliveryExtra(DeliveryTask task, long sent) {
+ StatusLog.logDelExtra(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getLength(), sent);
+ }
+
+ /**
+ * Message too old to deliver
+ */
+ public void reportExpiry(DeliveryTask task) {
+ StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "retriesExhausted", task.getAttempts());
+ markExpired(task);
+ }
+
+ /**
+ * Completed a delivery attempt
+ */
+ public void reportStatus(DeliveryTask task, int status, String xpubid, String location) {
+ if (status < 300) {
+ StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, xpubid);
+ markSuccess(task);
+ } else if (status < 400 && dqh.isFollowRedirects()) {
+ StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);
+ if (dqh.handleRedirection(di, location, task.getFileId())) {
+ markRedirect(task);
+ } else {
+ StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "notRetryable", task.getAttempts());
+ markFailNoRetry(task);
+ }
+ } else if (status < 500) {
+ StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);
+ StatusLog.logExp(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), "notRetryable", task.getAttempts());
+ markFailNoRetry(task);
+ } else {
+ StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), status, location);
+ markFailWithRetry(task);
+ }
+ }
+
+ /**
+ * Delivery failed by reason of an exception
+ */
+ public void reportException(DeliveryTask task, Exception exception) {
+ StatusLog.logDel(task.getPublishId(), task.getFeedId(), task.getSubId(), task.getURL(), task.getMethod(), task.getCType(), task.getLength(), di.getAuthUser(), -1, exception.toString());
+ dqh.handleUnreachable(di);
+ markFailWithRetry(task);
+ }
+
+ /**
+ * Get the feed ID for a subscription
+ *
+ * @param subid The subscription ID
+ * @return The feed ID
+ */
+ public String getFeedId(String subid) {
+ return (dqh.getFeedId(subid));
+ }
+
+ /**
+ * Get the URL to deliver a message to given the file ID
+ */
+ public String getDestURL(String fileid) {
+ return (dqh.getDestURL(di, fileid));
+ }
+
+ /**
+ * Deliver files until there's a failure or there are no more
+ * files to deliver
+ */
+ public void run() {
+ DeliveryTask t;
+ long endtime = System.currentTimeMillis() + dqh.getFairTimeLimit();
+ int filestogo = dqh.getFairFileLimit();
+ while ((t = getNext()) != null) {
+ t.run();
+ if (--filestogo <= 0 || System.currentTimeMillis() > endtime) {
+ break;
+ }
+ }
+ }
+
+ /**
+ * Is there no work to do for this queue right now?
+ */
+ public synchronized boolean isSkipSet() {
+ return (peekNext() == null);
+ }
+
+ /**
+ * Reset the retry timer
+ */
+ public void resetQueue() {
+ resumetime = System.currentTimeMillis();
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-/**\r
- * Interface to allow independent testing of the DeliveryQueue code\r
- * <p>\r
- * This interface represents all of the configuration information and\r
- * feedback mechanisms that a delivery queue needs.\r
- */\r
-public interface DeliveryQueueHelper {\r
- /**\r
- * Get the timeout (milliseconds) before retrying after an initial delivery failure\r
- */\r
- public long getInitFailureTimer();\r
- /**\r
- * Get the ratio between timeouts on consecutive delivery attempts\r
- */\r
- public double getFailureBackoff();\r
- /**\r
- * Get the maximum timeout (milliseconds) between delivery attempts\r
- */\r
- public long getMaxFailureTimer();\r
- /**\r
- * Get the expiration timer (milliseconds) for deliveries\r
- */\r
- public long getExpirationTimer();\r
- /**\r
- * Get the maximum number of file delivery attempts before checking\r
- * if another queue has work to be performed.\r
- */\r
- public int getFairFileLimit();\r
- /**\r
- * Get the maximum amount of time spent delivering files before checking if another queue has work to be performed.\r
- */\r
- public long getFairTimeLimit();\r
- /**\r
- * Get the URL for delivering a file\r
- * @param dest The destination information for the file to be delivered.\r
- * @param fileid The file id for the file to be delivered.\r
- * @return The URL for delivering the file (typically, dest.getURL() + "/" + fileid).\r
- */\r
- public String getDestURL(DestInfo dest, String fileid);\r
- /**\r
- * Forget redirections associated with a subscriber\r
- * @param dest Destination information to forget\r
- */\r
- public void handleUnreachable(DestInfo dest);\r
- /**\r
- * Post redirection for a subscriber\r
- * @param dest Destination information to update\r
- * @param location Location given by subscriber\r
- * @param fileid File ID of request\r
- * @return true if this 3xx response is retryable, otherwise, false.\r
- */\r
- public boolean handleRedirection(DestInfo dest, String location, String fileid);\r
- /**\r
- * Should I handle 3xx responses differently than 4xx responses?\r
- */\r
- public boolean isFollowRedirects();\r
- /**\r
- * Get the feed ID for a subscription\r
- * @param subid The subscription ID\r
- * @return The feed ID\r
- */\r
- public String getFeedId(String subid);\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+/**
+ * Interface to allow independent testing of the DeliveryQueue code
+ * <p>
+ * This interface represents all of the configuration information and
+ * feedback mechanisms that a delivery queue needs.
+ */
+public interface DeliveryQueueHelper {
+ /**
+ * Get the timeout (milliseconds) before retrying after an initial delivery failure
+ */
+ public long getInitFailureTimer();
+
+ /**
+ * Get the ratio between timeouts on consecutive delivery attempts
+ */
+ public double getFailureBackoff();
+
+ /**
+ * Get the maximum timeout (milliseconds) between delivery attempts
+ */
+ public long getMaxFailureTimer();
+
+ /**
+ * Get the expiration timer (milliseconds) for deliveries
+ */
+ public long getExpirationTimer();
+
+ /**
+ * Get the maximum number of file delivery attempts before checking
+ * if another queue has work to be performed.
+ */
+ public int getFairFileLimit();
+
+ /**
+ * Get the maximum amount of time spent delivering files before checking if another queue has work to be performed.
+ */
+ public long getFairTimeLimit();
+
+ /**
+ * Get the URL for delivering a file
+ *
+ * @param dest The destination information for the file to be delivered.
+ * @param fileid The file id for the file to be delivered.
+ * @return The URL for delivering the file (typically, dest.getURL() + "/" + fileid).
+ */
+ public String getDestURL(DestInfo dest, String fileid);
+
+ /**
+ * Forget redirections associated with a subscriber
+ *
+ * @param dest Destination information to forget
+ */
+ public void handleUnreachable(DestInfo dest);
+
+ /**
+ * Post redirection for a subscriber
+ *
+ * @param dest Destination information to update
+ * @param location Location given by subscriber
+ * @param fileid File ID of request
+ * @return true if this 3xx response is retryable, otherwise, false.
+ */
+ public boolean handleRedirection(DestInfo dest, String location, String fileid);
+
+ /**
+ * Should I handle 3xx responses differently than 4xx responses?
+ */
+ public boolean isFollowRedirects();
+
+ /**
+ * Get the feed ID for a subscription
+ *
+ * @param subid The subscription ID
+ * @return The feed ID
+ */
+ public String getFeedId(String subid);
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.io.*;\r
-import java.net.*;\r
-import java.util.*;\r
-import org.apache.log4j.Logger;\r
-\r
-/**\r
- * A file to be delivered to a destination.\r
- * <p>\r
- * A Delivery task represents a work item for the data router - a file that\r
- * needs to be delivered and provides mechanisms to get information about\r
- * the file and its delivery data as well as to attempt delivery.\r
- */\r
-public class DeliveryTask implements Runnable, Comparable<DeliveryTask> {\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.DeliveryTask");\r
- private DeliveryTaskHelper dth;\r
- private String pubid;\r
- private DestInfo di;\r
- private String spool;\r
- private File datafile;\r
- private File metafile;\r
- private long length;\r
- private long date;\r
- private String method;\r
- private String fileid;\r
- private String ctype;\r
- private String url;\r
- private String feedid;\r
- private String subid;\r
- private int attempts;\r
- private String[][] hdrs;\r
- /**\r
- * Is the object a DeliveryTask with the same publication ID?\r
- */\r
- public boolean equals(Object o) {\r
- if (!(o instanceof DeliveryTask)) {\r
- return(false);\r
- }\r
- return(pubid.equals(((DeliveryTask)o).pubid));\r
- }\r
- /**\r
- * Compare the publication IDs.\r
- */\r
- public int compareTo(DeliveryTask o) {\r
- return(pubid.compareTo(o.pubid));\r
- }\r
- /**\r
- * Get the hash code of the publication ID.\r
- */\r
- public int hashCode() {\r
- return(pubid.hashCode());\r
- }\r
- /**\r
- * Return the publication ID.\r
- */\r
- public String toString() {\r
- return(pubid);\r
- }\r
- /**\r
- * Create a delivery task for a given delivery queue and pub ID\r
- * @param dth The delivery task helper for the queue this task is in.\r
- * @param pubid The publish ID for this file. This is used as\r
- * the base for the file name in the spool directory and is of\r
- * the form <milliseconds since 1970>.<fqdn of initial data router node>\r
- */\r
- public DeliveryTask(DeliveryTaskHelper dth, String pubid) {\r
- this.dth = dth;\r
- this.pubid = pubid;\r
- di = dth.getDestInfo();\r
- subid = di.getSubId();\r
- feedid = di.getLogData();\r
- spool = di.getSpool();\r
- String dfn = spool + "/" + pubid;\r
- String mfn = dfn + ".M";\r
- datafile = new File(spool + "/" + pubid);\r
- metafile = new File(mfn);\r
- boolean monly = di.isMetaDataOnly();\r
- date = Long.parseLong(pubid.substring(0, pubid.indexOf('.')));\r
- Vector<String[]> hdrv = new Vector<String[]>();\r
- try {\r
- BufferedReader br = new BufferedReader(new FileReader(metafile));\r
- String s = br.readLine();\r
- int i = s.indexOf('\t');\r
- method = s.substring(0, i);\r
- if (!"DELETE".equals(method) && !monly) {\r
- length = datafile.length();\r
- }\r
- fileid = s.substring(i + 1);\r
- while ((s = br.readLine()) != null) {\r
- i = s.indexOf('\t');\r
- String h = s.substring(0, i);\r
- String v = s.substring(i + 1);\r
- if ("x-att-dr-routing".equalsIgnoreCase(h)) {\r
- subid = v.replaceAll("[^ ]*/", "");\r
- feedid = dth.getFeedId(subid.replaceAll(" .*", ""));\r
- }\r
- if (length == 0 && h.toLowerCase().startsWith("content-")) {\r
- continue;\r
- }\r
- if (h.equalsIgnoreCase("content-type")) {\r
- ctype = v;\r
- }\r
- hdrv.add(new String[] {h, v});\r
- }\r
- br.close();\r
- } catch (Exception e) {\r
- }\r
- hdrs = hdrv.toArray(new String[hdrv.size()][]);\r
- url = dth.getDestURL(fileid);\r
- }\r
- /**\r
- * Get the publish ID\r
- */\r
- public String getPublishId() {\r
- return(pubid);\r
- }\r
- /**\r
- * Attempt delivery\r
- */\r
- public void run() {\r
- attempts++;\r
- try {\r
- di = dth.getDestInfo();\r
- boolean expect100 = di.isUsing100();\r
- boolean monly = di.isMetaDataOnly();\r
- length = 0;\r
- if (!"DELETE".equals(method) && !monly) {\r
- length = datafile.length();\r
- }\r
- url = dth.getDestURL(fileid);\r
- URL u = new URL(url);\r
- HttpURLConnection uc = (HttpURLConnection)u.openConnection();\r
- uc.setConnectTimeout(60000);\r
- uc.setReadTimeout(60000);\r
- uc.setInstanceFollowRedirects(false);\r
- uc.setRequestMethod(method);\r
- uc.setRequestProperty("Content-Length", Long.toString(length));\r
- uc.setRequestProperty("Authorization", di.getAuth());\r
- uc.setRequestProperty("X-ATT-DR-PUBLISH-ID", pubid);\r
- for (String[] nv: hdrs) {\r
- uc.addRequestProperty(nv[0], nv[1]);\r
- }\r
- if (length > 0) {\r
- if (expect100) {\r
- uc.setRequestProperty("Expect", "100-continue");\r
- }\r
- uc.setFixedLengthStreamingMode(length);\r
- uc.setDoOutput(true);\r
- OutputStream os = null;\r
- try {\r
- os = uc.getOutputStream();\r
- } catch (ProtocolException pe) {\r
- dth.reportDeliveryExtra(this, -1L);\r
- // Rcvd error instead of 100-continue\r
- }\r
- if (os != null) {\r
- long sofar = 0;\r
- try {\r
- byte[] buf = new byte[1024 * 1024];\r
- InputStream is = new FileInputStream(datafile);\r
- while (sofar < length) {\r
- int i = buf.length;\r
- if (sofar + i > length) {\r
- i = (int)(length - sofar);\r
- }\r
- i = is.read(buf, 0, i);\r
- if (i <= 0) {\r
- throw new IOException("Unexpected problem reading data file " + datafile);\r
- }\r
- sofar += i;\r
- os.write(buf, 0, i);\r
- }\r
- is.close();\r
- os.close();\r
- } catch (IOException ioe) {\r
- dth.reportDeliveryExtra(this, sofar);\r
- throw ioe;\r
- }\r
- }\r
- }\r
- int rc = uc.getResponseCode();\r
- String rmsg = uc.getResponseMessage();\r
- if (rmsg == null) {\r
- String h0 = uc.getHeaderField(0);\r
- if (h0 != null) {\r
- int i = h0.indexOf(' ');\r
- int j = h0.indexOf(' ', i + 1);\r
- if (i != -1 && j != -1) {\r
- rmsg = h0.substring(j + 1);\r
- }\r
- }\r
- }\r
- String xpubid = null;\r
- InputStream is;\r
- if (rc >= 200 && rc <= 299) {\r
- is = uc.getInputStream();\r
- xpubid = uc.getHeaderField("X-ATT-DR-PUBLISH-ID");\r
- } else {\r
- if (rc >= 300 && rc <= 399) {\r
- rmsg = uc.getHeaderField("Location");\r
- }\r
- is = uc.getErrorStream();\r
- }\r
- byte[] buf = new byte[4096];\r
- if (is != null) {\r
- while (is.read(buf) > 0) {\r
- }\r
- is.close();\r
- }\r
- dth.reportStatus(this, rc, xpubid, rmsg);\r
- } catch (Exception e) {\r
- dth.reportException(this, e);\r
- }\r
- }\r
- /**\r
- * Remove meta and data files\r
- */\r
- public void clean() {\r
- datafile.delete();\r
- metafile.delete();\r
- hdrs = null;\r
- }\r
- /**\r
- * Has this delivery task been cleaned?\r
- */\r
- public boolean isCleaned() {\r
- return(hdrs == null);\r
- }\r
- /**\r
- * Get length of body\r
- */\r
- public long getLength() {\r
- return(length);\r
- }\r
- /**\r
- * Get creation date as encoded in the publish ID.\r
- */\r
- public long getDate() {\r
- return(date);\r
- }\r
- /**\r
- * Get the most recent delivery attempt URL\r
- */\r
- public String getURL() {\r
- return(url);\r
- }\r
- /**\r
- * Get the content type\r
- */\r
- public String getCType() {\r
- return(ctype);\r
- }\r
- /**\r
- * Get the method\r
- */\r
- public String getMethod() {\r
- return(method);\r
- }\r
- /**\r
- * Get the file ID\r
- */\r
- public String getFileId() {\r
- return(fileid);\r
- }\r
- /**\r
- * Get the number of delivery attempts\r
- */\r
- public int getAttempts() {\r
- return(attempts);\r
- }\r
- /**\r
- * Get the (space delimited list of) subscription ID for this delivery task\r
- */\r
- public String getSubId() {\r
- return(subid);\r
- }\r
- /**\r
- * Get the feed ID for this delivery task\r
- */\r
- public String getFeedId() {\r
- return(feedid);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.io.*;
+import java.net.*;
+import java.util.*;
+
+import org.apache.log4j.Logger;
+
+/**
+ * A file to be delivered to a destination.
+ * <p>
+ * A Delivery task represents a work item for the data router - a file that
+ * needs to be delivered and provides mechanisms to get information about
+ * the file and its delivery data as well as to attempt delivery.
+ */
+public class DeliveryTask implements Runnable, Comparable<DeliveryTask> {
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.DeliveryTask");
+ private DeliveryTaskHelper dth;
+ private String pubid;
+ private DestInfo di;
+ private String spool;
+ private File datafile;
+ private File metafile;
+ private long length;
+ private long date;
+ private String method;
+ private String fileid;
+ private String ctype;
+ private String url;
+ private String feedid;
+ private String subid;
+ private int attempts;
+ private String[][] hdrs;
+
+ /**
+ * Is the object a DeliveryTask with the same publication ID?
+ */
+ public boolean equals(Object o) {
+ if (!(o instanceof DeliveryTask)) {
+ return (false);
+ }
+ return (pubid.equals(((DeliveryTask) o).pubid));
+ }
+
+ /**
+ * Compare the publication IDs.
+ */
+ public int compareTo(DeliveryTask o) {
+ return (pubid.compareTo(o.pubid));
+ }
+
+ /**
+ * Get the hash code of the publication ID.
+ */
+ public int hashCode() {
+ return (pubid.hashCode());
+ }
+
+ /**
+ * Return the publication ID.
+ */
+ public String toString() {
+ return (pubid);
+ }
+
+ /**
+ * Create a delivery task for a given delivery queue and pub ID
+ *
+ * @param dth The delivery task helper for the queue this task is in.
+ * @param pubid The publish ID for this file. This is used as
+ * the base for the file name in the spool directory and is of
+ * the form &lt;milliseconds since 1970&gt;.&lt;fqdn of initial data router node&gt;
+ */
+ public DeliveryTask(DeliveryTaskHelper dth, String pubid) {
+ this.dth = dth;
+ this.pubid = pubid;
+ di = dth.getDestInfo();
+ subid = di.getSubId();
+ feedid = di.getLogData();
+ spool = di.getSpool();
+ String dfn = spool + "/" + pubid;
+ String mfn = dfn + ".M";
+ datafile = new File(spool + "/" + pubid);
+ metafile = new File(mfn);
+ boolean monly = di.isMetaDataOnly();
+ date = Long.parseLong(pubid.substring(0, pubid.indexOf('.')));
+ Vector<String[]> hdrv = new Vector<String[]>();
+ try {
+ BufferedReader br = new BufferedReader(new FileReader(metafile));
+ String s = br.readLine();
+ int i = s.indexOf('\t');
+ method = s.substring(0, i);
+ if (!"DELETE".equals(method) && !monly) {
+ length = datafile.length();
+ }
+ fileid = s.substring(i + 1);
+ while ((s = br.readLine()) != null) {
+ i = s.indexOf('\t');
+ String h = s.substring(0, i);
+ String v = s.substring(i + 1);
+ if ("x-att-dr-routing".equalsIgnoreCase(h)) {
+ subid = v.replaceAll("[^ ]*/", "");
+ feedid = dth.getFeedId(subid.replaceAll(" .*", ""));
+ }
+ if (length == 0 && h.toLowerCase().startsWith("content-")) {
+ continue;
+ }
+ if (h.equalsIgnoreCase("content-type")) {
+ ctype = v;
+ }
+ hdrv.add(new String[]{h, v});
+ }
+ br.close();
+ } catch (Exception e) {
+ }
+ hdrs = hdrv.toArray(new String[hdrv.size()][]);
+ url = dth.getDestURL(fileid);
+ }
+
+ /**
+ * Get the publish ID
+ */
+ public String getPublishId() {
+ return (pubid);
+ }
+
+ /**
+ * Attempt delivery
+ */
+ public void run() {
+ attempts++;
+ try {
+ di = dth.getDestInfo();
+ boolean expect100 = di.isUsing100();
+ boolean monly = di.isMetaDataOnly();
+ length = 0;
+ if (!"DELETE".equals(method) && !monly) {
+ length = datafile.length();
+ }
+ url = dth.getDestURL(fileid);
+ URL u = new URL(url);
+ HttpURLConnection uc = (HttpURLConnection) u.openConnection();
+ uc.setConnectTimeout(60000);
+ uc.setReadTimeout(60000);
+ uc.setInstanceFollowRedirects(false);
+ uc.setRequestMethod(method);
+ uc.setRequestProperty("Content-Length", Long.toString(length));
+ uc.setRequestProperty("Authorization", di.getAuth());
+ uc.setRequestProperty("X-ATT-DR-PUBLISH-ID", pubid);
+ for (String[] nv : hdrs) {
+ uc.addRequestProperty(nv[0], nv[1]);
+ }
+ if (length > 0) {
+ if (expect100) {
+ uc.setRequestProperty("Expect", "100-continue");
+ }
+ uc.setFixedLengthStreamingMode(length);
+ uc.setDoOutput(true);
+ OutputStream os = null;
+ try {
+ os = uc.getOutputStream();
+ } catch (ProtocolException pe) {
+ dth.reportDeliveryExtra(this, -1L);
+ // Rcvd error instead of 100-continue
+ }
+ if (os != null) {
+ long sofar = 0;
+ try {
+ byte[] buf = new byte[1024 * 1024];
+ InputStream is = new FileInputStream(datafile);
+ while (sofar < length) {
+ int i = buf.length;
+ if (sofar + i > length) {
+ i = (int) (length - sofar);
+ }
+ i = is.read(buf, 0, i);
+ if (i <= 0) {
+ throw new IOException("Unexpected problem reading data file " + datafile);
+ }
+ sofar += i;
+ os.write(buf, 0, i);
+ }
+ is.close();
+ os.close();
+ } catch (IOException ioe) {
+ dth.reportDeliveryExtra(this, sofar);
+ throw ioe;
+ }
+ }
+ }
+ int rc = uc.getResponseCode();
+ String rmsg = uc.getResponseMessage();
+ if (rmsg == null) {
+ String h0 = uc.getHeaderField(0);
+ if (h0 != null) {
+ int i = h0.indexOf(' ');
+ int j = h0.indexOf(' ', i + 1);
+ if (i != -1 && j != -1) {
+ rmsg = h0.substring(j + 1);
+ }
+ }
+ }
+ String xpubid = null;
+ InputStream is;
+ if (rc >= 200 && rc <= 299) {
+ is = uc.getInputStream();
+ xpubid = uc.getHeaderField("X-ATT-DR-PUBLISH-ID");
+ } else {
+ if (rc >= 300 && rc <= 399) {
+ rmsg = uc.getHeaderField("Location");
+ }
+ is = uc.getErrorStream();
+ }
+ byte[] buf = new byte[4096];
+ if (is != null) {
+ while (is.read(buf) > 0) {
+ }
+ is.close();
+ }
+ dth.reportStatus(this, rc, xpubid, rmsg);
+ } catch (Exception e) {
+ dth.reportException(this, e);
+ }
+ }
+
+ /**
+ * Remove meta and data files
+ */
+ public void clean() {
+ datafile.delete();
+ metafile.delete();
+ hdrs = null;
+ }
+
+ /**
+ * Has this delivery task been cleaned?
+ */
+ public boolean isCleaned() {
+ return (hdrs == null);
+ }
+
+ /**
+ * Get length of body
+ */
+ public long getLength() {
+ return (length);
+ }
+
+ /**
+ * Get creation date as encoded in the publish ID.
+ */
+ public long getDate() {
+ return (date);
+ }
+
+ /**
+ * Get the most recent delivery attempt URL
+ */
+ public String getURL() {
+ return (url);
+ }
+
+ /**
+ * Get the content type
+ */
+ public String getCType() {
+ return (ctype);
+ }
+
+ /**
+ * Get the method
+ */
+ public String getMethod() {
+ return (method);
+ }
+
+ /**
+ * Get the file ID
+ */
+ public String getFileId() {
+ return (fileid);
+ }
+
+ /**
+ * Get the number of delivery attempts
+ */
+ public int getAttempts() {
+ return (attempts);
+ }
+
+ /**
+ * Get the (space delimited list of) subscription ID for this delivery task
+ */
+ public String getSubId() {
+ return (subid);
+ }
+
+ /**
+ * Get the feed ID for this delivery task
+ */
+ public String getFeedId() {
+ return (feedid);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-/**\r
- * Interface to allow independent testing of the DeliveryTask code.\r
- * <p>\r
- * This interface represents all the configuraiton information and\r
- * feedback mechanisms that a delivery task needs.\r
- */\r
-\r
-public interface DeliveryTaskHelper {\r
- /**\r
- * Report that a delivery attempt failed due to an exception (like can't connect to remote host)\r
- * @param task The task that failed\r
- * @param exception The exception that occurred\r
- */\r
- public void reportException(DeliveryTask task, Exception exception);\r
- /**\r
- * Report that a delivery attempt completed (successfully or unsuccessfully)\r
- * @param task The task that failed\r
- * @param status The HTTP status\r
- * @param xpubid The publish ID from the far end (if any)\r
- * @param location The redirection location for a 3XX response\r
- */\r
- public void reportStatus(DeliveryTask task, int status, String xpubid, String location);\r
- /**\r
- * Report that a delivery attempt either failed while sending data or that an error was returned instead of a 100 Continue.\r
- * @param task The task that failed\r
- * @param sent The number of bytes sent or -1 if an error was returned instead of 100 Continue.\r
- */\r
- public void reportDeliveryExtra(DeliveryTask task, long sent);\r
- /**\r
- * Get the destination information for the delivery queue\r
- * @return The destination information\r
- */\r
- public DestInfo getDestInfo();\r
- /**\r
- * Given a file ID, get the URL to deliver to\r
- * @param fileid The file id\r
- * @return The URL to deliver to\r
- */\r
- public String getDestURL(String fileid);\r
- /**\r
- * Get the feed ID for a subscription\r
- * @param subid The subscription ID\r
- * @return The feed iD\r
- */\r
- public String getFeedId(String subid);\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+/**
+ * Interface to allow independent testing of the DeliveryTask code.
+ * <p>
+ * This interface represents all the configuration information and
+ * feedback mechanisms that a delivery task needs.
+ */
+
+public interface DeliveryTaskHelper {
+ /**
+ * Report that a delivery attempt failed due to an exception (like can't connect to remote host)
+ *
+ * @param task The task that failed
+ * @param exception The exception that occurred
+ */
+ public void reportException(DeliveryTask task, Exception exception);
+
+ /**
+ * Report that a delivery attempt completed (successfully or unsuccessfully)
+ *
+ * @param task The task whose delivery attempt completed
+ * @param status The HTTP status
+ * @param xpubid The publish ID from the far end (if any)
+ * @param location The redirection location for a 3XX response
+ */
+ public void reportStatus(DeliveryTask task, int status, String xpubid, String location);
+
+ /**
+ * Report that a delivery attempt either failed while sending data or that an error was returned instead of a 100 Continue.
+ *
+ * @param task The task that failed
+ * @param sent The number of bytes sent or -1 if an error was returned instead of 100 Continue.
+ */
+ public void reportDeliveryExtra(DeliveryTask task, long sent);
+
+ /**
+ * Get the destination information for the delivery queue
+ *
+ * @return The destination information
+ */
+ public DestInfo getDestInfo();
+
+ /**
+ * Given a file ID, get the URL to deliver to
+ *
+ * @param fileid The file id
+ * @return The URL to deliver to
+ */
+ public String getDestURL(String fileid);
+
+ /**
+ * Get the feed ID for a subscription
+ *
+ * @param subid The subscription ID
+ * @return The feed iD
+ */
+ public String getFeedId(String subid);
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-/**\r
- * Information for a delivery destination that doesn't change from message to message\r
- */\r
-public class DestInfo {\r
- private String name;\r
- private String spool;\r
- private String subid;\r
- private String logdata;\r
- private String url;\r
- private String authuser;\r
- private String authentication;\r
- private boolean metaonly;\r
- private boolean use100;\r
- /**\r
- * Create a destination information object.\r
- * @param name n:fqdn or s:subid\r
- * @param spool The directory where files are spooled.\r
- * @param subid The subscription ID (if applicable).\r
- * @param logdata Text to be included in log messages\r
- * @param url The URL to deliver to.\r
- * @param authuser The auth user for logging.\r
- * @param authentication The credentials.\r
- * @param metaonly Is this a metadata only delivery?\r
- * @param use100 Should I use expect 100-continue?\r
- */\r
- public DestInfo(String name, String spool, String subid, String logdata, String url, String authuser, String authentication, boolean metaonly, boolean use100) {\r
- this.name = name;\r
- this.spool = spool;\r
- this.subid = subid;\r
- this.logdata = logdata;\r
- this.url = url;\r
- this.authuser = authuser;\r
- this.authentication = authentication;\r
- this.metaonly = metaonly;\r
- this.use100 = use100;\r
- }\r
- public boolean equals(Object o) {\r
- return((o instanceof DestInfo) && ((DestInfo)o).spool.equals(spool));\r
- }\r
- public int hashCode() {\r
- return(spool.hashCode());\r
- }\r
- /**\r
- * Get the name of this destination\r
- */\r
- public String getName() {\r
- return(name);\r
- }\r
- /**\r
- * Get the spool directory for this destination.\r
- * @return The spool directory\r
- */\r
- public String getSpool() {\r
- return(spool);\r
- }\r
- /**\r
- * Get the subscription ID.\r
- * @return Subscription ID or null if this is a node to node delivery.\r
- */\r
- public String getSubId() {\r
- return(subid);\r
- }\r
- /**\r
- * Get the log data.\r
- * @return Text to be included in a log message about delivery attempts.\r
- */\r
- public String getLogData() {\r
- return(logdata);\r
- }\r
- /**\r
- * Get the delivery URL.\r
- * @return The URL to deliver to (the primary URL).\r
- */\r
- public String getURL() {\r
- return(url);\r
-\r
- }\r
- /**\r
- * Get the user for authentication\r
- * @return The name of the user for logging\r
- */\r
- public String getAuthUser() {\r
- return(authuser);\r
- }\r
- /**\r
- * Get the authentication header\r
- * @return The string to use to authenticate to the recipient.\r
- */\r
- public String getAuth() {\r
- return(authentication);\r
- }\r
- /**\r
- * Is this a metadata only delivery?\r
- * @return True if this is a metadata only delivery\r
- */\r
- public boolean isMetaDataOnly() {\r
- return(metaonly);\r
- }\r
- /**\r
- * Should I send expect 100-continue header?\r
- * @return True if I should.\r
- */\r
- public boolean isUsing100() {\r
- return(use100);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+/**
+ * Information for a delivery destination that doesn't change from message to message
+ */
+public class DestInfo {
+ private String name;
+ private String spool;
+ private String subid;
+ private String logdata;
+ private String url;
+ private String authuser;
+ private String authentication;
+ private boolean metaonly;
+ private boolean use100;
+
+ /**
+ * Create a destination information object.
+ *
+ * @param name n:fqdn or s:subid
+ * @param spool The directory where files are spooled.
+ * @param subid The subscription ID (if applicable).
+ * @param logdata Text to be included in log messages
+ * @param url The URL to deliver to.
+ * @param authuser The auth user for logging.
+ * @param authentication The credentials.
+ * @param metaonly Is this a metadata only delivery?
+ * @param use100 Should I use expect 100-continue?
+ */
+ public DestInfo(String name, String spool, String subid, String logdata, String url, String authuser, String authentication, boolean metaonly, boolean use100) {
+ this.name = name;
+ this.spool = spool;
+ this.subid = subid;
+ this.logdata = logdata;
+ this.url = url;
+ this.authuser = authuser;
+ this.authentication = authentication;
+ this.metaonly = metaonly;
+ this.use100 = use100;
+ }
+
+ public boolean equals(Object o) {
+ return ((o instanceof DestInfo) && ((DestInfo) o).spool.equals(spool));
+ }
+
+ public int hashCode() {
+ return (spool.hashCode());
+ }
+
+ /**
+ * Get the name of this destination
+ */
+ public String getName() {
+ return (name);
+ }
+
+ /**
+ * Get the spool directory for this destination.
+ *
+ * @return The spool directory
+ */
+ public String getSpool() {
+ return (spool);
+ }
+
+ /**
+ * Get the subscription ID.
+ *
+ * @return Subscription ID or null if this is a node to node delivery.
+ */
+ public String getSubId() {
+ return (subid);
+ }
+
+ /**
+ * Get the log data.
+ *
+ * @return Text to be included in a log message about delivery attempts.
+ */
+ public String getLogData() {
+ return (logdata);
+ }
+
+ /**
+ * Get the delivery URL.
+ *
+ * @return The URL to deliver to (the primary URL).
+ */
+ public String getURL() {
+ return (url);
+
+ }
+
+ /**
+ * Get the user for authentication
+ *
+ * @return The name of the user for logging
+ */
+ public String getAuthUser() {
+ return (authuser);
+ }
+
+ /**
+ * Get the authentication header
+ *
+ * @return The string to use to authenticate to the recipient.
+ */
+ public String getAuth() {
+ return (authentication);
+ }
+
+ /**
+ * Is this a metadata only delivery?
+ *
+ * @return True if this is a metadata only delivery
+ */
+ public boolean isMetaDataOnly() {
+ return (metaonly);
+ }
+
+ /**
+ * Should I send expect 100-continue header?
+ *
+ * @return True if I should.
+ */
+ public boolean isUsing100() {
+ return (use100);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-import java.net.*;\r
-\r
-/**\r
- * Determine if an IP address is from a machine\r
- */\r
-public class IsFrom {\r
- private long nextcheck;\r
- private String[] ips;\r
- private String fqdn;\r
- /**\r
- * Configure the JVM DNS cache to have a 10 second TTL. This needs to be called very very early or it won't have any effect.\r
- */\r
- public static void setDNSCache() {\r
- java.security.Security.setProperty("networkaddress.cache.ttl", "10");\r
- }\r
- /**\r
- * Create an IsFrom for the specified fully qualified domain name.\r
- */\r
- public IsFrom(String fqdn) {\r
- this.fqdn = fqdn;\r
- }\r
- /**\r
- * Check if an IP address matches. If it has been more than\r
- * 10 seconds since DNS was last checked for changes to the\r
- * IP address(es) of this FQDN, check again. Then check\r
- * if the specified IP address belongs to the FQDN.\r
- */\r
- public synchronized boolean isFrom(String ip) {\r
- long now = System.currentTimeMillis();\r
- if (now > nextcheck) {\r
- nextcheck = now + 10000;\r
- Vector<String> v = new Vector<String>();\r
- try {\r
- InetAddress[] addrs = InetAddress.getAllByName(fqdn);\r
- for (InetAddress a: addrs) {\r
- v.add(a.getHostAddress());\r
- }\r
- } catch (Exception e) {\r
- }\r
- ips = v.toArray(new String[v.size()]);\r
- }\r
- for (String s: ips) {\r
- if (s.equals(ip)) {\r
- return(true);\r
- }\r
- }\r
- return(false);\r
- }\r
- /**\r
- * Return the fully qualified domain name\r
- */\r
- public String toString() {\r
- return(fqdn);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+import java.net.*;
+
+/**
+ * Determine if an IP address is from a machine
+ */
+public class IsFrom {
+ private long nextcheck;
+ private String[] ips;
+ private String fqdn;
+
+ /**
+ * Configure the JVM DNS cache to have a 10 second TTL. This needs to be called very very early or it won't have any effect.
+ */
+ public static void setDNSCache() {
+ java.security.Security.setProperty("networkaddress.cache.ttl", "10");
+ }
+
+ /**
+ * Create an IsFrom for the specified fully qualified domain name.
+ */
+ public IsFrom(String fqdn) {
+ this.fqdn = fqdn;
+ }
+
+ /**
+ * Check if an IP address matches. If it has been more than
+ * 10 seconds since DNS was last checked for changes to the
+ * IP address(es) of this FQDN, check again. Then check
+ * if the specified IP address belongs to the FQDN.
+ */
+ public synchronized boolean isFrom(String ip) {
+ long now = System.currentTimeMillis();
+ if (now > nextcheck) {
+ nextcheck = now + 10000;
+ Vector<String> v = new Vector<String>();
+ try {
+ InetAddress[] addrs = InetAddress.getAllByName(fqdn);
+ for (InetAddress a : addrs) {
+ v.add(a.getHostAddress());
+ }
+ } catch (Exception e) {
+ }
+ ips = v.toArray(new String[v.size()]);
+ }
+ for (String s : ips) {
+ if (s.equals(ip)) {
+ return (true);
+ }
+ }
+ return (false);
+ }
+
+ /**
+ * Return the fully qualified domain name
+ */
+ public String toString() {
+ return (fqdn);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-import java.util.regex.*;\r
-import java.io.*;\r
-import java.nio.file.*;\r
-import java.text.*;\r
-\r
-/**\r
- * Cleanup of old log files.\r
- * <p>\r
- * Periodically scan the log directory for log files that are older than\r
- * the log file retention interval, and delete them. In a future release,\r
- * This class will also be responsible for uploading events logs to the\r
- * log server to support the log query APIs.\r
- */\r
-\r
-public class LogManager extends TimerTask {\r
- private NodeConfigManager config;\r
- private Matcher isnodelog;\r
- private Matcher iseventlog;\r
- private Uploader worker;\r
- private String uploaddir;\r
- private String logdir;\r
- private class Uploader extends Thread implements DeliveryQueueHelper {\r
- public long getInitFailureTimer() { return(10000L); }\r
- public double getFailureBackoff() { return(2.0); }\r
- public long getMaxFailureTimer() { return(150000L); }\r
- public long getExpirationTimer() { return(604800000L); }\r
- public int getFairFileLimit() { return(10000); }\r
- public long getFairTimeLimit() { return(86400000); }\r
- public String getDestURL(DestInfo dest, String fileid) {\r
- return(config.getEventLogUrl());\r
- }\r
- public void handleUnreachable(DestInfo dest) {}\r
- public boolean handleRedirection(DestInfo dest, String location, String fileid) { return(false); }\r
- public boolean isFollowRedirects() { return(false); }\r
- public String getFeedId(String subid) { return(null); }\r
- private DeliveryQueue dq;\r
- public Uploader() {\r
- dq = new DeliveryQueue(this, new DestInfo("LogUpload", uploaddir, null, null, null, config.getMyName(), config.getMyAuth(), false, false));\r
- setDaemon(true);\r
- setName("Log Uploader");\r
- start();\r
- }\r
- private synchronized void snooze() {\r
- try {\r
- wait(10000);\r
- } catch (Exception e) {\r
- }\r
- }\r
- private synchronized void poke() {\r
- notify();\r
- }\r
- public void run() {\r
- while (true) {\r
- scan();\r
- dq.run();\r
- snooze();\r
- }\r
- }\r
- private void scan() {\r
- long threshold = System.currentTimeMillis() - config.getLogRetention();\r
- File dir = new File(logdir);\r
- String[] fns = dir.list();\r
- Arrays.sort(fns);\r
- String lastqueued = "events-000000000000.log";\r
- String curlog = StatusLog.getCurLogFile();\r
- curlog = curlog.substring(curlog.lastIndexOf('/') + 1);\r
- try {\r
- Writer w = new FileWriter(uploaddir + "/.meta");\r
- w.write("POST\tlogdata\nContent-Type\ttext/plain\n");\r
- w.close();\r
- BufferedReader br = new BufferedReader(new FileReader(uploaddir + "/.lastqueued"));\r
- lastqueued = br.readLine();\r
- br.close();\r
- } catch (Exception e) {\r
- }\r
- for (String fn: fns) {\r
- if (!isnodelog.reset(fn).matches()) {\r
- if (!iseventlog.reset(fn).matches()) {\r
- continue;\r
- }\r
- if (lastqueued.compareTo(fn) < 0 && curlog.compareTo(fn) > 0) {\r
- lastqueued = fn;\r
- try {\r
- String pid = config.getPublishId();\r
- Files.createLink(Paths.get(uploaddir + "/" + pid), Paths.get(logdir + "/" + fn));\r
- Files.createLink(Paths.get(uploaddir + "/" + pid + ".M"), Paths.get(uploaddir + "/.meta"));\r
- } catch (Exception e) {\r
- }\r
- }\r
- }\r
- File f = new File(dir, fn);\r
- if (f.lastModified() < threshold) {\r
- f.delete();\r
- }\r
- }\r
- try {\r
- (new File(uploaddir + "/.meta")).delete();\r
- Writer w = new FileWriter(uploaddir + "/.lastqueued");\r
- w.write(lastqueued + "\n");\r
- w.close();\r
- } catch (Exception e) {\r
- }\r
- }\r
- }\r
- /**\r
- * Construct a log manager\r
- * <p>\r
- * The log manager will check for expired log files every 5 minutes\r
- * at 20 seconds after the 5 minute boundary. (Actually, the\r
- * interval is the event log rollover interval, which\r
- * defaults to 5 minutes).\r
- */\r
- public LogManager(NodeConfigManager config) {\r
- this.config = config;\r
- try {\r
- isnodelog = Pattern.compile("node\\.log\\.\\d{8}").matcher("");\r
- iseventlog = Pattern.compile("events-\\d{12}\\.log").matcher("");\r
- } catch (Exception e) {}\r
- logdir = config.getLogDir();\r
- uploaddir = logdir + "/.spool";\r
- (new File(uploaddir)).mkdirs();\r
- long now = System.currentTimeMillis();\r
- long intvl = StatusLog.parseInterval(config.getEventLogInterval(), 300000);\r
- long when = now - now % intvl + intvl + 20000L;\r
- config.getTimer().scheduleAtFixedRate(this, when - now, intvl);\r
- worker = new Uploader();\r
- }\r
- /**\r
- * Trigger check for expired log files and log files to upload\r
- */\r
- public void run() {\r
- worker.poke();\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+import java.util.regex.*;
+import java.io.*;
+import java.nio.file.*;
+import java.text.*;
+
+/**
+ * Cleanup of old log files.
+ * <p>
+ * Periodically scan the log directory for log files that are older than
+ * the log file retention interval, and delete them. In a future release,
+ * this class will also be responsible for uploading events logs to the
+ * log server to support the log query APIs.
+ */
+
+public class LogManager extends TimerTask {
+ private NodeConfigManager config;
+ private Matcher isnodelog;
+ private Matcher iseventlog;
+ private Uploader worker;
+ private String uploaddir;
+ private String logdir;
+
+ private class Uploader extends Thread implements DeliveryQueueHelper {
+ public long getInitFailureTimer() {
+ return (10000L);
+ }
+
+ public double getFailureBackoff() {
+ return (2.0);
+ }
+
+ public long getMaxFailureTimer() {
+ return (150000L);
+ }
+
+ public long getExpirationTimer() {
+ return (604800000L);
+ }
+
+ public int getFairFileLimit() {
+ return (10000);
+ }
+
+ public long getFairTimeLimit() {
+ return (86400000);
+ }
+
+ public String getDestURL(DestInfo dest, String fileid) {
+ return (config.getEventLogUrl());
+ }
+
+ public void handleUnreachable(DestInfo dest) {
+ }
+
+ public boolean handleRedirection(DestInfo dest, String location, String fileid) {
+ return (false);
+ }
+
+ public boolean isFollowRedirects() {
+ return (false);
+ }
+
+ public String getFeedId(String subid) {
+ return (null);
+ }
+
+ private DeliveryQueue dq;
+
+ public Uploader() {
+ dq = new DeliveryQueue(this, new DestInfo("LogUpload", uploaddir, null, null, null, config.getMyName(), config.getMyAuth(), false, false));
+ setDaemon(true);
+ setName("Log Uploader");
+ start();
+ }
+
+ private synchronized void snooze() {
+ try {
+ wait(10000);
+ } catch (Exception e) {
+ }
+ }
+
+ private synchronized void poke() {
+ notify();
+ }
+
+ public void run() {
+ while (true) {
+ scan();
+ dq.run();
+ snooze();
+ }
+ }
+
+ private void scan() {
+ long threshold = System.currentTimeMillis() - config.getLogRetention();
+ File dir = new File(logdir);
+ String[] fns = dir.list();
+ Arrays.sort(fns);
+ String lastqueued = "events-000000000000.log";
+ String curlog = StatusLog.getCurLogFile();
+ curlog = curlog.substring(curlog.lastIndexOf('/') + 1);
+ try {
+ Writer w = new FileWriter(uploaddir + "/.meta");
+ w.write("POST\tlogdata\nContent-Type\ttext/plain\n");
+ w.close();
+ BufferedReader br = new BufferedReader(new FileReader(uploaddir + "/.lastqueued"));
+ lastqueued = br.readLine();
+ br.close();
+ } catch (Exception e) {
+ }
+ for (String fn : fns) {
+ if (!isnodelog.reset(fn).matches()) {
+ if (!iseventlog.reset(fn).matches()) {
+ continue;
+ }
+ if (lastqueued.compareTo(fn) < 0 && curlog.compareTo(fn) > 0) {
+ lastqueued = fn;
+ try {
+ String pid = config.getPublishId();
+ Files.createLink(Paths.get(uploaddir + "/" + pid), Paths.get(logdir + "/" + fn));
+ Files.createLink(Paths.get(uploaddir + "/" + pid + ".M"), Paths.get(uploaddir + "/.meta"));
+ } catch (Exception e) {
+ }
+ }
+ }
+ File f = new File(dir, fn);
+ if (f.lastModified() < threshold) {
+ f.delete();
+ }
+ }
+ try {
+ (new File(uploaddir + "/.meta")).delete();
+ Writer w = new FileWriter(uploaddir + "/.lastqueued");
+ w.write(lastqueued + "\n");
+ w.close();
+ } catch (Exception e) {
+ }
+ }
+ }
+
+ /**
+ * Construct a log manager
+ * <p>
+ * The log manager will check for expired log files every 5 minutes
+ * at 20 seconds after the 5 minute boundary. (Actually, the
+ * interval is the event log rollover interval, which
+ * defaults to 5 minutes).
+ */
+ public LogManager(NodeConfigManager config) {
+ this.config = config;
+ try {
+ isnodelog = Pattern.compile("node\\.log\\.\\d{8}").matcher("");
+ iseventlog = Pattern.compile("events-\\d{12}\\.log").matcher("");
+ } catch (Exception e) {
+ }
+ logdir = config.getLogDir();
+ uploaddir = logdir + "/.spool";
+ (new File(uploaddir)).mkdirs();
+ long now = System.currentTimeMillis();
+ long intvl = StatusLog.parseInterval(config.getEventLogInterval(), 300000);
+ long when = now - now % intvl + intvl + 20000L;
+ config.getTimer().scheduleAtFixedRate(this, when - now, intvl);
+ worker = new Uploader();
+ }
+
+ /**
+ * Trigger check for expired log files and log files to upload
+ */
+ public void run() {
+ worker.poke();
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-import java.io.*;\r
-\r
-/**\r
- * Processed configuration for this node.\r
- * <p>\r
- * The NodeConfig represents a processed configuration from the Data Router\r
- * provisioning server. Each time configuration data is received from the\r
- * provisioning server, a new NodeConfig is created and the previous one\r
- * discarded.\r
- */\r
-public class NodeConfig {\r
- /**\r
- * Raw configuration entry for a data router node\r
- */\r
- public static class ProvNode {\r
- private String cname;\r
- /**\r
- * Construct a node configuration entry.\r
- * @param cname The cname of the node.\r
- */\r
- public ProvNode(String cname) {\r
- this.cname = cname;\r
- }\r
- /**\r
- * Get the cname of the node\r
- */\r
- public String getCName() {\r
- return(cname);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for a provisioning parameter\r
- */\r
- public static class ProvParam {\r
- private String name;\r
- private String value;\r
- /**\r
- * Construct a provisioning parameter configuration entry.\r
- * @param name The name of the parameter.\r
- * @param value The value of the parameter.\r
- */\r
- public ProvParam(String name, String value) {\r
- this.name = name;\r
- this.value = value;\r
- }\r
- /**\r
- * Get the name of the parameter.\r
- */\r
- public String getName() {\r
- return(name);\r
- }\r
- /**\r
- * Get the value of the parameter.\r
- */\r
- public String getValue() {\r
- return(value);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for a data feed.\r
- */\r
- public static class ProvFeed {\r
- private String id;\r
- private String logdata;\r
- private String status;\r
- /**\r
- * Construct a feed configuration entry.\r
- * @param id The feed ID of the entry.\r
- * @param logdata String for log entries about the entry.\r
- * @param status The reason why this feed cannot be used (Feed has been deleted, Feed has been suspended) or null if it is valid.\r
- */\r
- public ProvFeed(String id, String logdata, String status) {\r
- this.id = id;\r
- this.logdata = logdata;\r
- this.status = status;\r
- }\r
- /**\r
- * Get the feed id of the data feed.\r
- */\r
- public String getId() {\r
- return(id);\r
- }\r
- /**\r
- * Get the log data of the data feed.\r
- */\r
- public String getLogData() {\r
- return(logdata);\r
- }\r
- /**\r
- * Get the status of the data feed.\r
- */\r
- public String getStatus() {\r
- return(status);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for a feed user.\r
- */\r
- public static class ProvFeedUser {\r
- private String feedid;\r
- private String user;\r
- private String credentials;\r
- /**\r
- * Construct a feed user configuration entry\r
- * @param feedid The feed id.\r
- * @param user The user that will publish to the feed.\r
- * @param credentials The Authorization header the user will use to publish.\r
- */\r
- public ProvFeedUser(String feedid, String user, String credentials) {\r
- this.feedid = feedid;\r
- this.user = user;\r
- this.credentials = credentials;\r
- }\r
- /**\r
- * Get the feed id of the feed user.\r
- */\r
- public String getFeedId() {\r
- return(feedid);\r
- }\r
- /**\r
- * Get the user for the feed user.\r
- */\r
- public String getUser() {\r
- return(user);\r
- }\r
- /**\r
- * Get the credentials for the feed user.\r
- */\r
- public String getCredentials() {\r
- return(credentials);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for a feed subnet\r
- */\r
- public static class ProvFeedSubnet {\r
- private String feedid;\r
- private String cidr;\r
- /**\r
- * Construct a feed subnet configuration entry\r
- * @param feedid The feed ID\r
- * @param cidr The CIDR allowed to publish to the feed.\r
- */\r
- public ProvFeedSubnet(String feedid, String cidr) {\r
- this.feedid = feedid;\r
- this.cidr = cidr;\r
- }\r
- /**\r
- * Get the feed id of the feed subnet.\r
- */\r
- public String getFeedId() {\r
- return(feedid);\r
- }\r
- /**\r
- * Get the CIDR of the feed subnet.\r
- */\r
- public String getCidr() {\r
- return(cidr);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for a subscription\r
- */\r
- public static class ProvSubscription {\r
- private String subid;\r
- private String feedid;\r
- private String url;\r
- private String authuser;\r
- private String credentials;\r
- private boolean metaonly;\r
- private boolean use100;\r
- /**\r
- * Construct a subscription configuration entry\r
- * @param subid The subscription ID\r
- * @param feedid The feed ID\r
- * @param url The base delivery URL (not including the fileid)\r
- * @param authuser The user in the credentials used to deliver\r
- * @param credentials The credentials used to authenticate to the delivery URL exactly as they go in the Authorization header.\r
- * @param metaonly Is this a meta data only subscription?\r
- * @param use100 Should we send Expect: 100-continue?\r
- */\r
- public ProvSubscription(String subid, String feedid, String url, String authuser, String credentials, boolean metaonly, boolean use100) {\r
- this.subid = subid;\r
- this.feedid = feedid;\r
- this.url = url;\r
- this.authuser = authuser;\r
- this.credentials = credentials;\r
- this.metaonly = metaonly;\r
- this.use100 = use100;\r
- }\r
- /**\r
- * Get the subscription ID\r
- */\r
- public String getSubId() {\r
- return(subid);\r
- }\r
- /**\r
- * Get the feed ID\r
- */\r
- public String getFeedId() {\r
- return(feedid);\r
- }\r
- /**\r
- * Get the delivery URL\r
- */\r
- public String getURL() {\r
- return(url);\r
- }\r
- /**\r
- * Get the user\r
- */\r
- public String getAuthUser() {\r
- return(authuser);\r
- }\r
- /**\r
- * Get the delivery credentials\r
- */\r
- public String getCredentials() {\r
- return(credentials);\r
- }\r
- /**\r
- * Is this a meta data only subscription?\r
- */\r
- public boolean isMetaDataOnly() {\r
- return(metaonly);\r
- }\r
- /**\r
- * Should we send Expect: 100-continue?\r
- */\r
- public boolean isUsing100() {\r
- return(use100);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for controlled ingress to the data router node\r
- */\r
- public static class ProvForceIngress {\r
- private String feedid;\r
- private String subnet;\r
- private String user;\r
- private String[] nodes;\r
- /**\r
- * Construct a forced ingress configuration entry\r
- * @param feedid The feed ID that this entry applies to\r
- * @param subnet The CIDR for which publisher IP addresses this entry applies to or "" if it applies to all publisher IP addresses\r
- * @param user The publishing user this entry applies to or "" if it applies to all publishing users.\r
- * @param nodes The array of FQDNs of the data router nodes to redirect publication attempts to.\r
- */\r
- public ProvForceIngress(String feedid, String subnet, String user, String[] nodes) {\r
- this.feedid = feedid;\r
- this.subnet = subnet;\r
- this.user = user;\r
- this.nodes = nodes;\r
- }\r
- /**\r
- * Get the feed ID\r
- */\r
- public String getFeedId() {\r
- return(feedid);\r
- }\r
- /**\r
- * Get the subnet\r
- */\r
- public String getSubnet() {\r
- return(subnet);\r
- }\r
- /**\r
- * Get the user\r
- */\r
- public String getUser() {\r
- return(user);\r
- }\r
- /**\r
- * Get the node\r
- */\r
- public String[] getNodes() {\r
- return(nodes);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for controlled egress from the data router\r
- */\r
- public static class ProvForceEgress {\r
- private String subid;\r
- private String node;\r
- /**\r
- * Construct a forced egress configuration entry\r
- * @param subid The subscription ID the subscription with forced egress\r
- * @param node The node handling deliveries for this subscription\r
- */\r
- public ProvForceEgress(String subid, String node) {\r
- this.subid = subid;\r
- this.node = node;\r
- }\r
- /**\r
- * Get the subscription ID\r
- */\r
- public String getSubId() {\r
- return(subid);\r
- }\r
- /**\r
- * Get the node\r
- */\r
- public String getNode() {\r
- return(node);\r
- }\r
- }\r
- /**\r
- * Raw configuration entry for routing within the data router network\r
- */\r
- public static class ProvHop {\r
- private String from;\r
- private String to;\r
- private String via;\r
- /**\r
- * A human readable description of this entry\r
- */\r
- public String toString() {\r
- return("Hop " + from + "->" + to + " via " + via);\r
- }\r
- /**\r
- * Construct a hop entry\r
- * @param from The FQDN of the node with the data to be delivered\r
- * @param to The FQDN of the node that will deliver to the subscriber\r
- * @param via The FQDN of the node where the from node should send the data\r
- */\r
- public ProvHop(String from, String to, String via) {\r
- this.from = from;\r
- this.to = to;\r
- this.via = via;\r
- }\r
- /**\r
- * Get the from node\r
- */\r
- public String getFrom() {\r
- return(from);\r
- }\r
- /**\r
- * Get the to node\r
- */\r
- public String getTo() {\r
- return(to);\r
- }\r
- /**\r
- * Get the next intermediate node\r
- */\r
- public String getVia() {\r
- return(via);\r
- }\r
- }\r
- private static class Redirection {\r
- public SubnetMatcher snm;\r
- public String user;\r
- public String[] nodes;\r
- }\r
- private static class Feed {\r
- public String loginfo;\r
- public String status;\r
- public SubnetMatcher[] subnets;\r
- public Hashtable<String, String> authusers = new Hashtable<String, String>();\r
- public Redirection[] redirections;\r
- public Target[] targets;\r
- }\r
- private Hashtable<String, String> params = new Hashtable<String, String>();\r
- private Hashtable<String, Feed> feeds = new Hashtable<String, Feed>();\r
- private Hashtable<String, DestInfo> nodeinfo = new Hashtable<String, DestInfo>();\r
- private Hashtable<String, DestInfo> subinfo = new Hashtable<String, DestInfo>();\r
- private Hashtable<String, IsFrom> nodes = new Hashtable<String, IsFrom>();\r
- private String myname;\r
- private String myauth;\r
- private DestInfo[] alldests;\r
- private int rrcntr;\r
- /**\r
- * Process the raw provisioning data to configure this node\r
- * @param pd The parsed provisioning data\r
- * @param myname My name as seen by external systems\r
- * @param spooldir The directory where temporary files live\r
- * @param port The port number for URLs\r
- * @param nodeauthkey The keying string used to generate node authentication credentials\r
- */\r
- public NodeConfig(ProvData pd, String myname, String spooldir, int port, String nodeauthkey) {\r
- this.myname = myname;\r
- for (ProvParam p: pd.getParams()) {\r
- params.put(p.getName(), p.getValue());\r
- }\r
- Vector<DestInfo> div = new Vector<DestInfo>();\r
- myauth = NodeUtils.getNodeAuthHdr(myname, nodeauthkey);\r
- for (ProvNode pn: pd.getNodes()) {\r
- String cn = pn.getCName();\r
- if (nodeinfo.get(cn) != null) {\r
- continue;\r
- }\r
- String auth = NodeUtils.getNodeAuthHdr(cn, nodeauthkey);\r
- DestInfo di = new DestInfo("n:" + cn, spooldir + "/n/" + cn, null, "n2n-" + cn, "https://" + cn + ":" + port + "/internal/publish", cn, myauth, false, true);\r
- (new File(di.getSpool())).mkdirs();\r
- div.add(di);\r
- nodeinfo.put(cn, di);\r
- nodes.put(auth, new IsFrom(cn));\r
- }\r
- PathFinder pf = new PathFinder(myname, nodeinfo.keySet().toArray(new String[nodeinfo.size()]), pd.getHops());\r
- Hashtable<String, Vector<Redirection>> rdtab = new Hashtable<String, Vector<Redirection>>();\r
- for (ProvForceIngress pfi: pd.getForceIngress()) {\r
- Vector<Redirection> v = rdtab.get(pfi.getFeedId());\r
- if (v == null) {\r
- v = new Vector<Redirection>();\r
- rdtab.put(pfi.getFeedId(), v);\r
- }\r
- Redirection r = new Redirection();\r
- if (pfi.getSubnet() != null) {\r
- r.snm = new SubnetMatcher(pfi.getSubnet());\r
- }\r
- r.user = pfi.getUser();\r
- r.nodes = pfi.getNodes();\r
- v.add(r);\r
- }\r
- Hashtable<String, Hashtable<String, String>> pfutab = new Hashtable<String, Hashtable<String, String>>();\r
- for (ProvFeedUser pfu: pd.getFeedUsers()) {\r
- Hashtable<String, String> t = pfutab.get(pfu.getFeedId());\r
- if (t == null) {\r
- t = new Hashtable<String, String>();\r
- pfutab.put(pfu.getFeedId(), t);\r
- }\r
- t.put(pfu.getCredentials(), pfu.getUser());\r
- }\r
- Hashtable<String, String> egrtab = new Hashtable<String, String>();\r
- for (ProvForceEgress pfe: pd.getForceEgress()) {\r
- if (pfe.getNode().equals(myname) || nodeinfo.get(pfe.getNode()) == null) {\r
- continue;\r
- }\r
- egrtab.put(pfe.getSubId(), pfe.getNode());\r
- }\r
- Hashtable<String, Vector<SubnetMatcher>> pfstab = new Hashtable<String, Vector<SubnetMatcher>>();\r
- for (ProvFeedSubnet pfs: pd.getFeedSubnets()) {\r
- Vector<SubnetMatcher> v = pfstab.get(pfs.getFeedId());\r
- if (v == null) {\r
- v = new Vector<SubnetMatcher>();\r
- pfstab.put(pfs.getFeedId(), v);\r
- }\r
- v.add(new SubnetMatcher(pfs.getCidr()));\r
- }\r
- Hashtable<String, StringBuffer> ttab = new Hashtable<String, StringBuffer>();\r
- HashSet<String> allfeeds = new HashSet<String>();\r
- for (ProvFeed pfx: pd.getFeeds()) {\r
- if (pfx.getStatus() == null) {\r
- allfeeds.add(pfx.getId());\r
- }\r
- }\r
- for (ProvSubscription ps: pd.getSubscriptions()) {\r
- String sid = ps.getSubId();\r
- String fid = ps.getFeedId();\r
- if (!allfeeds.contains(fid)) {\r
- continue;\r
- }\r
- if (subinfo.get(sid) != null) {\r
- continue;\r
- }\r
- int sididx = 999;\r
- try {\r
- sididx = Integer.parseInt(sid);\r
- sididx -= sididx % 100;\r
- } catch (Exception e) {\r
- }\r
- String siddir = sididx + "/" + sid;\r
- DestInfo di = new DestInfo("s:" + sid, spooldir + "/s/" + siddir, sid, fid, ps.getURL(), ps.getAuthUser(), ps.getCredentials(), ps.isMetaDataOnly(), ps.isUsing100());\r
- (new File(di.getSpool())).mkdirs();\r
- div.add(di);\r
- subinfo.put(sid, di);\r
- String egr = egrtab.get(sid);\r
- if (egr != null) {\r
- sid = pf.getPath(egr) + sid;\r
- }\r
- StringBuffer sb = ttab.get(fid);\r
- if (sb == null) {\r
- sb = new StringBuffer();\r
- ttab.put(fid, sb);\r
- }\r
- sb.append(' ').append(sid);\r
- }\r
- alldests = div.toArray(new DestInfo[div.size()]);\r
- for (ProvFeed pfx: pd.getFeeds()) {\r
- String fid = pfx.getId();\r
- Feed f = feeds.get(fid);\r
- if (f != null) {\r
- continue;\r
- }\r
- f = new Feed();\r
- feeds.put(fid, f);\r
- f.loginfo = pfx.getLogData();\r
- f.status = pfx.getStatus();\r
- Vector<SubnetMatcher> v1 = pfstab.get(fid);\r
- if (v1 == null) {\r
- f.subnets = new SubnetMatcher[0];\r
- } else {\r
- f.subnets = v1.toArray(new SubnetMatcher[v1.size()]);\r
- }\r
- Hashtable<String, String> h1 = pfutab.get(fid);\r
- if (h1 == null) {\r
- h1 = new Hashtable<String, String>();\r
- }\r
- f.authusers = h1;\r
- Vector<Redirection> v2 = rdtab.get(fid);\r
- if (v2 == null) {\r
- f.redirections = new Redirection[0];\r
- } else {\r
- f.redirections = v2.toArray(new Redirection[v2.size()]);\r
- }\r
- StringBuffer sb = ttab.get(fid);\r
- if (sb == null) {\r
- f.targets = new Target[0];\r
- } else {\r
- f.targets = parseRouting(sb.toString());\r
- }\r
- }\r
- }\r
- /**\r
- * Parse a target string into an array of targets\r
- * @param routing Target string\r
- * @return Array of targets.\r
- */\r
- public Target[] parseRouting(String routing) {\r
- routing = routing.trim();\r
- if ("".equals(routing)) {\r
- return(new Target[0]);\r
- }\r
- String[] xx = routing.split("\\s+");\r
- Hashtable<String, Target> tmap = new Hashtable<String, Target>();\r
- HashSet<String> subset = new HashSet<String>();\r
- Vector<Target> tv = new Vector<Target>();\r
- Target[] ret = new Target[xx.length];\r
- for (int i = 0; i < xx.length; i++) {\r
- String t = xx[i];\r
- int j = t.indexOf('/');\r
- if (j == -1) {\r
- DestInfo di = subinfo.get(t);\r
- if (di == null) {\r
- tv.add(new Target(null, t));\r
- } else {\r
- if (!subset.contains(t)) {\r
- subset.add(t);\r
- tv.add(new Target(di, null));\r
- }\r
- }\r
- } else {\r
- String node = t.substring(0, j);\r
- String rtg = t.substring(j + 1);\r
- DestInfo di = nodeinfo.get(node);\r
- if (di == null) {\r
- tv.add(new Target(null, t));\r
- } else {\r
- Target tt = tmap.get(node);\r
- if (tt == null) {\r
- tt = new Target(di, rtg);\r
- tmap.put(node, tt);\r
- tv.add(tt);\r
- } else {\r
- tt.addRouting(rtg);\r
- }\r
- }\r
- }\r
- }\r
- return(tv.toArray(new Target[tv.size()]));\r
- }\r
- /**\r
- * Check whether this is a valid node-to-node transfer\r
- * @param credentials Credentials offered by the supposed node\r
- * @param ip IP address the request came from\r
- */\r
- public boolean isAnotherNode(String credentials, String ip) {\r
- IsFrom n = nodes.get(credentials);\r
- return (n != null && n.isFrom(ip));\r
- }\r
- /**\r
- * Check whether publication is allowed.\r
- * @param feedid The ID of the feed being requested.\r
- * @param credentials The offered credentials\r
- * @param ip The requesting IP address\r
- */\r
- public String isPublishPermitted(String feedid, String credentials, String ip) {\r
- Feed f = feeds.get(feedid);\r
- String nf = "Feed does not exist";\r
- if (f != null) {\r
- nf = f.status;\r
- }\r
- if (nf != null) {\r
- return(nf);\r
- }\r
- String user = f.authusers.get(credentials);\r
- if (user == null) {\r
- return("Publisher not permitted for this feed");\r
- }\r
- if (f.subnets.length == 0) {\r
- return(null);\r
- }\r
- byte[] addr = NodeUtils.getInetAddress(ip);\r
- for (SubnetMatcher snm: f.subnets) {\r
- if (snm.matches(addr)) {\r
- return(null);\r
- }\r
- }\r
- return("Publisher not permitted for this feed");\r
- }\r
- /**\r
- * Get authenticated user\r
- */\r
- public String getAuthUser(String feedid, String credentials) {\r
- return(feeds.get(feedid).authusers.get(credentials));\r
- }\r
- /**\r
- * Check if the request should be redirected to a different ingress node\r
- */\r
- public String getIngressNode(String feedid, String user, String ip) {\r
- Feed f = feeds.get(feedid);\r
- if (f.redirections.length == 0) {\r
- return(null);\r
- }\r
- byte[] addr = NodeUtils.getInetAddress(ip);\r
- for (Redirection r: f.redirections) {\r
- if (r.user != null && !user.equals(r.user)) {\r
- continue;\r
- }\r
- if (r.snm != null && !r.snm.matches(addr)) {\r
- continue;\r
- }\r
- for (String n: r.nodes) {\r
- if (myname.equals(n)) {\r
- return(null);\r
- }\r
- }\r
- if (r.nodes.length == 0) {\r
- return(null);\r
- }\r
- return(r.nodes[rrcntr++ % r.nodes.length]);\r
- }\r
- return(null);\r
- }\r
- /**\r
- * Get a provisioned configuration parameter\r
- */\r
- public String getProvParam(String name) {\r
- return(params.get(name));\r
- }\r
- /**\r
- * Get all the DestInfos\r
- */\r
- public DestInfo[] getAllDests() {\r
- return(alldests);\r
- }\r
- /**\r
- * Get the targets for a feed\r
- * @param feedid The feed ID\r
- * @return The targets this feed should be delivered to\r
- */\r
- public Target[] getTargets(String feedid) {\r
- if (feedid == null) {\r
- return(new Target[0]);\r
- }\r
- Feed f = feeds.get(feedid);\r
- if (f == null) {\r
- return(new Target[0]);\r
- }\r
- return(f.targets);\r
- }\r
- /**\r
- * Get the feed ID for a subscription\r
- * @param subid The subscription ID\r
- * @return The feed ID\r
- */\r
- public String getFeedId(String subid) {\r
- DestInfo di = subinfo.get(subid);\r
- if (di == null) {\r
- return(null);\r
- }\r
- return(di.getLogData());\r
- }\r
- /**\r
- * Get the spool directory for a subscription\r
- * @param subid The subscription ID\r
- * @return The spool directory\r
- */\r
- public String getSpoolDir(String subid) {\r
- DestInfo di = subinfo.get(subid);\r
- if (di == null) {\r
- return(null);\r
- }\r
- return(di.getSpool());\r
- }\r
- /**\r
- * Get the Authorization value this node uses\r
- * @return The Authorization header value for this node\r
- */\r
- public String getMyAuth() {\r
- return(myauth);\r
- }\r
-\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+import java.io.*;
+
+/**
+ * Processed configuration for this node.
+ * <p>
+ * The NodeConfig represents a processed configuration from the Data Router
+ * provisioning server. Each time configuration data is received from the
+ * provisioning server, a new NodeConfig is created and the previous one
+ * discarded.
+ */
+public class NodeConfig {
+ /**
+ * Raw configuration entry for a data router node
+ */
+ public static class ProvNode {
+ private String cname;
+
+ /**
+ * Construct a node configuration entry.
+ *
+ * @param cname The cname of the node.
+ */
+ public ProvNode(String cname) {
+ this.cname = cname;
+ }
+
+ /**
+ * Get the cname of the node
+ */
+ public String getCName() {
+ return (cname);
+ }
+ }
+
+ /**
+ * Raw configuration entry for a provisioning parameter
+ */
+ public static class ProvParam {
+ private String name;
+ private String value;
+
+ /**
+ * Construct a provisioning parameter configuration entry.
+ *
+ * @param name The name of the parameter.
+ * @param value The value of the parameter.
+ */
+ public ProvParam(String name, String value) {
+ this.name = name;
+ this.value = value;
+ }
+
+ /**
+ * Get the name of the parameter.
+ */
+ public String getName() {
+ return (name);
+ }
+
+ /**
+ * Get the value of the parameter.
+ */
+ public String getValue() {
+ return (value);
+ }
+ }
+
+ /**
+ * Raw configuration entry for a data feed.
+ */
+ public static class ProvFeed {
+ private String id;
+ private String logdata;
+ private String status;
+
+ /**
+ * Construct a feed configuration entry.
+ *
+ * @param id The feed ID of the entry.
+ * @param logdata String for log entries about the entry.
+ * @param status The reason why this feed cannot be used (Feed has been deleted, Feed has been suspended) or null if it is valid.
+ */
+ public ProvFeed(String id, String logdata, String status) {
+ this.id = id;
+ this.logdata = logdata;
+ this.status = status;
+ }
+
+ /**
+ * Get the feed id of the data feed.
+ */
+ public String getId() {
+ return (id);
+ }
+
+ /**
+ * Get the log data of the data feed.
+ */
+ public String getLogData() {
+ return (logdata);
+ }
+
+ /**
+ * Get the status of the data feed.
+ */
+ public String getStatus() {
+ return (status);
+ }
+ }
+
+ /**
+ * Raw configuration entry for a feed user.
+ */
+ public static class ProvFeedUser {
+ private String feedid;
+ private String user;
+ private String credentials;
+
+ /**
+ * Construct a feed user configuration entry
+ *
+ * @param feedid The feed id.
+ * @param user The user that will publish to the feed.
+ * @param credentials The Authorization header the user will use to publish.
+ */
+ public ProvFeedUser(String feedid, String user, String credentials) {
+ this.feedid = feedid;
+ this.user = user;
+ this.credentials = credentials;
+ }
+
+ /**
+ * Get the feed id of the feed user.
+ */
+ public String getFeedId() {
+ return (feedid);
+ }
+
+ /**
+ * Get the user for the feed user.
+ */
+ public String getUser() {
+ return (user);
+ }
+
+ /**
+ * Get the credentials for the feed user.
+ */
+ public String getCredentials() {
+ return (credentials);
+ }
+ }
+
+ /**
+ * Raw configuration entry for a feed subnet
+ */
+ public static class ProvFeedSubnet {
+ private String feedid;
+ private String cidr;
+
+ /**
+ * Construct a feed subnet configuration entry
+ *
+ * @param feedid The feed ID
+ * @param cidr The CIDR allowed to publish to the feed.
+ */
+ public ProvFeedSubnet(String feedid, String cidr) {
+ this.feedid = feedid;
+ this.cidr = cidr;
+ }
+
+ /**
+ * Get the feed id of the feed subnet.
+ */
+ public String getFeedId() {
+ return (feedid);
+ }
+
+ /**
+ * Get the CIDR of the feed subnet.
+ */
+ public String getCidr() {
+ return (cidr);
+ }
+ }
+
+ /**
+ * Raw configuration entry for a subscription
+ */
+ public static class ProvSubscription {
+ private String subid;
+ private String feedid;
+ private String url;
+ private String authuser;
+ private String credentials;
+ private boolean metaonly;
+ private boolean use100;
+
+ /**
+ * Construct a subscription configuration entry
+ *
+ * @param subid The subscription ID
+ * @param feedid The feed ID
+ * @param url The base delivery URL (not including the fileid)
+ * @param authuser The user in the credentials used to deliver
+ * @param credentials The credentials used to authenticate to the delivery URL exactly as they go in the Authorization header.
+ * @param metaonly Is this a meta data only subscription?
+ * @param use100 Should we send Expect: 100-continue?
+ */
+ public ProvSubscription(String subid, String feedid, String url, String authuser, String credentials, boolean metaonly, boolean use100) {
+ this.subid = subid;
+ this.feedid = feedid;
+ this.url = url;
+ this.authuser = authuser;
+ this.credentials = credentials;
+ this.metaonly = metaonly;
+ this.use100 = use100;
+ }
+
+ /**
+ * Get the subscription ID
+ */
+ public String getSubId() {
+ return (subid);
+ }
+
+ /**
+ * Get the feed ID
+ */
+ public String getFeedId() {
+ return (feedid);
+ }
+
+ /**
+ * Get the delivery URL
+ */
+ public String getURL() {
+ return (url);
+ }
+
+ /**
+ * Get the user
+ */
+ public String getAuthUser() {
+ return (authuser);
+ }
+
+ /**
+ * Get the delivery credentials
+ */
+ public String getCredentials() {
+ return (credentials);
+ }
+
+ /**
+ * Is this a meta data only subscription?
+ */
+ public boolean isMetaDataOnly() {
+ return (metaonly);
+ }
+
+ /**
+ * Should we send Expect: 100-continue?
+ */
+ public boolean isUsing100() {
+ return (use100);
+ }
+ }
+
+ /**
+ * Raw configuration entry for controlled ingress to the data router node
+ */
+ public static class ProvForceIngress {
+ private String feedid;
+ private String subnet;
+ private String user;
+ private String[] nodes;
+
+ /**
+ * Construct a forced ingress configuration entry
+ *
+ * @param feedid The feed ID that this entry applies to
+ * @param subnet The CIDR for which publisher IP addresses this entry applies to or "" if it applies to all publisher IP addresses
+ * @param user The publishing user this entry applies to or "" if it applies to all publishing users.
+ * @param nodes The array of FQDNs of the data router nodes to redirect publication attempts to.
+ */
+ public ProvForceIngress(String feedid, String subnet, String user, String[] nodes) {
+ this.feedid = feedid;
+ this.subnet = subnet;
+ this.user = user;
+ this.nodes = nodes;
+ }
+
+ /**
+ * Get the feed ID
+ */
+ public String getFeedId() {
+ return (feedid);
+ }
+
+ /**
+ * Get the subnet
+ */
+ public String getSubnet() {
+ return (subnet);
+ }
+
+ /**
+ * Get the user
+ */
+ public String getUser() {
+ return (user);
+ }
+
+ /**
+ * Get the node
+ */
+ public String[] getNodes() {
+ return (nodes);
+ }
+ }
+
+ /**
+ * Raw configuration entry for controlled egress from the data router
+ */
+ public static class ProvForceEgress {
+ private String subid;
+ private String node;
+
+ /**
+ * Construct a forced egress configuration entry
+ *
+ * @param subid The subscription ID the subscription with forced egress
+ * @param node The node handling deliveries for this subscription
+ */
+ public ProvForceEgress(String subid, String node) {
+ this.subid = subid;
+ this.node = node;
+ }
+
+ /**
+ * Get the subscription ID
+ */
+ public String getSubId() {
+ return (subid);
+ }
+
+ /**
+ * Get the node
+ */
+ public String getNode() {
+ return (node);
+ }
+ }
+
+ /**
+ * Raw configuration entry for routing within the data router network
+ */
+ public static class ProvHop {
+ private String from;
+ private String to;
+ private String via;
+
+ /**
+ * A human readable description of this entry
+ */
+ public String toString() {
+ return ("Hop " + from + "->" + to + " via " + via);
+ }
+
+ /**
+ * Construct a hop entry
+ *
+ * @param from The FQDN of the node with the data to be delivered
+ * @param to The FQDN of the node that will deliver to the subscriber
+ * @param via The FQDN of the node where the from node should send the data
+ */
+ public ProvHop(String from, String to, String via) {
+ this.from = from;
+ this.to = to;
+ this.via = via;
+ }
+
+ /**
+ * Get the from node
+ */
+ public String getFrom() {
+ return (from);
+ }
+
+ /**
+ * Get the to node
+ */
+ public String getTo() {
+ return (to);
+ }
+
+ /**
+ * Get the next intermediate node
+ */
+ public String getVia() {
+ return (via);
+ }
+ }
+
+ private static class Redirection {
+ public SubnetMatcher snm;
+ public String user;
+ public String[] nodes;
+ }
+
+ private static class Feed {
+ public String loginfo;
+ public String status;
+ public SubnetMatcher[] subnets;
+ public Hashtable<String, String> authusers = new Hashtable<String, String>();
+ public Redirection[] redirections;
+ public Target[] targets;
+ }
+
+ private Hashtable<String, String> params = new Hashtable<String, String>();
+ private Hashtable<String, Feed> feeds = new Hashtable<String, Feed>();
+ private Hashtable<String, DestInfo> nodeinfo = new Hashtable<String, DestInfo>();
+ private Hashtable<String, DestInfo> subinfo = new Hashtable<String, DestInfo>();
+ private Hashtable<String, IsFrom> nodes = new Hashtable<String, IsFrom>();
+ private String myname;
+ private String myauth;
+ private DestInfo[] alldests;
+ private int rrcntr;
+
+ /**
+ * Process the raw provisioning data to configure this node
+ *
+ * @param pd The parsed provisioning data
+ * @param myname My name as seen by external systems
+ * @param spooldir The directory where temporary files live
+ * @param port The port number for URLs
+ * @param nodeauthkey The keying string used to generate node authentication credentials
+ */
+ public NodeConfig(ProvData pd, String myname, String spooldir, int port, String nodeauthkey) {
+ this.myname = myname;
+ for (ProvParam p : pd.getParams()) {
+ params.put(p.getName(), p.getValue());
+ }
+ Vector<DestInfo> div = new Vector<DestInfo>();
+ myauth = NodeUtils.getNodeAuthHdr(myname, nodeauthkey);
+ for (ProvNode pn : pd.getNodes()) {
+ String cn = pn.getCName();
+ if (nodeinfo.get(cn) != null) {
+ continue;
+ }
+ String auth = NodeUtils.getNodeAuthHdr(cn, nodeauthkey);
+ DestInfo di = new DestInfo("n:" + cn, spooldir + "/n/" + cn, null, "n2n-" + cn, "https://" + cn + ":" + port + "/internal/publish", cn, myauth, false, true);
+ (new File(di.getSpool())).mkdirs();
+ div.add(di);
+ nodeinfo.put(cn, di);
+ nodes.put(auth, new IsFrom(cn));
+ }
+ PathFinder pf = new PathFinder(myname, nodeinfo.keySet().toArray(new String[nodeinfo.size()]), pd.getHops());
+ Hashtable<String, Vector<Redirection>> rdtab = new Hashtable<String, Vector<Redirection>>();
+ for (ProvForceIngress pfi : pd.getForceIngress()) {
+ Vector<Redirection> v = rdtab.get(pfi.getFeedId());
+ if (v == null) {
+ v = new Vector<Redirection>();
+ rdtab.put(pfi.getFeedId(), v);
+ }
+ Redirection r = new Redirection();
+ if (pfi.getSubnet() != null) {
+ r.snm = new SubnetMatcher(pfi.getSubnet());
+ }
+ r.user = pfi.getUser();
+ r.nodes = pfi.getNodes();
+ v.add(r);
+ }
+ Hashtable<String, Hashtable<String, String>> pfutab = new Hashtable<String, Hashtable<String, String>>();
+ for (ProvFeedUser pfu : pd.getFeedUsers()) {
+ Hashtable<String, String> t = pfutab.get(pfu.getFeedId());
+ if (t == null) {
+ t = new Hashtable<String, String>();
+ pfutab.put(pfu.getFeedId(), t);
+ }
+ t.put(pfu.getCredentials(), pfu.getUser());
+ }
+ Hashtable<String, String> egrtab = new Hashtable<String, String>();
+ for (ProvForceEgress pfe : pd.getForceEgress()) {
+ if (pfe.getNode().equals(myname) || nodeinfo.get(pfe.getNode()) == null) {
+ continue;
+ }
+ egrtab.put(pfe.getSubId(), pfe.getNode());
+ }
+ Hashtable<String, Vector<SubnetMatcher>> pfstab = new Hashtable<String, Vector<SubnetMatcher>>();
+ for (ProvFeedSubnet pfs : pd.getFeedSubnets()) {
+ Vector<SubnetMatcher> v = pfstab.get(pfs.getFeedId());
+ if (v == null) {
+ v = new Vector<SubnetMatcher>();
+ pfstab.put(pfs.getFeedId(), v);
+ }
+ v.add(new SubnetMatcher(pfs.getCidr()));
+ }
+ Hashtable<String, StringBuffer> ttab = new Hashtable<String, StringBuffer>();
+ HashSet<String> allfeeds = new HashSet<String>();
+ for (ProvFeed pfx : pd.getFeeds()) {
+ if (pfx.getStatus() == null) {
+ allfeeds.add(pfx.getId());
+ }
+ }
+ for (ProvSubscription ps : pd.getSubscriptions()) {
+ String sid = ps.getSubId();
+ String fid = ps.getFeedId();
+ if (!allfeeds.contains(fid)) {
+ continue;
+ }
+ if (subinfo.get(sid) != null) {
+ continue;
+ }
+ int sididx = 999;
+ try {
+ sididx = Integer.parseInt(sid);
+ sididx -= sididx % 100;
+ } catch (Exception e) {
+ }
+ String siddir = sididx + "/" + sid;
+ DestInfo di = new DestInfo("s:" + sid, spooldir + "/s/" + siddir, sid, fid, ps.getURL(), ps.getAuthUser(), ps.getCredentials(), ps.isMetaDataOnly(), ps.isUsing100());
+ (new File(di.getSpool())).mkdirs();
+ div.add(di);
+ subinfo.put(sid, di);
+ String egr = egrtab.get(sid);
+ if (egr != null) {
+ sid = pf.getPath(egr) + sid;
+ }
+ StringBuffer sb = ttab.get(fid);
+ if (sb == null) {
+ sb = new StringBuffer();
+ ttab.put(fid, sb);
+ }
+ sb.append(' ').append(sid);
+ }
+ alldests = div.toArray(new DestInfo[div.size()]);
+ for (ProvFeed pfx : pd.getFeeds()) {
+ String fid = pfx.getId();
+ Feed f = feeds.get(fid);
+ if (f != null) {
+ continue;
+ }
+ f = new Feed();
+ feeds.put(fid, f);
+ f.loginfo = pfx.getLogData();
+ f.status = pfx.getStatus();
+ Vector<SubnetMatcher> v1 = pfstab.get(fid);
+ if (v1 == null) {
+ f.subnets = new SubnetMatcher[0];
+ } else {
+ f.subnets = v1.toArray(new SubnetMatcher[v1.size()]);
+ }
+ Hashtable<String, String> h1 = pfutab.get(fid);
+ if (h1 == null) {
+ h1 = new Hashtable<String, String>();
+ }
+ f.authusers = h1;
+ Vector<Redirection> v2 = rdtab.get(fid);
+ if (v2 == null) {
+ f.redirections = new Redirection[0];
+ } else {
+ f.redirections = v2.toArray(new Redirection[v2.size()]);
+ }
+ StringBuffer sb = ttab.get(fid);
+ if (sb == null) {
+ f.targets = new Target[0];
+ } else {
+ f.targets = parseRouting(sb.toString());
+ }
+ }
+ }
+
+ /**
+ * Parse a target string into an array of targets
+ *
+ * @param routing Target string
+ * @return Array of targets.
+ */
+ public Target[] parseRouting(String routing) {
+ routing = routing.trim();
+ if ("".equals(routing)) {
+ return (new Target[0]);
+ }
+ String[] xx = routing.split("\\s+");
+ Hashtable<String, Target> tmap = new Hashtable<String, Target>();
+ HashSet<String> subset = new HashSet<String>();
+ Vector<Target> tv = new Vector<Target>();
+ Target[] ret = new Target[xx.length];
+ for (int i = 0; i < xx.length; i++) {
+ String t = xx[i];
+ int j = t.indexOf('/');
+ if (j == -1) {
+ DestInfo di = subinfo.get(t);
+ if (di == null) {
+ tv.add(new Target(null, t));
+ } else {
+ if (!subset.contains(t)) {
+ subset.add(t);
+ tv.add(new Target(di, null));
+ }
+ }
+ } else {
+ String node = t.substring(0, j);
+ String rtg = t.substring(j + 1);
+ DestInfo di = nodeinfo.get(node);
+ if (di == null) {
+ tv.add(new Target(null, t));
+ } else {
+ Target tt = tmap.get(node);
+ if (tt == null) {
+ tt = new Target(di, rtg);
+ tmap.put(node, tt);
+ tv.add(tt);
+ } else {
+ tt.addRouting(rtg);
+ }
+ }
+ }
+ }
+ return (tv.toArray(new Target[tv.size()]));
+ }
+
+ /**
+ * Check whether this is a valid node-to-node transfer
+ *
+ * @param credentials Credentials offered by the supposed node
+ * @param ip IP address the request came from
+ */
+ public boolean isAnotherNode(String credentials, String ip) {
+ IsFrom n = nodes.get(credentials);
+ return (n != null && n.isFrom(ip));
+ }
+
+ /**
+ * Check whether publication is allowed.
+ *
+ * @param feedid The ID of the feed being requested.
+ * @param credentials The offered credentials
+ * @param ip The requesting IP address
+ */
+ public String isPublishPermitted(String feedid, String credentials, String ip) {
+ Feed f = feeds.get(feedid);
+ String nf = "Feed does not exist";
+ if (f != null) {
+ nf = f.status;
+ }
+ if (nf != null) {
+ return (nf);
+ }
+ String user = f.authusers.get(credentials);
+ if (user == null) {
+ return ("Publisher not permitted for this feed");
+ }
+ if (f.subnets.length == 0) {
+ return (null);
+ }
+ byte[] addr = NodeUtils.getInetAddress(ip);
+ for (SubnetMatcher snm : f.subnets) {
+ if (snm.matches(addr)) {
+ return (null);
+ }
+ }
+ return ("Publisher not permitted for this feed");
+ }
+
+ /**
+ * Get authenticated user
+ */
+ public String getAuthUser(String feedid, String credentials) {
+ return (feeds.get(feedid).authusers.get(credentials));
+ }
+
+ /**
+ * Check if the request should be redirected to a different ingress node
+ */
+ public String getIngressNode(String feedid, String user, String ip) {
+ Feed f = feeds.get(feedid);
+ if (f.redirections.length == 0) {
+ return (null);
+ }
+ byte[] addr = NodeUtils.getInetAddress(ip);
+ for (Redirection r : f.redirections) {
+ if (r.user != null && !user.equals(r.user)) {
+ continue;
+ }
+ if (r.snm != null && !r.snm.matches(addr)) {
+ continue;
+ }
+ for (String n : r.nodes) {
+ if (myname.equals(n)) {
+ return (null);
+ }
+ }
+ if (r.nodes.length == 0) {
+ return (null);
+ }
+ return (r.nodes[rrcntr++ % r.nodes.length]);
+ }
+ return (null);
+ }
+
+ /**
+ * Get a provisioned configuration parameter
+ */
+ public String getProvParam(String name) {
+ return (params.get(name));
+ }
+
+ /**
+ * Get all the DestInfos
+ */
+ public DestInfo[] getAllDests() {
+ return (alldests);
+ }
+
+ /**
+ * Get the targets for a feed
+ *
+ * @param feedid The feed ID
+ * @return The targets this feed should be delivered to
+ */
+ public Target[] getTargets(String feedid) {
+ if (feedid == null) {
+ return (new Target[0]);
+ }
+ Feed f = feeds.get(feedid);
+ if (f == null) {
+ return (new Target[0]);
+ }
+ return (f.targets);
+ }
+
+ /**
+ * Get the feed ID for a subscription
+ *
+ * @param subid The subscription ID
+ * @return The feed ID
+ */
+ public String getFeedId(String subid) {
+ DestInfo di = subinfo.get(subid);
+ if (di == null) {
+ return (null);
+ }
+ return (di.getLogData());
+ }
+
+ /**
+ * Get the spool directory for a subscription
+ *
+ * @param subid The subscription ID
+ * @return The spool directory
+ */
+ public String getSpoolDir(String subid) {
+ DestInfo di = subinfo.get(subid);
+ if (di == null) {
+ return (null);
+ }
+ return (di.getSpool());
+ }
+
+ /**
+ * Get the Authorization value this node uses
+ *
+ * @return The Authorization header value for this node
+ */
+ public String getMyAuth() {
+ return (myauth);
+ }
+
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.net.*;\r
-import java.util.*;\r
-import java.io.*;\r
-import org.apache.log4j.Logger;\r
-import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;\r
-\r
-import com.att.eelf.configuration.EELFLogger;\r
-import com.att.eelf.configuration.EELFManager;\r
-\r
-\r
-/**\r
- * Maintain the configuration of a Data Router node\r
- * <p>\r
- * The NodeConfigManager is the single point of contact for servlet, delivery, event logging, and log retention subsystems to access configuration information. (Log4J has its own configuration mechanism).\r
- * <p>\r
- * There are two basic sets of configuration data. The\r
- * static local configuration data, stored in a local configuration file (created\r
- * as part of installation by SWM), and the dynamic global\r
- * configuration data fetched from the data router provisioning server.\r
- */\r
-public class NodeConfigManager implements DeliveryQueueHelper {\r
- private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.NodeConfigManager");\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeConfigManager");\r
- private static NodeConfigManager base = new NodeConfigManager();\r
-\r
- private Timer timer = new Timer("Node Configuration Timer", true);\r
- private long maxfailuretimer;\r
- private long initfailuretimer;\r
- private long expirationtimer;\r
- private double failurebackoff;\r
- private long fairtimelimit;\r
- private int fairfilelimit;\r
- private double fdpstart;\r
- private double fdpstop;\r
- private int deliverythreads;\r
- private String provurl;\r
- private String provhost;\r
- private IsFrom provcheck;\r
- private int gfport;\r
- private int svcport;\r
- private int port;\r
- private String spooldir;\r
- private String logdir;\r
- private long logretention;\r
- private String redirfile;\r
- private String kstype;\r
- private String ksfile;\r
- private String kspass;\r
- private String kpass;\r
- private String tstype;\r
- private String tsfile;\r
- private String tspass;\r
- private String myname;\r
- private RedirManager rdmgr;\r
- private RateLimitedOperation pfetcher;\r
- private NodeConfig config;\r
- private File quiesce;\r
- private PublishId pid;\r
- private String nak;\r
- private TaskList configtasks = new TaskList();\r
- private String eventlogurl;\r
- private String eventlogprefix;\r
- private String eventlogsuffix;\r
- private String eventloginterval;\r
- private boolean followredirects;\r
-\r
- \r
- /**\r
- * Get the default node configuration manager\r
- */\r
- public static NodeConfigManager getInstance() {\r
- return(base);\r
- }\r
- /**\r
- * Initialize the configuration of a Data Router node\r
- */\r
- private NodeConfigManager() {\r
- Properties p = new Properties();\r
- try {\r
- p.load(new FileInputStream(System.getProperty("org.onap.dmaap.datarouter.node.ConfigFile", "/opt/app/datartr/etc/node.properties")));\r
- } catch (Exception e) {\r
- \r
- NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");\r
- eelflogger.error(EelfMsgs.MESSAGE_PROPERTIES_LOAD_ERROR);\r
- logger.error("NODE0301 Unable to load local configuration file " + System.getProperty("org.onap.dmaap.datarouter.node.ConfigFile", "/opt/app/datartr/etc/node.properties"), e);\r
- }\r
- provurl = p.getProperty("ProvisioningURL", "https://feeds-drtr.web.att.com/internal/prov");\r
- try {\r
- provhost = (new URL(provurl)).getHost();\r
- } catch (Exception e) {\r
- NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");\r
- eelflogger.error(EelfMsgs.MESSAGE_BAD_PROV_URL, provurl);\r
- logger.error("NODE0302 Bad provisioning server URL " + provurl);\r
- System.exit(1);\r
- }\r
- logger.info("NODE0303 Provisioning server is " + provhost);\r
- eventlogurl = p.getProperty("LogUploadURL", "https://feeds-drtr.web.att.com/internal/logs");\r
- provcheck = new IsFrom(provhost);\r
- gfport = Integer.parseInt(p.getProperty("IntHttpPort", "8080"));\r
- svcport = Integer.parseInt(p.getProperty("IntHttpsPort", "8443"));\r
- port = Integer.parseInt(p.getProperty("ExtHttpsPort", "443"));\r
- long minpfinterval = Long.parseLong(p.getProperty("MinProvFetchInterval", "10000"));\r
- long minrsinterval = Long.parseLong(p.getProperty("MinRedirSaveInterval", "10000"));\r
- spooldir = p.getProperty("SpoolDir", "spool");\r
- File fdir = new File(spooldir + "/f");\r
- fdir.mkdirs();\r
- for (File junk: fdir.listFiles()) {\r
- if (junk.isFile()) {\r
- junk.delete();\r
- }\r
- }\r
- logdir = p.getProperty("LogDir", "logs");\r
- (new File(logdir)).mkdirs();\r
- logretention = Long.parseLong(p.getProperty("LogRetention", "30")) * 86400000L;\r
- eventlogprefix = logdir + "/events";\r
- eventlogsuffix = ".log";\r
- String redirfile = p.getProperty("RedirectionFile", "etc/redirections.dat");\r
- kstype = p.getProperty("KeyStoreType", "jks");\r
- ksfile = p.getProperty("KeyStoreFile", "etc/keystore");\r
- kspass = p.getProperty("KeyStorePassword", "changeme");\r
- kpass = p.getProperty("KeyPassword", "changeme");\r
- tstype = p.getProperty("TrustStoreType", "jks");\r
- tsfile = p.getProperty("TrustStoreFile");\r
- tspass = p.getProperty("TrustStorePassword", "changeme");\r
- if (tsfile != null && tsfile.length() > 0) {\r
- System.setProperty("javax.net.ssl.trustStoreType", tstype);\r
- System.setProperty("javax.net.ssl.trustStore", tsfile);\r
- System.setProperty("javax.net.ssl.trustStorePassword", tspass);\r
- }\r
- nak = p.getProperty("NodeAuthKey", "Node123!");\r
- quiesce = new File(p.getProperty("QuiesceFile", "etc/SHUTDOWN"));\r
- myname = NodeUtils.getCanonicalName(kstype, ksfile, kspass);\r
- if (myname == null) {\r
- NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");\r
- eelflogger.error(EelfMsgs.MESSAGE_KEYSTORE_FETCH_ERROR, ksfile);\r
- logger.error("NODE0309 Unable to fetch canonical name from keystore file " + ksfile);\r
- System.exit(1);\r
- }\r
- logger.info("NODE0304 My certificate says my name is " + myname);\r
- pid = new PublishId(myname);\r
- rdmgr = new RedirManager(redirfile, minrsinterval, timer);\r
- pfetcher = new RateLimitedOperation(minpfinterval, timer) {\r
- public void run() {\r
- fetchconfig();\r
- }\r
- };\r
- logger.info("NODE0305 Attempting to fetch configuration at " + provurl);\r
- pfetcher.request();\r
- }\r
- private void localconfig() {\r
- followredirects = Boolean.parseBoolean(getProvParam("FOLLOW_REDIRECTS", "false"));\r
- eventloginterval = getProvParam("LOGROLL_INTERVAL", "5m");\r
- initfailuretimer = 10000;\r
- maxfailuretimer = 3600000;\r
- expirationtimer = 86400000;\r
- failurebackoff = 2.0;\r
- deliverythreads = 40;\r
- fairfilelimit = 100;\r
- fairtimelimit = 60000;\r
- fdpstart = 0.05;\r
- fdpstop = 0.2;\r
- try { initfailuretimer = (long)(Double.parseDouble(getProvParam("DELIVERY_INIT_RETRY_INTERVAL")) * 1000); } catch (Exception e) {}\r
- try { maxfailuretimer = (long)(Double.parseDouble(getProvParam("DELIVERY_MAX_RETRY_INTERVAL")) * 1000); } catch (Exception e) {}\r
- try { expirationtimer = (long)(Double.parseDouble(getProvParam("DELIVERY_MAX_AGE")) * 1000); } catch (Exception e) {}\r
- try { failurebackoff = Double.parseDouble(getProvParam("DELIVERY_RETRY_RATIO")); } catch (Exception e) {}\r
- try { deliverythreads = Integer.parseInt(getProvParam("DELIVERY_THREADS")); } catch (Exception e) {}\r
- try { fairfilelimit = Integer.parseInt(getProvParam("FAIR_FILE_LIMIT")); } catch (Exception e) {}\r
- try { fairtimelimit = (long)(Double.parseDouble(getProvParam("FAIR_TIME_LIMIT")) * 1000); } catch (Exception e) {}\r
- try { fdpstart = Double.parseDouble(getProvParam("FREE_DISK_RED_PERCENT")) / 100.0; } catch (Exception e) {}\r
- try { fdpstop = Double.parseDouble(getProvParam("FREE_DISK_YELLOW_PERCENT")) / 100.0; } catch (Exception e) {}\r
- if (fdpstart < 0.01) {\r
- fdpstart = 0.01;\r
- }\r
- if (fdpstart > 0.5) {\r
- fdpstart = 0.5;\r
- }\r
- if (fdpstop < fdpstart) {\r
- fdpstop = fdpstart;\r
- }\r
- if (fdpstop > 0.5) {\r
- fdpstop = 0.5;\r
- }\r
- }\r
- private void fetchconfig() {\r
- try {\r
- System.out.println("provurl:: "+provurl);\r
- Reader r = new InputStreamReader((new URL(provurl)).openStream());\r
- config = new NodeConfig(new ProvData(r), myname, spooldir, port, nak);\r
- localconfig();\r
- configtasks.startRun();\r
- Runnable rr;\r
- while ((rr = configtasks.next()) != null) {\r
- try {\r
- rr.run();\r
- } catch (Exception e) {\r
- }\r
- }\r
- } catch (Exception e) {\r
- e.printStackTrace();\r
- NodeUtils.setIpAndFqdnForEelf("fetchconfigs");\r
- eelflogger.error(EelfMsgs.MESSAGE_CONF_FAILED, e.toString());\r
- logger.error("NODE0306 Configuration failed " + e.toString() + " - try again later", e);\r
- pfetcher.request();\r
- }\r
- }\r
- /**\r
- * Process a gofetch request from a particular IP address. If the\r
- * IP address is not an IP address we would go to to fetch the\r
- * provisioning data, ignore the request. If the data has been\r
- * fetched very recently (default 10 seconds), wait a while before fetching again.\r
- */\r
- public synchronized void gofetch(String remoteaddr) {\r
- if (provcheck.isFrom(remoteaddr)) {\r
- logger.info("NODE0307 Received configuration fetch request from provisioning server " + remoteaddr);\r
- pfetcher.request();\r
- } else {\r
- logger.info("NODE0308 Received configuration fetch request from unexpected server " + remoteaddr);\r
- }\r
- }\r
- /**\r
- * Am I configured?\r
- */\r
- public boolean isConfigured() {\r
- return(config != null);\r
- }\r
- /**\r
- * Am I shut down?\r
- */\r
- public boolean isShutdown() {\r
- return(quiesce.exists());\r
- }\r
- /**\r
- * Given a routing string, get the targets.\r
- * @param routing Target string\r
- * @return array of targets\r
- */\r
- public Target[] parseRouting(String routing) {\r
- return(config.parseRouting(routing));\r
- }\r
- /**\r
- * Given a set of credentials and an IP address, is this request from another node?\r
- * @param credentials Credentials offered by the supposed node\r
- * @param ip IP address the request came from\r
- * @return If the credentials and IP address are recognized, true, otherwise false.\r
- */\r
- public boolean isAnotherNode(String credentials, String ip) {\r
- return(config.isAnotherNode(credentials, ip));\r
- }\r
- /**\r
- * Check whether publication is allowed.\r
- * @param feedid The ID of the feed being requested\r
- * @param credentials The offered credentials\r
- * @param ip The requesting IP address\r
- * @return True if the IP and credentials are valid for the specified feed.\r
- */\r
- public String isPublishPermitted(String feedid, String credentials, String ip) {\r
- return(config.isPublishPermitted(feedid, credentials, ip));\r
- }\r
- /**\r
- * Check who the user is given the feed ID and the offered credentials.\r
- * @param feedid The ID of the feed specified\r
- * @param credentials The offered credentials\r
- * @return Null if the credentials are invalid or the user if they are valid.\r
- */\r
- public String getAuthUser(String feedid, String credentials) {\r
- return(config.getAuthUser(feedid, credentials));\r
- }\r
- /**\r
- * Check if the publish request should be sent to another node based on the feedid, user, and source IP address.\r
- * @param feedid The ID of the feed specified\r
- * @param user The publishing user\r
- * @param ip The IP address of the publish endpoint\r
- * @return Null if the request should be accepted or the correct hostname if it should be sent to another node.\r
- */\r
- public String getIngressNode(String feedid, String user, String ip) {\r
- return(config.getIngressNode(feedid, user, ip));\r
- }\r
- /**\r
- * Get a provisioned configuration parameter (from the provisioning server configuration)\r
- * @param name The name of the parameter\r
- * @return The value of the parameter or null if it is not defined.\r
- */\r
- public String getProvParam(String name) {\r
- return(config.getProvParam(name));\r
- }\r
- /**\r
- * Get a provisioned configuration parameter (from the provisioning server configuration)\r
- * @param name The name of the parameter\r
- * @param deflt The value to use if the parameter is not defined\r
- * @return The value of the parameter or deflt if it is not defined.\r
- */\r
- public String getProvParam(String name, String deflt) {\r
- name = config.getProvParam(name);\r
- if (name == null) {\r
- name = deflt;\r
- }\r
- return(name);\r
- }\r
- /**\r
- * Generate a publish ID\r
- */\r
- public String getPublishId() {\r
- return(pid.next());\r
- }\r
- /**\r
- * Get all the outbound spooling destinations.\r
- * This will include both subscriptions and nodes.\r
- */\r
- public DestInfo[] getAllDests() {\r
- return(config.getAllDests());\r
- }\r
- /**\r
- * Register a task to run whenever the configuration changes\r
- */\r
- public void registerConfigTask(Runnable task) {\r
- configtasks.addTask(task);\r
- }\r
- /**\r
- * Deregister a task to run whenever the configuration changes\r
- */\r
- public void deregisterConfigTask(Runnable task) {\r
- configtasks.removeTask(task);\r
- }\r
- /**\r
- * Get the URL to deliver a message to.\r
- * @param destinfo The destination information\r
- * @param fileid The file ID\r
- * @return The URL to deliver to\r
- */\r
- public String getDestURL(DestInfo destinfo, String fileid) {\r
- String subid = destinfo.getSubId();\r
- String purl = destinfo.getURL();\r
- if (followredirects && subid != null) {\r
- purl = rdmgr.lookup(subid, purl);\r
- }\r
- return(purl + "/" + fileid);\r
- }\r
- /**\r
- * Is a destination redirected?\r
- */\r
- public boolean isDestRedirected(DestInfo destinfo) {\r
- return(followredirects && rdmgr.isRedirected(destinfo.getSubId()));\r
- }\r
- /**\r
- * Set up redirection on receipt of a 3XX from a target URL\r
- */\r
- public boolean handleRedirection(DestInfo destinfo, String redirto, String fileid) {\r
- fileid = "/" + fileid;\r
- String subid = destinfo.getSubId();\r
- String purl = destinfo.getURL();\r
- if (followredirects && subid != null && redirto.endsWith(fileid)) {\r
- redirto = redirto.substring(0, redirto.length() - fileid.length());\r
- if (!redirto.equals(purl)) {\r
- rdmgr.redirect(subid, purl, redirto);\r
- return(true);\r
- }\r
- }\r
- return(false);\r
- }\r
- /**\r
- * Handle unreachable target URL\r
- */\r
- public void handleUnreachable(DestInfo destinfo) {\r
- String subid = destinfo.getSubId();\r
- if (followredirects && subid != null) {\r
- rdmgr.forget(subid);\r
- }\r
- }\r
- /**\r
- * Get the timeout before retrying after an initial delivery failure\r
- */\r
- public long getInitFailureTimer() {\r
- return(initfailuretimer);\r
- }\r
- /**\r
- * Get the maximum timeout between delivery attempts\r
- */\r
- public long getMaxFailureTimer() {\r
- return(maxfailuretimer);\r
- }\r
- /**\r
- * Get the ratio between consecutive delivery attempts\r
- */\r
- public double getFailureBackoff() {\r
- return(failurebackoff);\r
- }\r
- /**\r
- * Get the expiration timer for deliveries\r
- */\r
- public long getExpirationTimer() {\r
- return(expirationtimer);\r
- }\r
- /**\r
- * Get the maximum number of file delivery attempts before checking\r
- * if another queue has work to be performed.\r
- */\r
- public int getFairFileLimit() {\r
- return(fairfilelimit);\r
- }\r
- /**\r
- * Get the maximum amount of time spent delivering files before\r
- * checking if another queue has work to be performed.\r
- */\r
- public long getFairTimeLimit() {\r
- return(fairtimelimit);\r
- }\r
- /**\r
- * Get the targets for a feed\r
- * @param feedid The feed ID\r
- * @return The targets this feed should be delivered to\r
- */\r
- public Target[] getTargets(String feedid) {\r
- return(config.getTargets(feedid));\r
- }\r
- /**\r
- * Get the spool directory for temporary files\r
- */\r
- public String getSpoolDir() {\r
- return(spooldir + "/f");\r
- }\r
- /**\r
- * Get the base directory for spool directories\r
- */\r
- public String getSpoolBase() {\r
- return(spooldir);\r
- }\r
- /**\r
- * Get the key store type\r
- */\r
- public String getKSType() {\r
- return(kstype);\r
- }\r
- /**\r
- * Get the key store file\r
- */\r
- public String getKSFile() {\r
- return(ksfile);\r
- }\r
- /**\r
- * Get the key store password\r
- */\r
- public String getKSPass() {\r
- return(kspass);\r
- }\r
- /**\r
- * Get the key password\r
- */\r
- public String getKPass() {\r
- return(kpass);\r
- }\r
- /**\r
- * Get the http port\r
- */\r
- public int getHttpPort() {\r
- return(gfport);\r
- }\r
- /**\r
- * Get the https port\r
- */\r
- public int getHttpsPort() {\r
- return(svcport);\r
- }\r
- /**\r
- * Get the externally visible https port\r
- */\r
- public int getExtHttpsPort() {\r
- return(port);\r
- }\r
- /**\r
- * Get the external name of this machine\r
- */\r
- public String getMyName() {\r
- return(myname);\r
- }\r
- /**\r
- * Get the number of threads to use for delivery\r
- */\r
- public int getDeliveryThreads() {\r
- return(deliverythreads);\r
- }\r
- /**\r
- * Get the URL for uploading the event log data\r
- */\r
- public String getEventLogUrl() {\r
- return(eventlogurl);\r
- }\r
- /**\r
- * Get the prefix for the names of event log files\r
- */\r
- public String getEventLogPrefix() {\r
- return(eventlogprefix);\r
- }\r
- /**\r
- * Get the suffix for the names of the event log files\r
- */\r
- public String getEventLogSuffix() {\r
- return(eventlogsuffix);\r
- }\r
- /**\r
- * Get the interval between event log file rollovers\r
- */\r
- public String getEventLogInterval() {\r
- return(eventloginterval);\r
- }\r
- /**\r
- * Should I follow redirects from subscribers?\r
- */\r
- public boolean isFollowRedirects() {\r
- return(followredirects);\r
- }\r
- /**\r
- * Get the directory where the event and node log files live\r
- */\r
- public String getLogDir() {\r
- return(logdir);\r
- }\r
- /**\r
- * How long do I keep log files (in milliseconds)\r
- */\r
- public long getLogRetention() {\r
- return(logretention);\r
- }\r
- /**\r
- * Get the timer\r
- */\r
- public Timer getTimer() {\r
- return(timer);\r
- }\r
- /**\r
- * Get the feed ID for a subscription\r
- * @param subid The subscription ID\r
- * @return The feed ID\r
- */\r
- public String getFeedId(String subid) {\r
- return(config.getFeedId(subid));\r
- }\r
- /**\r
- * Get the authorization string this node uses\r
- * @return The Authorization string for this node\r
- */\r
- public String getMyAuth() {\r
- return(config.getMyAuth());\r
- }\r
- /**\r
- * Get the fraction of free spool disk space where we start throwing away undelivered files. This is FREE_DISK_RED_PERCENT / 100.0. Default is 0.05. Limited by 0.01 <= FreeDiskStart <= 0.5.\r
- */\r
- public double getFreeDiskStart() {\r
- return(fdpstart);\r
- }\r
- /**\r
- * Get the fraction of free spool disk space where we stop throwing away undelivered files. This is FREE_DISK_YELLOW_PERCENT / 100.0. Default is 0.2. Limited by FreeDiskStart <= FreeDiskStop <= 0.5.\r
- */\r
- public double getFreeDiskStop() {\r
- return(fdpstop);\r
- }\r
- /**\r
- * Get the spool directory for a subscription\r
- */\r
- public String getSpoolDir(String subid, String remoteaddr) {\r
- if (provcheck.isFrom(remoteaddr)) {\r
- String sdir = config.getSpoolDir(subid);\r
- if (sdir != null) {\r
- logger.info("NODE0310 Received subscription reset request for subscription " + subid + " from provisioning server " + remoteaddr);\r
- } else {\r
- logger.info("NODE0311 Received subscription reset request for unknown subscription " + subid + " from provisioning server " + remoteaddr);\r
- }\r
- return(sdir);\r
- } else {\r
- logger.info("NODE0312 Received subscription reset request from unexpected server " + remoteaddr);\r
- return(null);\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.net.*;
+import java.util.*;
+import java.io.*;
+
+import org.apache.log4j.Logger;
+import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+
+/**
+ * Maintain the configuration of a Data Router node
+ * <p>
+ * The NodeConfigManager is the single point of contact for servlet, delivery, event logging, and log retention subsystems to access configuration information. (Log4J has its own configuration mechanism).
+ * <p>
+ * There are two basic sets of configuration data. The
+ * static local configuration data, stored in a local configuration file (created
+ * as part of installation by SWM), and the dynamic global
+ * configuration data fetched from the data router provisioning server.
+ */
+public class NodeConfigManager implements DeliveryQueueHelper {
+ private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.NodeConfigManager");
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeConfigManager");
+ private static NodeConfigManager base = new NodeConfigManager();
+
+ private Timer timer = new Timer("Node Configuration Timer", true);
+ private long maxfailuretimer;
+ private long initfailuretimer;
+ private long expirationtimer;
+ private double failurebackoff;
+ private long fairtimelimit;
+ private int fairfilelimit;
+ private double fdpstart;
+ private double fdpstop;
+ private int deliverythreads;
+ private String provurl;
+ private String provhost;
+ private IsFrom provcheck;
+ private int gfport;
+ private int svcport;
+ private int port;
+ private String spooldir;
+ private String logdir;
+ private long logretention;
+ private String redirfile;
+ private String kstype;
+ private String ksfile;
+ private String kspass;
+ private String kpass;
+ private String tstype;
+ private String tsfile;
+ private String tspass;
+ private String myname;
+ private RedirManager rdmgr;
+ private RateLimitedOperation pfetcher;
+ private NodeConfig config;
+ private File quiesce;
+ private PublishId pid;
+ private String nak;
+ private TaskList configtasks = new TaskList();
+ private String eventlogurl;
+ private String eventlogprefix;
+ private String eventlogsuffix;
+ private String eventloginterval;
+ private boolean followredirects;
+
+
+ /**
+ * Get the default node configuration manager
+ */
+ public static NodeConfigManager getInstance() {
+ return (base);
+ }
+
+ /**
+ * Initialize the configuration of a Data Router node
+ */
+ private NodeConfigManager() {
+ Properties p = new Properties();
+ try {
+ p.load(new FileInputStream(System.getProperty("org.onap.dmaap.datarouter.node.ConfigFile", "/opt/app/datartr/etc/node.properties")));
+ } catch (Exception e) {
+
+ NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");
+ eelflogger.error(EelfMsgs.MESSAGE_PROPERTIES_LOAD_ERROR);
+ logger.error("NODE0301 Unable to load local configuration file " + System.getProperty("org.onap.dmaap.datarouter.node.ConfigFile", "/opt/app/datartr/etc/node.properties"), e);
+ }
+ provurl = p.getProperty("ProvisioningURL", "https://feeds-drtr.web.att.com/internal/prov");
+ try {
+ provhost = (new URL(provurl)).getHost();
+ } catch (Exception e) {
+ NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");
+ eelflogger.error(EelfMsgs.MESSAGE_BAD_PROV_URL, provurl);
+ logger.error("NODE0302 Bad provisioning server URL " + provurl);
+ System.exit(1);
+ }
+ logger.info("NODE0303 Provisioning server is " + provhost);
+ eventlogurl = p.getProperty("LogUploadURL", "https://feeds-drtr.web.att.com/internal/logs");
+ provcheck = new IsFrom(provhost);
+ gfport = Integer.parseInt(p.getProperty("IntHttpPort", "8080"));
+ svcport = Integer.parseInt(p.getProperty("IntHttpsPort", "8443"));
+ port = Integer.parseInt(p.getProperty("ExtHttpsPort", "443"));
+ long minpfinterval = Long.parseLong(p.getProperty("MinProvFetchInterval", "10000"));
+ long minrsinterval = Long.parseLong(p.getProperty("MinRedirSaveInterval", "10000"));
+ spooldir = p.getProperty("SpoolDir", "spool");
+ File fdir = new File(spooldir + "/f");
+ fdir.mkdirs();
+ for (File junk : fdir.listFiles()) { // NOTE(review): listFiles() can return null if mkdirs() failed — confirm
+ if (junk.isFile()) {
+ junk.delete();
+ }
+ }
+ logdir = p.getProperty("LogDir", "logs");
+ (new File(logdir)).mkdirs();
+ logretention = Long.parseLong(p.getProperty("LogRetention", "30")) * 86400000L;
+ eventlogprefix = logdir + "/events";
+ eventlogsuffix = ".log";
+ redirfile = p.getProperty("RedirectionFile", "etc/redirections.dat"); // fix: local declaration shadowed the field, leaving it null
+ kstype = p.getProperty("KeyStoreType", "jks");
+ ksfile = p.getProperty("KeyStoreFile", "etc/keystore");
+ kspass = p.getProperty("KeyStorePassword", "changeme");
+ kpass = p.getProperty("KeyPassword", "changeme");
+ tstype = p.getProperty("TrustStoreType", "jks");
+ tsfile = p.getProperty("TrustStoreFile");
+ tspass = p.getProperty("TrustStorePassword", "changeme");
+ if (tsfile != null && tsfile.length() > 0) {
+ System.setProperty("javax.net.ssl.trustStoreType", tstype);
+ System.setProperty("javax.net.ssl.trustStore", tsfile);
+ System.setProperty("javax.net.ssl.trustStorePassword", tspass);
+ }
+ nak = p.getProperty("NodeAuthKey", "Node123!"); // NOTE(review): hardcoded default credential — must be overridden in node.properties
+ quiesce = new File(p.getProperty("QuiesceFile", "etc/SHUTDOWN"));
+ myname = NodeUtils.getCanonicalName(kstype, ksfile, kspass);
+ if (myname == null) {
+ NodeUtils.setIpAndFqdnForEelf("NodeConfigManager");
+ eelflogger.error(EelfMsgs.MESSAGE_KEYSTORE_FETCH_ERROR, ksfile);
+ logger.error("NODE0309 Unable to fetch canonical name from keystore file " + ksfile);
+ System.exit(1);
+ }
+ logger.info("NODE0304 My certificate says my name is " + myname);
+ pid = new PublishId(myname);
+ rdmgr = new RedirManager(redirfile, minrsinterval, timer);
+ pfetcher = new RateLimitedOperation(minpfinterval, timer) {
+ public void run() {
+ fetchconfig();
+ }
+ };
+ logger.info("NODE0305 Attempting to fetch configuration at " + provurl);
+ pfetcher.request();
+ }
+
+ /**
+ * Refresh locally cached tuning parameters from the newly fetched
+ * provisioning data, falling back to hard-coded defaults when a
+ * parameter is missing or cannot be parsed.
+ * <p>
+ * Called from fetchconfig() after a new NodeConfig has been installed.
+ */
+ private void localconfig() {
+ followredirects = Boolean.parseBoolean(getProvParam("FOLLOW_REDIRECTS", "false"));
+ eventloginterval = getProvParam("LOGROLL_INTERVAL", "5m");
+ // Defaults; each is overwritten below only if the corresponding
+ // provisioning parameter is present and parses cleanly.
+ initfailuretimer = 10000;
+ maxfailuretimer = 3600000;
+ expirationtimer = 86400000;
+ failurebackoff = 2.0;
+ deliverythreads = 40;
+ fairfilelimit = 100;
+ fairtimelimit = 60000;
+ fdpstart = 0.05;
+ fdpstop = 0.2;
+ // Each parameter is parsed independently; the deliberately empty catch
+ // blocks keep the default above when the value is absent or malformed.
+ try {
+ initfailuretimer = (long) (Double.parseDouble(getProvParam("DELIVERY_INIT_RETRY_INTERVAL")) * 1000);
+ } catch (Exception e) {
+ }
+ try {
+ maxfailuretimer = (long) (Double.parseDouble(getProvParam("DELIVERY_MAX_RETRY_INTERVAL")) * 1000);
+ } catch (Exception e) {
+ }
+ try {
+ expirationtimer = (long) (Double.parseDouble(getProvParam("DELIVERY_MAX_AGE")) * 1000);
+ } catch (Exception e) {
+ }
+ try {
+ failurebackoff = Double.parseDouble(getProvParam("DELIVERY_RETRY_RATIO"));
+ } catch (Exception e) {
+ }
+ try {
+ deliverythreads = Integer.parseInt(getProvParam("DELIVERY_THREADS"));
+ } catch (Exception e) {
+ }
+ try {
+ fairfilelimit = Integer.parseInt(getProvParam("FAIR_FILE_LIMIT"));
+ } catch (Exception e) {
+ }
+ try {
+ fairtimelimit = (long) (Double.parseDouble(getProvParam("FAIR_TIME_LIMIT")) * 1000);
+ } catch (Exception e) {
+ }
+ try {
+ fdpstart = Double.parseDouble(getProvParam("FREE_DISK_RED_PERCENT")) / 100.0;
+ } catch (Exception e) {
+ }
+ try {
+ fdpstop = Double.parseDouble(getProvParam("FREE_DISK_YELLOW_PERCENT")) / 100.0;
+ } catch (Exception e) {
+ }
+ // Clamp the free-disk thresholds so that 0.01 <= fdpstart <= 0.5
+ // and fdpstart <= fdpstop <= 0.5.
+ if (fdpstart < 0.01) {
+ fdpstart = 0.01;
+ }
+ if (fdpstart > 0.5) {
+ fdpstart = 0.5;
+ }
+ if (fdpstop < fdpstart) {
+ fdpstop = fdpstart;
+ }
+ if (fdpstop > 0.5) {
+ fdpstop = 0.5;
+ }
+ }
+
+ /**
+ * Fetch the provisioning data from provurl, install a new NodeConfig,
+ * refresh the locally cached parameters via localconfig(), then run all
+ * registered configuration-change tasks. On any failure, another
+ * (rate-limited) fetch attempt is requested.
+ */
+ private void fetchconfig() {
+ try {
+ // NOTE(review): debug print to stdout — should go through the logger.
+ System.out.println("provurl:: " + provurl);
+ // NOTE(review): the Reader is never closed here — verify ProvData
+ // closes it, otherwise this leaks a connection per fetch.
+ Reader r = new InputStreamReader((new URL(provurl)).openStream());
+ config = new NodeConfig(new ProvData(r), myname, spooldir, port, nak);
+ localconfig();
+ configtasks.startRun();
+ Runnable rr;
+ while ((rr = configtasks.next()) != null) {
+ try {
+ rr.run();
+ } catch (Exception e) {
+ // A failing config task must not stop the remaining tasks.
+ }
+ }
+ } catch (Exception e) {
+ // NOTE(review): printStackTrace duplicates the logger.error(..., e) below.
+ e.printStackTrace();
+ NodeUtils.setIpAndFqdnForEelf("fetchconfigs");
+ eelflogger.error(EelfMsgs.MESSAGE_CONF_FAILED, e.toString());
+ logger.error("NODE0306 Configuration failed " + e.toString() + " - try again later", e);
+ pfetcher.request();
+ }
+ }
+
+ /**
+ * Process a gofetch request from a particular IP address. If the
+ * IP address is not an IP address we would go to in order to fetch the
+ * provisioning data, ignore the request. If the data has been
+ * fetched very recently (default 10 seconds), wait a while before fetching again.
+ *
+ * @param remoteaddr the IP address the fetch request came from
+ */
+ public synchronized void gofetch(String remoteaddr) {
+ if (provcheck.isFrom(remoteaddr)) {
+ logger.info("NODE0307 Received configuration fetch request from provisioning server " + remoteaddr);
+ pfetcher.request();
+ } else {
+ logger.info("NODE0308 Received configuration fetch request from unexpected server " + remoteaddr);
+ }
+ }
+
+ /**
+ * Am I configured?
+ *
+ * @return true once a NodeConfig has been successfully fetched from the provisioning server.
+ */
+ public boolean isConfigured() {
+ return (config != null);
+ }
+
+ /**
+ * Am I shut down?
+ *
+ * @return true if the quiesce (shutdown marker) file exists.
+ */
+ public boolean isShutdown() {
+ return (quiesce.exists());
+ }
+
+ /**
+ * Given a routing string, get the targets. Delegates to the current
+ * NodeConfig (NPE if called before configuration completes — see isConfigured()).
+ *
+ * @param routing Target string
+ * @return array of targets
+ */
+ public Target[] parseRouting(String routing) {
+ return (config.parseRouting(routing));
+ }
+
+ /**
+ * Given a set of credentials and an IP address, is this request from another node?
+ * Delegates to the current NodeConfig.
+ *
+ * @param credentials Credentials offered by the supposed node
+ * @param ip IP address the request came from
+ * @return If the credentials and IP address are recognized, true, otherwise false.
+ */
+ public boolean isAnotherNode(String credentials, String ip) {
+ return (config.isAnotherNode(credentials, ip));
+ }
+
+ /**
+ * Check whether publication is allowed. Delegates to the current NodeConfig.
+ *
+ * @param feedid The ID of the feed being requested
+ * @param credentials The offered credentials
+ * @param ip The requesting IP address
+ * @return Null if the IP and credentials are valid for the specified feed,
+ * otherwise a reason string describing why publication is rejected.
+ */
+ public String isPublishPermitted(String feedid, String credentials, String ip) {
+ return (config.isPublishPermitted(feedid, credentials, ip));
+ }
+
+ /**
+ * Check who the user is given the feed ID and the offered credentials.
+ * Delegates to the current NodeConfig.
+ *
+ * @param feedid The ID of the feed specified
+ * @param credentials The offered credentials
+ * @return Null if the credentials are invalid or the user if they are valid.
+ */
+ public String getAuthUser(String feedid, String credentials) {
+ return (config.getAuthUser(feedid, credentials));
+ }
+
+ /**
+ * Check if the publish request should be sent to another node based on the feedid, user, and source IP address.
+ * Delegates to the current NodeConfig.
+ *
+ * @param feedid The ID of the feed specified
+ * @param user The publishing user
+ * @param ip The IP address of the publish endpoint
+ * @return Null if the request should be accepted or the correct hostname if it should be sent to another node.
+ */
+ public String getIngressNode(String feedid, String user, String ip) {
+ return (config.getIngressNode(feedid, user, ip));
+ }
+
+ /**
+ * Get a provisioned configuration parameter (from the provisioning server configuration).
+ * NPE if called before the first configuration fetch completes.
+ *
+ * @param name The name of the parameter
+ * @return The value of the parameter or null if it is not defined.
+ */
+ public String getProvParam(String name) {
+ return (config.getProvParam(name));
+ }
+
+ /**
+ * Get a provisioned configuration parameter (from the provisioning server configuration).
+ *
+ * @param name The name of the parameter
+ * @param deflt The value to use if the parameter is not defined
+ * @return The value of the parameter or deflt if it is not defined.
+ */
+ public String getProvParam(String name, String deflt) {
+ // The 'name' parameter is reused as the result variable below.
+ name = config.getProvParam(name);
+ if (name == null) {
+ name = deflt;
+ }
+ return (name);
+ }
+
+ /**
+ * Generate a publish ID.
+ *
+ * @return the next ID from the PublishId generator seeded with this node's name.
+ */
+ public String getPublishId() {
+ return (pid.next());
+ }
+
+ /**
+ * Get all the outbound spooling destinations.
+ * This will include both subscriptions and nodes.
+ *
+ * @return all destinations known to the current NodeConfig.
+ */
+ public DestInfo[] getAllDests() {
+ return (config.getAllDests());
+ }
+
+ /**
+ * Register a task to run whenever the configuration changes.
+ *
+ * @param task the task to be invoked on each configuration change
+ */
+ public void registerConfigTask(Runnable task) {
+ configtasks.addTask(task);
+ }
+
+ /**
+ * Deregister a task to run whenever the configuration changes.
+ *
+ * @param task the task previously passed to registerConfigTask()
+ */
+ public void deregisterConfigTask(Runnable task) {
+ configtasks.removeTask(task);
+ }
+
+ /**
+ * Get the URL to deliver a message to.
+ * <p>
+ * If redirect-following is enabled and the destination is a subscription,
+ * the base URL may be replaced by a previously learned redirect
+ * (see handleRedirection()).
+ *
+ * @param destinfo The destination information
+ * @param fileid The file ID
+ * @return The URL to deliver to
+ */
+ public String getDestURL(DestInfo destinfo, String fileid) {
+ String subid = destinfo.getSubId();
+ String purl = destinfo.getURL();
+ if (followredirects && subid != null) {
+ purl = rdmgr.lookup(subid, purl);
+ }
+ return (purl + "/" + fileid);
+ }
+
+ /**
+ * Is a destination redirected?
+ *
+ * @param destinfo the destination to check
+ * @return true if redirect-following is enabled and the redirect manager
+ * has a redirection recorded for this destination's subscription.
+ */
+ public boolean isDestRedirected(DestInfo destinfo) {
+ return (followredirects && rdmgr.isRedirected(destinfo.getSubId()));
+ }
+
+ /**
+ * Set up redirection on receipt of a 3XX from a target URL.
+ * <p>
+ * The redirect is recorded only if redirect-following is enabled, the
+ * destination is a subscription, and the redirect URL ends with the same
+ * file ID (i.e. only the base URL changed).
+ *
+ * @param destinfo The destination that issued the redirect
+ * @param redirto The redirect target URL from the 3XX response
+ * @param fileid The file ID being delivered
+ * @return true if a new redirection was recorded, false otherwise
+ */
+ public boolean handleRedirection(DestInfo destinfo, String redirto, String fileid) {
+ fileid = "/" + fileid;
+ String subid = destinfo.getSubId();
+ String purl = destinfo.getURL();
+ if (followredirects && subid != null && redirto.endsWith(fileid)) {
+ // Strip the file ID to recover the redirected base URL.
+ redirto = redirto.substring(0, redirto.length() - fileid.length());
+ if (!redirto.equals(purl)) {
+ rdmgr.redirect(subid, purl, redirto);
+ return (true);
+ }
+ }
+ return (false);
+ }
+
+ /**
+ * Handle unreachable target URL: forget any recorded redirection for the
+ * destination's subscription so delivery reverts to the original URL.
+ *
+ * @param destinfo the unreachable destination
+ */
+ public void handleUnreachable(DestInfo destinfo) {
+ String subid = destinfo.getSubId();
+ if (followredirects && subid != null) {
+ rdmgr.forget(subid);
+ }
+ }
+
+ /**
+ * Get the timeout before retrying after an initial delivery failure.
+ *
+ * @return milliseconds; DELIVERY_INIT_RETRY_INTERVAL * 1000, default 10000.
+ */
+ public long getInitFailureTimer() {
+ return (initfailuretimer);
+ }
+
+ /**
+ * Get the maximum timeout between delivery attempts.
+ *
+ * @return milliseconds; DELIVERY_MAX_RETRY_INTERVAL * 1000, default 3600000.
+ */
+ public long getMaxFailureTimer() {
+ return (maxfailuretimer);
+ }
+
+ /**
+ * Get the ratio between consecutive delivery attempts.
+ *
+ * @return DELIVERY_RETRY_RATIO, default 2.0.
+ */
+ public double getFailureBackoff() {
+ return (failurebackoff);
+ }
+
+ /**
+ * Get the expiration timer for deliveries.
+ *
+ * @return milliseconds; DELIVERY_MAX_AGE * 1000, default 86400000 (one day).
+ */
+ public long getExpirationTimer() {
+ return (expirationtimer);
+ }
+
+ /**
+ * Get the maximum number of file delivery attempts before checking
+ * if another queue has work to be performed.
+ *
+ * @return FAIR_FILE_LIMIT, default 100.
+ */
+ public int getFairFileLimit() {
+ return (fairfilelimit);
+ }
+
+ /**
+ * Get the maximum amount of time spent delivering files before
+ * checking if another queue has work to be performed.
+ *
+ * @return milliseconds; FAIR_TIME_LIMIT * 1000, default 60000.
+ */
+ public long getFairTimeLimit() {
+ return (fairtimelimit);
+ }
+
+ /**
+ * Get the targets for a feed. Delegates to the current NodeConfig.
+ *
+ * @param feedid The feed ID
+ * @return The targets this feed should be delivered to
+ */
+ public Target[] getTargets(String feedid) {
+ return (config.getTargets(feedid));
+ }
+
+ /**
+ * Get the spool directory for temporary files.
+ *
+ * @return the "f" subdirectory of the spool base (created at startup).
+ */
+ public String getSpoolDir() {
+ return (spooldir + "/f");
+ }
+
+ /**
+ * Get the base directory for spool directories.
+ *
+ * @return the SpoolDir property, default "spool".
+ */
+ public String getSpoolBase() {
+ return (spooldir);
+ }
+
+ /**
+ * Get the key store type.
+ *
+ * @return the KeyStoreType property, default "jks".
+ */
+ public String getKSType() {
+ return (kstype);
+ }
+
+ /**
+ * Get the key store file.
+ *
+ * @return the KeyStoreFile property, default "etc/keystore".
+ */
+ public String getKSFile() {
+ return (ksfile);
+ }
+
+ /**
+ * Get the key store password.
+ *
+ * @return the KeyStorePassword property.
+ * NOTE(review): defaults to "changeme" — must be overridden in deployment.
+ */
+ public String getKSPass() {
+ return (kspass);
+ }
+
+ /**
+ * Get the key password.
+ *
+ * @return the KeyPassword property.
+ * NOTE(review): defaults to "changeme" — must be overridden in deployment.
+ */
+ public String getKPass() {
+ return (kpass);
+ }
+
+ /**
+ * Get the http port.
+ *
+ * @return the IntHttpPort property, default 8080.
+ */
+ public int getHttpPort() {
+ return (gfport);
+ }
+
+ /**
+ * Get the https port.
+ *
+ * @return the IntHttpsPort property, default 8443.
+ */
+ public int getHttpsPort() {
+ return (svcport);
+ }
+
+ /**
+ * Get the externally visible https port.
+ *
+ * @return the ExtHttpsPort property, default 443.
+ */
+ public int getExtHttpsPort() {
+ return (port);
+ }
+
+ /**
+ * Get the external name of this machine, taken from the canonical name
+ * in the keystore certificate at startup.
+ */
+ public String getMyName() {
+ return (myname);
+ }
+
+ /**
+ * Get the number of threads to use for delivery.
+ *
+ * @return DELIVERY_THREADS, default 40.
+ */
+ public int getDeliveryThreads() {
+ return (deliverythreads);
+ }
+
+ /**
+ * Get the URL for uploading the event log data.
+ *
+ * @return the LogUploadURL property.
+ */
+ public String getEventLogUrl() {
+ return (eventlogurl);
+ }
+
+ /**
+ * Get the prefix for the names of event log files ("&lt;logdir&gt;/events").
+ */
+ public String getEventLogPrefix() {
+ return (eventlogprefix);
+ }
+
+ /**
+ * Get the suffix for the names of the event log files (".log").
+ */
+ public String getEventLogSuffix() {
+ return (eventlogsuffix);
+ }
+
+ /**
+ * Get the interval between event log file rollovers.
+ *
+ * @return LOGROLL_INTERVAL, default "5m".
+ */
+ public String getEventLogInterval() {
+ return (eventloginterval);
+ }
+
+ /**
+ * Should I follow redirects from subscribers?
+ *
+ * @return FOLLOW_REDIRECTS, default false.
+ */
+ public boolean isFollowRedirects() {
+ return (followredirects);
+ }
+
+ /**
+ * Get the directory where the event and node log files live.
+ *
+ * @return the LogDir property, default "logs".
+ */
+ public String getLogDir() {
+ return (logdir);
+ }
+
+ /**
+ * How long do I keep log files (in milliseconds).
+ *
+ * @return the LogRetention property (days, default 30) converted to milliseconds.
+ */
+ public long getLogRetention() {
+ return (logretention);
+ }
+
+ /**
+ * Get the shared Timer used for scheduled/rate-limited operations.
+ */
+ public Timer getTimer() {
+ return (timer);
+ }
+
+ /**
+ * Get the feed ID for a subscription. Delegates to the current NodeConfig.
+ *
+ * @param subid The subscription ID
+ * @return The feed ID
+ */
+ public String getFeedId(String subid) {
+ return (config.getFeedId(subid));
+ }
+
+ /**
+ * Get the authorization string this node uses. Delegates to the current NodeConfig.
+ *
+ * @return The Authorization string for this node
+ */
+ public String getMyAuth() {
+ return (config.getMyAuth());
+ }
+
+ /**
+ * Get the fraction of free spool disk space where we start throwing away
+ * undelivered files. This is FREE_DISK_RED_PERCENT / 100.0. Default is
+ * 0.05; clamped by localconfig() to the range [0.01, 0.5].
+ */
+ public double getFreeDiskStart() {
+ return (fdpstart);
+ }
+
+ /**
+ * Get the fraction of free spool disk space where we stop throwing away
+ * undelivered files. This is FREE_DISK_YELLOW_PERCENT / 100.0. Default is
+ * 0.2; clamped by localconfig() to the range [FreeDiskStart, 0.5].
+ */
+ public double getFreeDiskStop() {
+ return (fdpstop);
+ }
+
+ /**
+ * Get the spool directory for a subscription, honoring the request only
+ * when it comes from an address we would fetch provisioning data from.
+ *
+ * @param subid the subscription ID
+ * @param remoteaddr the requesting IP address
+ * @return the spool directory, or null if the requester is not the
+ * provisioning server or the subscription is unknown.
+ */
+ public String getSpoolDir(String subid, String remoteaddr) {
+ if (provcheck.isFrom(remoteaddr)) {
+ String sdir = config.getSpoolDir(subid);
+ if (sdir != null) {
+ logger.info("NODE0310 Received subscription reset request for subscription " + subid + " from provisioning server " + remoteaddr);
+ } else {
+ logger.info("NODE0311 Received subscription reset request for unknown subscription " + subid + " from provisioning server " + remoteaddr);
+ }
+ return (sdir);
+ } else {
+ logger.info("NODE0312 Received subscription reset request from unexpected server " + remoteaddr);
+ return (null);
+ }
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import org.eclipse.jetty.servlet.*;\r
-import org.eclipse.jetty.util.ssl.*;\r
-import org.eclipse.jetty.server.*;\r
-import org.eclipse.jetty.server.nio.*;\r
-import org.eclipse.jetty.server.ssl.*;\r
-import org.apache.log4j.Logger;\r
-\r
-/**\r
- * The main starting point for the Data Router node\r
- */\r
-public class NodeMain {\r
- private NodeMain() {}\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeMain");\r
- private static class wfconfig implements Runnable {\r
- private NodeConfigManager ncm;\r
- public wfconfig(NodeConfigManager ncm) {\r
- this.ncm = ncm;\r
- }\r
- public synchronized void run() {\r
- notify();\r
- }\r
- public synchronized void waitforconfig() {\r
- ncm.registerConfigTask(this);\r
- while (!ncm.isConfigured()) {\r
- logger.info("NODE0003 Waiting for Node Configuration");\r
- try {\r
- wait();\r
- } catch (Exception e) {\r
- }\r
- }\r
- ncm.deregisterConfigTask(this);\r
- logger.info("NODE0004 Node Configuration Data Received");\r
- }\r
- }\r
- private static Delivery d;\r
- private static NodeConfigManager ncm;\r
- /**\r
- * Reset the retry timer for a subscription\r
- */\r
- public static void resetQueue(String subid, String ip) {\r
- d.resetQueue(ncm.getSpoolDir(subid, ip));\r
- }\r
- /**\r
- * Start the data router.\r
- * <p>\r
- * The location of the node configuration file can be set using the\r
- * org.onap.dmaap.datarouter.node.ConfigFile system property. By\r
- * default, it is "etc/node.properties".\r
- */\r
- public static void main(String[] args) throws Exception {\r
- logger.info("NODE0001 Data Router Node Starting");\r
- IsFrom.setDNSCache();\r
- ncm = NodeConfigManager.getInstance();\r
- logger.info("NODE0002 I am " + ncm.getMyName());\r
- (new wfconfig(ncm)).waitforconfig();\r
- d = new Delivery(ncm);\r
- LogManager lm = new LogManager(ncm);\r
- Server server = new Server();\r
- SelectChannelConnector http = new SelectChannelConnector();\r
- http.setPort(ncm.getHttpPort());\r
- http.setMaxIdleTime(2000);\r
- http.setRequestHeaderSize(2048);\r
- SslSelectChannelConnector https = new SslSelectChannelConnector();\r
- https.setPort(ncm.getHttpsPort());\r
- https.setMaxIdleTime(30000);\r
- https.setRequestHeaderSize(8192);\r
- SslContextFactory cf = https.getSslContextFactory();\r
- \r
- /**Skip SSLv3 Fixes*/\r
- cf.addExcludeProtocols("SSLv3");\r
- logger.info("Excluded protocols node-"+cf.getExcludeProtocols());\r
- /**End of SSLv3 Fixes*/\r
-\r
- cf.setKeyStoreType(ncm.getKSType());\r
- cf.setKeyStorePath(ncm.getKSFile());\r
- cf.setKeyStorePassword(ncm.getKSPass());\r
- cf.setKeyManagerPassword(ncm.getKPass());\r
- server.setConnectors(new Connector[] { http, https });\r
- ServletContextHandler ctxt = new ServletContextHandler(0);\r
- ctxt.setContextPath("/");\r
- server.setHandler(ctxt);\r
- ctxt.addServlet(new ServletHolder(new NodeServlet()), "/*");\r
- logger.info("NODE0005 Data Router Node Activating Service");\r
- server.start();\r
- server.join();\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import org.eclipse.jetty.servlet.*;
+import org.eclipse.jetty.util.ssl.*;
+import org.eclipse.jetty.server.*;
+import org.eclipse.jetty.server.nio.*;
+import org.eclipse.jetty.server.ssl.*;
+import org.apache.log4j.Logger;
+
+/**
+ * The main starting point for the Data Router node
+ */
+public class NodeMain {
+ private NodeMain() {
+ }
+
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeMain");
+
+ private static class wfconfig implements Runnable {
+ private NodeConfigManager ncm;
+
+ public wfconfig(NodeConfigManager ncm) {
+ this.ncm = ncm;
+ }
+
+ public synchronized void run() {
+ notify();
+ }
+
+ public synchronized void waitforconfig() {
+ ncm.registerConfigTask(this);
+ while (!ncm.isConfigured()) {
+ logger.info("NODE0003 Waiting for Node Configuration");
+ try {
+ wait();
+ } catch (Exception e) {
+ }
+ }
+ ncm.deregisterConfigTask(this);
+ logger.info("NODE0004 Node Configuration Data Received");
+ }
+ }
+
+ private static Delivery d;
+ private static NodeConfigManager ncm;
+
+ /**
+ * Reset the retry timer for a subscription
+ */
+ public static void resetQueue(String subid, String ip) {
+ d.resetQueue(ncm.getSpoolDir(subid, ip));
+ }
+
+ /**
+ * Start the data router.
+ * <p>
+ * The location of the node configuration file can be set using the
+ * org.onap.dmaap.datarouter.node.ConfigFile system property. By
+ * default, it is "etc/node.properties".
+ */
+ public static void main(String[] args) throws Exception {
+ logger.info("NODE0001 Data Router Node Starting");
+ IsFrom.setDNSCache();
+ ncm = NodeConfigManager.getInstance();
+ logger.info("NODE0002 I am " + ncm.getMyName());
+ (new wfconfig(ncm)).waitforconfig();
+ d = new Delivery(ncm);
+ LogManager lm = new LogManager(ncm);
+ Server server = new Server();
+ SelectChannelConnector http = new SelectChannelConnector();
+ http.setPort(ncm.getHttpPort());
+ http.setMaxIdleTime(2000);
+ http.setRequestHeaderSize(2048);
+ SslSelectChannelConnector https = new SslSelectChannelConnector();
+ https.setPort(ncm.getHttpsPort());
+ https.setMaxIdleTime(30000);
+ https.setRequestHeaderSize(8192);
+ SslContextFactory cf = https.getSslContextFactory();
+
+ /**Skip SSLv3 Fixes*/
+ cf.addExcludeProtocols("SSLv3");
+ logger.info("Excluded protocols node-" + cf.getExcludeProtocols());
+ /**End of SSLv3 Fixes*/
+
+ cf.setKeyStoreType(ncm.getKSType());
+ cf.setKeyStorePath(ncm.getKSFile());
+ cf.setKeyStorePassword(ncm.getKSPass());
+ cf.setKeyManagerPassword(ncm.getKPass());
+ server.setConnectors(new Connector[]{http, https});
+ ServletContextHandler ctxt = new ServletContextHandler(0);
+ ctxt.setContextPath("/");
+ server.setHandler(ctxt);
+ ctxt.addServlet(new ServletHolder(new NodeServlet()), "/*");
+ logger.info("NODE0005 Data Router Node Activating Service");
+ server.start();
+ server.join();
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import javax.servlet.*;\r
-import javax.servlet.http.*;\r
-import java.util.*;\r
-import java.util.regex.*;\r
-import java.io.*;\r
-import java.nio.file.*;\r
-import org.apache.log4j.Logger;\r
-import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;\r
-\r
-import com.att.eelf.configuration.EELFLogger;\r
-import com.att.eelf.configuration.EELFManager;\r
-\r
-import java.net.*;\r
-\r
-/**\r
- * Servlet for handling all http and https requests to the data router node\r
- * <p>\r
- * Handled requests are:\r
- * <br>\r
- * GET http://<i>node</i>/internal/fetchProv - fetch the provisioning data\r
- * <br>\r
- * PUT/DELETE https://<i>node</i>/internal/publish/<i>fileid</i> - n2n transfer\r
- * <br>\r
- * PUT/DELETE https://<i>node</i>/publish/<i>feedid</i>/<i>fileid</i> - publsh request\r
- */\r
-public class NodeServlet extends HttpServlet {\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeServlet");\r
- private static NodeConfigManager config;\r
- private static Pattern MetaDataPattern;\r
- private static SubnetMatcher internalsubnet = new SubnetMatcher("135.207.136.128/25");\r
- //Adding EELF Logger Rally:US664892 \r
- private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.NodeServlet");\r
-\r
- static {\r
- try {\r
- String ws = "\\s*";\r
- // assume that \\ and \" have been replaced by X\r
- String string = "\"[^\"]*\"";\r
- //String string = "\"(?:[^\"\\\\]|\\\\.)*\"";\r
- String number = "[+-]?(?:\\.\\d+|(?:0|[1-9]\\d*)(?:\\.\\d*)?)(?:[eE][+-]?\\d+)?";\r
- String value = "(?:" + string + "|" + number + "|null|true|false)";\r
- String item = string + ws + ":" + ws + value + ws;\r
- String object = ws + "\\{" + ws + "(?:" + item + "(?:" + "," + ws + item + ")*)?\\}" + ws;\r
- MetaDataPattern = Pattern.compile(object, Pattern.DOTALL);\r
- } catch (Exception e) {\r
- }\r
- }\r
- /**\r
- * Get the NodeConfigurationManager\r
- */\r
- public void init() {\r
- config = NodeConfigManager.getInstance();\r
- logger.info("NODE0101 Node Servlet Configured");\r
- }\r
- private boolean down(HttpServletResponse resp) throws IOException {\r
- if (config.isShutdown() || !config.isConfigured()) {\r
- resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE);\r
- logger.info("NODE0102 Rejecting request: Service is being quiesced");\r
- return(true);\r
- }\r
- return(false);\r
- }\r
- /**\r
- * Handle a GET for /internal/fetchProv\r
- */\r
- protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {\r
- NodeUtils.setIpAndFqdnForEelf("doGet");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"),getIdFromPath(req)+"");\r
- if (down(resp)) {\r
- return;\r
- }\r
- String path = req.getPathInfo();\r
- String qs = req.getQueryString();\r
- String ip = req.getRemoteAddr();\r
- if (qs != null) {\r
- path = path + "?" + qs;\r
- }\r
- if ("/internal/fetchProv".equals(path)) {\r
- config.gofetch(ip);\r
- resp.setStatus(HttpServletResponse.SC_NO_CONTENT);\r
- return;\r
- } else if (path.startsWith("/internal/resetSubscription/")) {\r
- String subid = path.substring(28);\r
- if (subid.length() != 0 && subid.indexOf('/') == -1) {\r
- NodeMain.resetQueue(subid, ip);\r
- resp.setStatus(HttpServletResponse.SC_NO_CONTENT);\r
- return;\r
- }\r
- }\r
- if (internalsubnet.matches(NodeUtils.getInetAddress(ip))) {\r
- if (path.startsWith("/internal/logs/")) {\r
- String f = path.substring(15);\r
- File fn = new File(config.getLogDir() + "/" + f);\r
- if (f.indexOf('/') != -1 || !fn.isFile()) {\r
- logger.info("NODE0103 Rejecting invalid GET of " + path + " from " + ip);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND);\r
- return;\r
- }\r
- byte[] buf = new byte[65536];\r
- resp.setContentType("text/plain");\r
- resp.setContentLength((int)fn.length());\r
- resp.setStatus(200);\r
- InputStream is = new FileInputStream(fn);\r
- OutputStream os = resp.getOutputStream();\r
- int i;\r
- while ((i = is.read(buf)) > 0) {\r
- os.write(buf, 0, i);\r
- }\r
- is.close();\r
- return;\r
- }\r
- if (path.startsWith("/internal/rtt/")) {\r
- String xip = path.substring(14);\r
- long st = System.currentTimeMillis();\r
- String status = " unknown";\r
- try {\r
- Socket s = new Socket(xip, 443);\r
- s.close();\r
- status = " connected";\r
- } catch (Exception e) {\r
- status = " error " + e.toString();\r
- }\r
- long dur = System.currentTimeMillis() - st;\r
- resp.setContentType("text/plain");\r
- resp.setStatus(200);\r
- byte[] buf = (dur + status + "\n").getBytes();\r
- resp.setContentLength(buf.length);\r
- resp.getOutputStream().write(buf);\r
- return;\r
- }\r
- }\r
- logger.info("NODE0103 Rejecting invalid GET of " + path + " from " + ip);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND);\r
- return;\r
- }\r
- /**\r
- * Handle all PUT requests\r
- */\r
- protected void doPut(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {\r
- NodeUtils.setIpAndFqdnForEelf("doPut");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"),getIdFromPath(req)+"");\r
- common(req, resp, true);\r
- }\r
- /**\r
- * Handle all DELETE requests\r
- */\r
- protected void doDelete(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {\r
- NodeUtils.setIpAndFqdnForEelf("doDelete");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"),getIdFromPath(req)+"");\r
- common(req, resp, false);\r
- }\r
- private void common(HttpServletRequest req, HttpServletResponse resp, boolean isput) throws ServletException, IOException {\r
- if (down(resp)) {\r
- return;\r
- }\r
- if (!req.isSecure()) {\r
- logger.info("NODE0104 Rejecting insecure PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, "https required on publish requests");\r
- return;\r
- }\r
- String fileid = req.getPathInfo();\r
- if (fileid == null) {\r
- logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>.");\r
- return;\r
- }\r
- String feedid = null;\r
- String user = null;\r
- String credentials = req.getHeader("Authorization");\r
- if (credentials == null) {\r
- logger.info("NODE0106 Rejecting unauthenticated PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Authorization header required");\r
- return;\r
- }\r
- String ip = req.getRemoteAddr();\r
- String lip = req.getLocalAddr();\r
- String pubid = null;\r
- String xpubid = null;\r
- String rcvd = NodeUtils.logts(System.currentTimeMillis()) + ";from=" + ip + ";by=" + lip;\r
- Target[] targets = null;\r
- if (fileid.startsWith("/publish/")) {\r
- fileid = fileid.substring(9);\r
- int i = fileid.indexOf('/');\r
- if (i == -1 || i == fileid.length() - 1) {\r
- logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>. Possible missing fileid.");\r
- return;\r
- }\r
- feedid = fileid.substring(0, i);\r
- fileid = fileid.substring(i + 1);\r
- pubid = config.getPublishId();\r
- xpubid = req.getHeader("X-ATT-DR-PUBLISH-ID");\r
- targets = config.getTargets(feedid);\r
- } else if (fileid.startsWith("/internal/publish/")) {\r
- if (!config.isAnotherNode(credentials, ip)) {\r
- logger.info("NODE0107 Rejecting unauthorized node-to-node transfer attempt from " + ip);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN);\r
- return;\r
- }\r
- fileid = fileid.substring(18);\r
- pubid = req.getHeader("X-ATT-DR-PUBLISH-ID");\r
- targets = config.parseRouting(req.getHeader("X-ATT-DR-ROUTING"));\r
- } else {\r
- logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>.");\r
- return;\r
- }\r
- if (fileid.indexOf('/') != -1) {\r
- logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>.");\r
- return;\r
- }\r
- String qs = req.getQueryString();\r
- if (qs != null) {\r
- fileid = fileid + "?" + qs;\r
- }\r
- String hp = config.getMyName();\r
- int xp = config.getExtHttpsPort();\r
- if (xp != 443) {\r
- hp = hp + ":" + xp;\r
- }\r
- String logurl = "https://" + hp + "/internal/publish/" + fileid;\r
- if (feedid != null) {\r
- logurl = "https://" + hp + "/publish/" + feedid + "/" + fileid;\r
- String reason = config.isPublishPermitted(feedid, credentials, ip);\r
- if (reason != null) {\r
- logger.info("NODE0111 Rejecting unauthorized publish attempt to feed " + feedid + " fileid " + fileid + " from " + ip + " reason " + reason);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN,reason);\r
- return;\r
- }\r
- user = config.getAuthUser(feedid, credentials);\r
- String newnode = config.getIngressNode(feedid, user, ip);\r
- if (newnode != null) {\r
- String port = "";\r
- int iport = config.getExtHttpsPort();\r
- if (iport != 443) {\r
- port = ":" + iport;\r
- }\r
- String redirto = "https://" + newnode + port + "/publish/" + feedid + "/" + fileid;\r
- logger.info("NODE0108 Redirecting publish attempt for feed " + feedid + " user " + user + " ip " + ip + " to " + redirto);\r
- resp.sendRedirect(redirto);\r
- return;\r
- }\r
- resp.setHeader("X-ATT-DR-PUBLISH-ID", pubid);\r
- }\r
- String fbase = config.getSpoolDir() + "/" + pubid;\r
- File data = new File(fbase);\r
- File meta = new File(fbase + ".M");\r
- OutputStream dos = null;\r
- Writer mw = null;\r
- InputStream is = null;\r
- try {\r
- StringBuffer mx = new StringBuffer();\r
- mx.append(req.getMethod()).append('\t').append(fileid).append('\n');\r
- Enumeration hnames = req.getHeaderNames();\r
- String ctype = null;\r
- while (hnames.hasMoreElements()) {\r
- String hn = (String)hnames.nextElement();\r
- String hnlc = hn.toLowerCase();\r
- if ((isput && ("content-type".equals(hnlc) ||\r
- "content-language".equals(hnlc) ||\r
- "content-md5".equals(hnlc) ||\r
- "content-range".equals(hnlc))) ||\r
- "x-att-dr-meta".equals(hnlc) ||\r
- (feedid == null && "x-att-dr-received".equals(hnlc)) ||\r
- (hnlc.startsWith("x-") && !hnlc.startsWith("x-att-dr-"))) {\r
- Enumeration hvals = req.getHeaders(hn);\r
- while (hvals.hasMoreElements()) {\r
- String hv = (String)hvals.nextElement();\r
- if ("content-type".equals(hnlc)) {\r
- ctype = hv;\r
- }\r
- if ("x-att-dr-meta".equals(hnlc)) {\r
- if (hv.length() > 4096) {\r
- logger.info("NODE0109 Rejecting publish attempt with metadata too long for feed " + feedid + " user " + user + " ip " + ip);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Metadata too long");\r
- return;\r
- }\r
- if (!MetaDataPattern.matcher(hv.replaceAll("\\\\.", "X")).matches()) {\r
- logger.info("NODE0109 Rejecting publish attempt with malformed metadata for feed " + feedid + " user " + user + " ip " + ip);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Malformed metadata");\r
- return;\r
- }\r
- }\r
- mx.append(hn).append('\t').append(hv).append('\n');\r
- }\r
- }\r
- }\r
- mx.append("X-ATT-DR-RECEIVED\t").append(rcvd).append('\n');\r
- String metadata = mx.toString();\r
- byte[] buf = new byte[1024 * 1024];\r
- int i;\r
- try {\r
- is = req.getInputStream();\r
- dos = new FileOutputStream(data);\r
- while ((i = is.read(buf)) > 0) {\r
- dos.write(buf, 0, i);\r
- }\r
- is.close();\r
- is = null;\r
- dos.close();\r
- dos = null;\r
- } catch (IOException ioe) {\r
- long exlen = -1;\r
- try {\r
- exlen = Long.parseLong(req.getHeader("Content-Length"));\r
- } catch (Exception e) {\r
- }\r
- StatusLog.logPubFail(pubid, feedid, logurl, req.getMethod(), ctype, exlen, data.length(), ip, user, ioe.getMessage());\r
- throw ioe;\r
- }\r
- Path dpath = Paths.get(fbase);\r
- for (Target t: targets) {\r
- DestInfo di = t.getDestInfo();\r
- if (di == null) {\r
- // TODO: unknown destination\r
- continue;\r
- }\r
- String dbase = di.getSpool() + "/" + pubid;\r
- Files.createLink(Paths.get(dbase), dpath);\r
- mw = new FileWriter(meta);\r
- mw.write(metadata);\r
- if (di.getSubId() == null) {\r
- mw.write("X-ATT-DR-ROUTING\t" + t.getRouting() + "\n");\r
- }\r
- mw.close();\r
- meta.renameTo(new File(dbase + ".M"));\r
- }\r
- resp.setStatus(HttpServletResponse.SC_NO_CONTENT);\r
- resp.getOutputStream().close();\r
- StatusLog.logPub(pubid, feedid, logurl, req.getMethod(), ctype, data.length(), ip, user, HttpServletResponse.SC_NO_CONTENT);\r
- } catch (IOException ioe) {\r
- logger.info("NODE0110 IO Exception receiving publish attempt for feed " + feedid + " user " + user + " ip " + ip + " " + ioe.toString(), ioe);\r
- throw ioe;\r
- } finally {\r
- if (is != null) { try { is.close(); } catch (Exception e) {}}\r
- if (dos != null) { try { dos.close(); } catch (Exception e) {}}\r
- if (mw != null) { try { mw.close(); } catch (Exception e) {}}\r
- try { data.delete(); } catch (Exception e) {}\r
- try { meta.delete(); } catch (Exception e) {}\r
- }\r
- }\r
- \r
- private int getIdFromPath(HttpServletRequest req) {\r
- String path = req.getPathInfo();\r
- if (path == null || path.length() < 2)\r
- return -1;\r
- try {\r
- return Integer.parseInt(path.substring(1));\r
- } catch (NumberFormatException e) {\r
- return -1;\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import javax.servlet.*;
+import javax.servlet.http.*;
+import java.util.*;
+import java.util.regex.*;
+import java.io.*;
+import java.nio.file.*;
+
+import org.apache.log4j.Logger;
+import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+import java.net.*;
+
+/**
+ * Servlet for handling all http and https requests to the data router node
+ * <p>
+ * Handled requests are:
+ * <br>
+ * GET http://<i>node</i>/internal/fetchProv - fetch the provisioning data
+ * <br>
+ * PUT/DELETE https://<i>node</i>/internal/publish/<i>fileid</i> - n2n transfer
+ * <br>
+ * PUT/DELETE https://<i>node</i>/publish/<i>feedid</i>/<i>fileid</i> - publish request
+ */
+public class NodeServlet extends HttpServlet {
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeServlet");
+ private static NodeConfigManager config;
+ private static Pattern MetaDataPattern;
+ private static SubnetMatcher internalsubnet = new SubnetMatcher("135.207.136.128/25");
+ //Adding EELF Logger Rally:US664892
+ private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.NodeServlet");
+
+ static {
+ try {
+ String ws = "\\s*";
+ // assume that \\ and \" have been replaced by X
+ String string = "\"[^\"]*\"";
+ //String string = "\"(?:[^\"\\\\]|\\\\.)*\"";
+ String number = "[+-]?(?:\\.\\d+|(?:0|[1-9]\\d*)(?:\\.\\d*)?)(?:[eE][+-]?\\d+)?";
+ String value = "(?:" + string + "|" + number + "|null|true|false)";
+ String item = string + ws + ":" + ws + value + ws;
+ String object = ws + "\\{" + ws + "(?:" + item + "(?:" + "," + ws + item + ")*)?\\}" + ws;
+ MetaDataPattern = Pattern.compile(object, Pattern.DOTALL);
+ } catch (Exception e) {
+ }
+ }
+
+ /**
+ * Get the NodeConfigurationManager
+ */
+ public void init() {
+ config = NodeConfigManager.getInstance();
+ logger.info("NODE0101 Node Servlet Configured");
+ }
+
+ private boolean down(HttpServletResponse resp) throws IOException {
+ if (config.isShutdown() || !config.isConfigured()) {
+ resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
+ logger.info("NODE0102 Rejecting request: Service is being quiesced");
+ return (true);
+ }
+ return (false);
+ }
+
+ /**
+ * Handle a GET for /internal/fetchProv
+ */
+ protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+ NodeUtils.setIpAndFqdnForEelf("doGet");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"), getIdFromPath(req) + "");
+ if (down(resp)) {
+ return;
+ }
+ String path = req.getPathInfo();
+ String qs = req.getQueryString();
+ String ip = req.getRemoteAddr();
+ if (qs != null) {
+ path = path + "?" + qs;
+ }
+ if ("/internal/fetchProv".equals(path)) {
+ config.gofetch(ip);
+ resp.setStatus(HttpServletResponse.SC_NO_CONTENT);
+ return;
+ } else if (path.startsWith("/internal/resetSubscription/")) {
+ String subid = path.substring(28);
+ if (subid.length() != 0 && subid.indexOf('/') == -1) {
+ NodeMain.resetQueue(subid, ip);
+ resp.setStatus(HttpServletResponse.SC_NO_CONTENT);
+ return;
+ }
+ }
+ if (internalsubnet.matches(NodeUtils.getInetAddress(ip))) {
+ if (path.startsWith("/internal/logs/")) {
+ String f = path.substring(15);
+ File fn = new File(config.getLogDir() + "/" + f);
+ if (f.indexOf('/') != -1 || !fn.isFile()) {
+ logger.info("NODE0103 Rejecting invalid GET of " + path + " from " + ip);
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND);
+ return;
+ }
+ byte[] buf = new byte[65536];
+ resp.setContentType("text/plain");
+ resp.setContentLength((int) fn.length());
+ resp.setStatus(200);
+ InputStream is = new FileInputStream(fn);
+ OutputStream os = resp.getOutputStream();
+ int i;
+ while ((i = is.read(buf)) > 0) {
+ os.write(buf, 0, i);
+ }
+ is.close();
+ return;
+ }
+ if (path.startsWith("/internal/rtt/")) {
+ String xip = path.substring(14);
+ long st = System.currentTimeMillis();
+ String status = " unknown";
+ try {
+ Socket s = new Socket(xip, 443);
+ s.close();
+ status = " connected";
+ } catch (Exception e) {
+ status = " error " + e.toString();
+ }
+ long dur = System.currentTimeMillis() - st;
+ resp.setContentType("text/plain");
+ resp.setStatus(200);
+ byte[] buf = (dur + status + "\n").getBytes();
+ resp.setContentLength(buf.length);
+ resp.getOutputStream().write(buf);
+ return;
+ }
+ }
+ logger.info("NODE0103 Rejecting invalid GET of " + path + " from " + ip);
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND);
+ return;
+ }
+
+ /**
+ * Handle all PUT requests
+ */
+ protected void doPut(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+ NodeUtils.setIpAndFqdnForEelf("doPut");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"), getIdFromPath(req) + "");
+ common(req, resp, true);
+ }
+
+ /**
+ * Handle all DELETE requests
+ */
+ protected void doDelete(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+ NodeUtils.setIpAndFqdnForEelf("doDelete");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader("X-ATT-DR-ON-BEHALF-OF"), getIdFromPath(req) + "");
+ common(req, resp, false);
+ }
+
+ private void common(HttpServletRequest req, HttpServletResponse resp, boolean isput) throws ServletException, IOException {
+ if (down(resp)) {
+ return;
+ }
+ if (!req.isSecure()) {
+ logger.info("NODE0104 Rejecting insecure PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, "https required on publish requests");
+ return;
+ }
+ String fileid = req.getPathInfo();
+ if (fileid == null) {
+ logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>.");
+ return;
+ }
+ String feedid = null;
+ String user = null;
+ String credentials = req.getHeader("Authorization");
+ if (credentials == null) {
+ logger.info("NODE0106 Rejecting unauthenticated PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Authorization header required");
+ return;
+ }
+ String ip = req.getRemoteAddr();
+ String lip = req.getLocalAddr();
+ String pubid = null;
+ String xpubid = null;
+ String rcvd = NodeUtils.logts(System.currentTimeMillis()) + ";from=" + ip + ";by=" + lip;
+ Target[] targets = null;
+ if (fileid.startsWith("/publish/")) {
+ fileid = fileid.substring(9);
+ int i = fileid.indexOf('/');
+ if (i == -1 || i == fileid.length() - 1) {
+ logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>. Possible missing fileid.");
+ return;
+ }
+ feedid = fileid.substring(0, i);
+ fileid = fileid.substring(i + 1);
+ pubid = config.getPublishId();
+ xpubid = req.getHeader("X-ATT-DR-PUBLISH-ID");
+ targets = config.getTargets(feedid);
+ } else if (fileid.startsWith("/internal/publish/")) {
+ if (!config.isAnotherNode(credentials, ip)) {
+ logger.info("NODE0107 Rejecting unauthorized node-to-node transfer attempt from " + ip);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN);
+ return;
+ }
+ fileid = fileid.substring(18);
+ pubid = req.getHeader("X-ATT-DR-PUBLISH-ID");
+ targets = config.parseRouting(req.getHeader("X-ATT-DR-ROUTING"));
+ } else {
+ logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>.");
+ return;
+ }
+ if (fileid.indexOf('/') != -1) {
+ logger.info("NODE0105 Rejecting bad URI for PUT or DELETE of " + req.getPathInfo() + " from " + req.getRemoteAddr());
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid request URI. Expecting <feed-publishing-url>/<fileid>.");
+ return;
+ }
+ String qs = req.getQueryString();
+ if (qs != null) {
+ fileid = fileid + "?" + qs;
+ }
+ String hp = config.getMyName();
+ int xp = config.getExtHttpsPort();
+ if (xp != 443) {
+ hp = hp + ":" + xp;
+ }
+ String logurl = "https://" + hp + "/internal/publish/" + fileid;
+ if (feedid != null) {
+ logurl = "https://" + hp + "/publish/" + feedid + "/" + fileid;
+ String reason = config.isPublishPermitted(feedid, credentials, ip);
+ if (reason != null) {
+ logger.info("NODE0111 Rejecting unauthorized publish attempt to feed " + feedid + " fileid " + fileid + " from " + ip + " reason " + reason);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, reason);
+ return;
+ }
+ user = config.getAuthUser(feedid, credentials);
+ String newnode = config.getIngressNode(feedid, user, ip);
+ if (newnode != null) {
+ String port = "";
+ int iport = config.getExtHttpsPort();
+ if (iport != 443) {
+ port = ":" + iport;
+ }
+ String redirto = "https://" + newnode + port + "/publish/" + feedid + "/" + fileid;
+ logger.info("NODE0108 Redirecting publish attempt for feed " + feedid + " user " + user + " ip " + ip + " to " + redirto);
+ resp.sendRedirect(redirto);
+ return;
+ }
+ resp.setHeader("X-ATT-DR-PUBLISH-ID", pubid);
+ }
+ String fbase = config.getSpoolDir() + "/" + pubid;
+ File data = new File(fbase);
+ File meta = new File(fbase + ".M");
+ OutputStream dos = null;
+ Writer mw = null;
+ InputStream is = null;
+ try {
+ StringBuffer mx = new StringBuffer();
+ mx.append(req.getMethod()).append('\t').append(fileid).append('\n');
+ Enumeration hnames = req.getHeaderNames();
+ String ctype = null;
+ while (hnames.hasMoreElements()) {
+ String hn = (String) hnames.nextElement();
+ String hnlc = hn.toLowerCase();
+ if ((isput && ("content-type".equals(hnlc) ||
+ "content-language".equals(hnlc) ||
+ "content-md5".equals(hnlc) ||
+ "content-range".equals(hnlc))) ||
+ "x-att-dr-meta".equals(hnlc) ||
+ (feedid == null && "x-att-dr-received".equals(hnlc)) ||
+ (hnlc.startsWith("x-") && !hnlc.startsWith("x-att-dr-"))) {
+ Enumeration hvals = req.getHeaders(hn);
+ while (hvals.hasMoreElements()) {
+ String hv = (String) hvals.nextElement();
+ if ("content-type".equals(hnlc)) {
+ ctype = hv;
+ }
+ if ("x-att-dr-meta".equals(hnlc)) {
+ if (hv.length() > 4096) {
+ logger.info("NODE0109 Rejecting publish attempt with metadata too long for feed " + feedid + " user " + user + " ip " + ip);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Metadata too long");
+ return;
+ }
+ if (!MetaDataPattern.matcher(hv.replaceAll("\\\\.", "X")).matches()) {
+ logger.info("NODE0109 Rejecting publish attempt with malformed metadata for feed " + feedid + " user " + user + " ip " + ip);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Malformed metadata");
+ return;
+ }
+ }
+ mx.append(hn).append('\t').append(hv).append('\n');
+ }
+ }
+ }
+ mx.append("X-ATT-DR-RECEIVED\t").append(rcvd).append('\n');
+ String metadata = mx.toString();
+ byte[] buf = new byte[1024 * 1024];
+ int i;
+ try {
+ is = req.getInputStream();
+ dos = new FileOutputStream(data);
+ while ((i = is.read(buf)) > 0) {
+ dos.write(buf, 0, i);
+ }
+ is.close();
+ is = null;
+ dos.close();
+ dos = null;
+ } catch (IOException ioe) {
+ long exlen = -1;
+ try {
+ exlen = Long.parseLong(req.getHeader("Content-Length"));
+ } catch (Exception e) {
+ }
+ StatusLog.logPubFail(pubid, feedid, logurl, req.getMethod(), ctype, exlen, data.length(), ip, user, ioe.getMessage());
+ throw ioe;
+ }
+ Path dpath = Paths.get(fbase);
+ for (Target t : targets) {
+ DestInfo di = t.getDestInfo();
+ if (di == null) {
+ // TODO: unknown destination
+ continue;
+ }
+ String dbase = di.getSpool() + "/" + pubid;
+ Files.createLink(Paths.get(dbase), dpath);
+ mw = new FileWriter(meta);
+ mw.write(metadata);
+ if (di.getSubId() == null) {
+ mw.write("X-ATT-DR-ROUTING\t" + t.getRouting() + "\n");
+ }
+ mw.close();
+ meta.renameTo(new File(dbase + ".M"));
+ }
+ resp.setStatus(HttpServletResponse.SC_NO_CONTENT);
+ resp.getOutputStream().close();
+ StatusLog.logPub(pubid, feedid, logurl, req.getMethod(), ctype, data.length(), ip, user, HttpServletResponse.SC_NO_CONTENT);
+ } catch (IOException ioe) {
+ logger.info("NODE0110 IO Exception receiving publish attempt for feed " + feedid + " user " + user + " ip " + ip + " " + ioe.toString(), ioe);
+ throw ioe;
+ } finally {
+ if (is != null) {
+ try {
+ is.close();
+ } catch (Exception e) {
+ }
+ }
+ if (dos != null) {
+ try {
+ dos.close();
+ } catch (Exception e) {
+ }
+ }
+ if (mw != null) {
+ try {
+ mw.close();
+ } catch (Exception e) {
+ }
+ }
+ try {
+ data.delete();
+ } catch (Exception e) {
+ }
+ try {
+ meta.delete();
+ } catch (Exception e) {
+ }
+ }
+ }
+
+ private int getIdFromPath(HttpServletRequest req) {
+ String path = req.getPathInfo();
+ if (path == null || path.length() < 2)
+ return -1;
+ try {
+ return Integer.parseInt(path.substring(1));
+ } catch (NumberFormatException e) {
+ return -1;
+ }
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import static com.att.eelf.configuration.Configuration.MDC_SERVER_FQDN;\r
-import static com.att.eelf.configuration.Configuration.MDC_SERVER_IP_ADDRESS;\r
-import static com.att.eelf.configuration.Configuration.MDC_SERVICE_NAME;\r
-\r
-import java.security.*;\r
-import java.io.*;\r
-import java.util.*;\r
-import java.security.cert.*;\r
-import java.net.*;\r
-import java.text.*;\r
-import org.apache.commons.codec.binary.Base64;\r
-import org.apache.log4j.Logger;\r
-import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;\r
-import org.slf4j.MDC;\r
-\r
-import com.att.eelf.configuration.EELFLogger;\r
-import com.att.eelf.configuration.EELFManager;\r
-\r
-/**\r
- * Utility functions for the data router node\r
- */\r
-public class NodeUtils {\r
- private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.NodeUtils");\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeUtils");\r
- private static SimpleDateFormat logdate;\r
- static {\r
- logdate = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");\r
- logdate.setTimeZone(TimeZone.getTimeZone("GMT"));\r
- }\r
- private NodeUtils() {}\r
- /**\r
- * Base64 encode a byte array\r
- * @param raw The bytes to be encoded\r
- * @return The encoded string\r
- */\r
- public static String base64Encode(byte[] raw) {\r
- return(Base64.encodeBase64String(raw));\r
- }\r
- /**\r
- * Given a user and password, generate the credentials\r
- * @param user User name\r
- * @param password User password\r
- * @return Authorization header value\r
- */\r
- public static String getAuthHdr(String user, String password) {\r
- if (user == null || password == null) {\r
- return(null);\r
- }\r
- return("Basic " + base64Encode((user + ":" + password).getBytes()));\r
- }\r
- /**\r
- * Given a node name, generate the credentials\r
- * @param node Node name\r
- */\r
- public static String getNodeAuthHdr(String node, String key) {\r
- try {\r
- MessageDigest md = MessageDigest.getInstance("SHA");\r
- md.update(key.getBytes());\r
- md.update(node.getBytes());\r
- md.update(key.getBytes());\r
- return(getAuthHdr(node, base64Encode(md.digest())));\r
- } catch (Exception e) {\r
- return(null);\r
- }\r
- }\r
- /**\r
- * Given a keystore file and its password, return the value of the CN of the first private key entry with a certificate.\r
- * @param kstype The type of keystore\r
- * @param ksfile The file name of the keystore\r
- * @param kspass The password of the keystore\r
- * @return CN of the certificate subject or null\r
- */\r
- public static String getCanonicalName(String kstype, String ksfile, String kspass) {\r
- try {\r
- KeyStore ks = KeyStore.getInstance(kstype);\r
- ks.load(new FileInputStream(ksfile), kspass.toCharArray());\r
- return(getCanonicalName(ks));\r
- } catch (Exception e) {\r
- setIpAndFqdnForEelf("getCanonicalName");\r
- eelflogger.error(EelfMsgs.MESSAGE_KEYSTORE_LOAD_ERROR, ksfile, e.toString());\r
- logger.error("NODE0401 Error loading my keystore file + " + ksfile + " " + e.toString(), e);\r
- return(null);\r
- }\r
- }\r
- /**\r
- * Given a keystore, return the value of the CN of the first private key entry with a certificate.\r
- * @param ks The KeyStore\r
- * @return CN of the certificate subject or null\r
- */\r
- public static String getCanonicalName(KeyStore ks) {\r
- try {\r
- Enumeration<String> aliases = ks.aliases();\r
- while (aliases.hasMoreElements()) {\r
- String s = aliases.nextElement();\r
- if (ks.entryInstanceOf(s, KeyStore.PrivateKeyEntry.class)) {\r
- X509Certificate c = (X509Certificate)ks.getCertificate(s);\r
- if (c != null) {\r
- String subject = c.getSubjectX500Principal().getName();\r
- String[] parts = subject.split(",");\r
- if (parts.length < 1) {\r
- return(null);\r
- }\r
- subject = parts[0].trim();\r
- if (!subject.startsWith("CN=")) {\r
- return(null);\r
-\r
- }\r
- return(subject.substring(3));\r
- }\r
- }\r
- }\r
- } catch (Exception e) {\r
- logger.error("NODE0402 Error extracting my name from my keystore file " + e.toString(), e);\r
- }\r
- return(null);\r
- }\r
- /**\r
- * Given a string representation of an IP address, get the corresponding byte array\r
- * @param ip The IP address as a string\r
- * @return The IP address as a byte array or null if the address is invalid\r
- */\r
- public static byte[] getInetAddress(String ip) {\r
- try {\r
- return(InetAddress.getByName(ip).getAddress());\r
- } catch (Exception e) {\r
- }\r
- return(null);\r
- }\r
- /**\r
- * Given a uri with parameters, split out the feed ID and file ID\r
- */\r
- public static String[] getFeedAndFileID(String uriandparams) {\r
- int end = uriandparams.length();\r
- int i = uriandparams.indexOf('#');\r
- if (i != -1 && i < end) {\r
- end = i;\r
- }\r
- i = uriandparams.indexOf('?');\r
- if (i != -1 && i < end) {\r
- end = i;\r
- }\r
- end = uriandparams.lastIndexOf('/', end);\r
- if (end < 2) {\r
- return(null);\r
- }\r
- i = uriandparams.lastIndexOf('/', end - 1);\r
- if (i == -1) {\r
- return(null);\r
- }\r
- return(new String[] { uriandparams.substring(i + 1, end - 1), uriandparams.substring(end + 1) });\r
- }\r
- /**\r
- * Escape fields that might contain vertical bar, backslash, or newline by replacing them with backslash p, backslash e and backslash n.\r
- */\r
- public static String loge(String s) {\r
- if (s == null) {\r
- return(s);\r
- }\r
- return(s.replaceAll("\\\\", "\\\\e").replaceAll("\\|", "\\\\p").replaceAll("\n", "\\\\n"));\r
- }\r
- /**\r
- * Undo what loge does.\r
- */\r
- public static String unloge(String s) {\r
- if (s == null) {\r
- return(s);\r
- }\r
- return(s.replaceAll("\\\\p", "\\|").replaceAll("\\\\n", "\n").replaceAll("\\\\e", "\\\\"));\r
- }\r
- /**\r
- * Format a logging timestamp as yyyy-mm-ddThh:mm:ss.mmmZ\r
- */\r
- public static String logts(long when) {\r
- return(logts(new Date(when)));\r
- }\r
- /**\r
- * Format a logging timestamp as yyyy-mm-ddThh:mm:ss.mmmZ\r
- */\r
- public static synchronized String logts(Date when) {\r
- return(logdate.format(when));\r
- }\r
- \r
- /* Method prints method name, server FQDN and IP Address of the machine in EELF logs\r
- * @Method - setIpAndFqdnForEelf - Rally:US664892 \r
- * @Params - method, prints method name in EELF log.\r
- */ \r
- public static void setIpAndFqdnForEelf(String method) {\r
- MDC.clear();\r
- MDC.put(MDC_SERVICE_NAME, method);\r
- try {\r
- MDC.put(MDC_SERVER_FQDN, InetAddress.getLocalHost().getHostName());\r
- MDC.put(MDC_SERVER_IP_ADDRESS, InetAddress.getLocalHost().getHostAddress());\r
- } catch (Exception e) {\r
- e.printStackTrace();\r
- }\r
-\r
- }\r
- \r
-\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import static com.att.eelf.configuration.Configuration.MDC_SERVER_FQDN;
+import static com.att.eelf.configuration.Configuration.MDC_SERVER_IP_ADDRESS;
+import static com.att.eelf.configuration.Configuration.MDC_SERVICE_NAME;
+
+import java.security.*;
+import java.io.*;
+import java.util.*;
+import java.security.cert.*;
+import java.net.*;
+import java.text.*;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.log4j.Logger;
+import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;
+import org.slf4j.MDC;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+/**
+ * Utility functions for the data router node
+ */
+public class NodeUtils {
+ private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.NodeUtils");
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.NodeUtils");
+ private static SimpleDateFormat logdate;
+
+ static {
+ logdate = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
+ logdate.setTimeZone(TimeZone.getTimeZone("GMT"));
+ }
+
+ private NodeUtils() {
+ }
+
+ /**
+ * Base64 encode a byte array
+ *
+ * @param raw The bytes to be encoded
+ * @return The encoded string
+ */
+ public static String base64Encode(byte[] raw) {
+ return (Base64.encodeBase64String(raw));
+ }
+
+ /**
+ * Given a user and password, generate the credentials
+ *
+ * @param user User name
+ * @param password User password
+ * @return Authorization header value
+ */
+ public static String getAuthHdr(String user, String password) {
+ if (user == null || password == null) {
+ return (null);
+ }
+ return ("Basic " + base64Encode((user + ":" + password).getBytes()));
+ }
+
+ /**
+ * Given a node name, generate the credentials
+ *
+ * @param node Node name
+ */
+ public static String getNodeAuthHdr(String node, String key) {
+ try {
+ MessageDigest md = MessageDigest.getInstance("SHA");
+ md.update(key.getBytes());
+ md.update(node.getBytes());
+ md.update(key.getBytes());
+ return (getAuthHdr(node, base64Encode(md.digest())));
+ } catch (Exception e) {
+ return (null);
+ }
+ }
+
+ /**
+ * Given a keystore file and its password, return the value of the CN of the first private key entry with a certificate.
+ *
+ * @param kstype The type of keystore
+ * @param ksfile The file name of the keystore
+ * @param kspass The password of the keystore
+ * @return CN of the certificate subject or null
+ */
+ public static String getCanonicalName(String kstype, String ksfile, String kspass) {
+ try {
+ KeyStore ks = KeyStore.getInstance(kstype);
+ ks.load(new FileInputStream(ksfile), kspass.toCharArray());
+ return (getCanonicalName(ks));
+ } catch (Exception e) {
+ setIpAndFqdnForEelf("getCanonicalName");
+ eelflogger.error(EelfMsgs.MESSAGE_KEYSTORE_LOAD_ERROR, ksfile, e.toString());
+ logger.error("NODE0401 Error loading my keystore file + " + ksfile + " " + e.toString(), e);
+ return (null);
+ }
+ }
+
+ /**
+ * Given a keystore, return the value of the CN of the first private key entry with a certificate.
+ *
+ * @param ks The KeyStore
+ * @return CN of the certificate subject or null
+ */
+ public static String getCanonicalName(KeyStore ks) {
+ try {
+ Enumeration<String> aliases = ks.aliases();
+ while (aliases.hasMoreElements()) {
+ String s = aliases.nextElement();
+ if (ks.entryInstanceOf(s, KeyStore.PrivateKeyEntry.class)) {
+ X509Certificate c = (X509Certificate) ks.getCertificate(s);
+ if (c != null) {
+ String subject = c.getSubjectX500Principal().getName();
+ String[] parts = subject.split(",");
+ if (parts.length < 1) {
+ return (null);
+ }
+ subject = parts[0].trim();
+ if (!subject.startsWith("CN=")) {
+ return (null);
+
+ }
+ return (subject.substring(3));
+ }
+ }
+ }
+ } catch (Exception e) {
+ logger.error("NODE0402 Error extracting my name from my keystore file " + e.toString(), e);
+ }
+ return (null);
+ }
+
+ /**
+ * Given a string representation of an IP address, get the corresponding byte array
+ *
+ * @param ip The IP address as a string
+ * @return The IP address as a byte array or null if the address is invalid
+ */
+ public static byte[] getInetAddress(String ip) {
+ try {
+ return (InetAddress.getByName(ip).getAddress());
+ } catch (Exception e) {
+ }
+ return (null);
+ }
+
+ /**
+ * Given a uri with parameters, split out the feed ID and file ID
+ */
+ public static String[] getFeedAndFileID(String uriandparams) {
+ int end = uriandparams.length();
+ int i = uriandparams.indexOf('#');
+ if (i != -1 && i < end) {
+ end = i;
+ }
+ i = uriandparams.indexOf('?');
+ if (i != -1 && i < end) {
+ end = i;
+ }
+ end = uriandparams.lastIndexOf('/', end);
+ if (end < 2) {
+ return (null);
+ }
+ i = uriandparams.lastIndexOf('/', end - 1);
+ if (i == -1) {
+ return (null);
+ }
+ return (new String[]{uriandparams.substring(i + 1, end - 1), uriandparams.substring(end + 1)});
+ }
+
+ /**
+ * Escape fields that might contain vertical bar, backslash, or newline by replacing them with backslash p, backslash e and backslash n.
+ */
+ public static String loge(String s) {
+ if (s == null) {
+ return (s);
+ }
+ return (s.replaceAll("\\\\", "\\\\e").replaceAll("\\|", "\\\\p").replaceAll("\n", "\\\\n"));
+ }
+
+ /**
+ * Undo what loge does.
+ */
+ public static String unloge(String s) {
+ if (s == null) {
+ return (s);
+ }
+ return (s.replaceAll("\\\\p", "\\|").replaceAll("\\\\n", "\n").replaceAll("\\\\e", "\\\\"));
+ }
+
+ /**
+ * Format a logging timestamp as yyyy-mm-ddThh:mm:ss.mmmZ
+ */
+ public static String logts(long when) {
+ return (logts(new Date(when)));
+ }
+
+ /**
+ * Format a logging timestamp as yyyy-mm-ddThh:mm:ss.mmmZ
+ */
+ public static synchronized String logts(Date when) {
+ return (logdate.format(when));
+ }
+
+ /* Method prints method name, server FQDN and IP Address of the machine in EELF logs
+ * @Method - setIpAndFqdnForEelf - Rally:US664892
+ * @Params - method, prints method name in EELF log.
+ */
+ public static void setIpAndFqdnForEelf(String method) {
+ MDC.clear();
+ MDC.put(MDC_SERVICE_NAME, method);
+ try {
+ MDC.put(MDC_SERVER_FQDN, InetAddress.getLocalHost().getHostName());
+ MDC.put(MDC_SERVER_IP_ADDRESS, InetAddress.getLocalHost().getHostAddress());
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+
+ }
+
+
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-\r
-/**\r
- * Given a set of node names and next hops, identify and ignore any cycles and figure out the sequence of next hops to get from this node to any other node\r
- */\r
-\r
-public class PathFinder {\r
- private static class Hop {\r
- public boolean mark;\r
- public boolean bad;\r
- public NodeConfig.ProvHop basis;\r
- }\r
- private Vector<String> errors = new Vector<String>();\r
- private Hashtable<String, String> routes = new Hashtable<String, String>();\r
- /**\r
- * Get list of errors encountered while finding paths\r
- * @return array of error descriptions\r
- */\r
- public String[] getErrors() {\r
- return(errors.toArray(new String[errors.size()]));\r
- }\r
- /**\r
- * Get the route from this node to the specified node\r
- * @param destination node\r
- * @return list of node names separated by and ending with "/"\r
- */\r
- public String getPath(String destination) {\r
- String ret = routes.get(destination);\r
- if (ret == null) {\r
- return("");\r
- }\r
- return(ret);\r
- }\r
- private String plot(String from, String to, Hashtable<String, Hop> info) {\r
- Hop nh = info.get(from);\r
- if (nh == null || nh.bad) {\r
- return(to);\r
- }\r
- if (nh.mark) {\r
- // loop detected;\r
- while (!nh.bad) {\r
- nh.bad = true;\r
- errors.add(nh.basis + " is part of a cycle");\r
- nh = info.get(nh.basis.getVia());\r
- }\r
- return(to);\r
- }\r
- nh.mark = true;\r
- String x = plot(nh.basis.getVia(), to, info);\r
- nh.mark = false;\r
- if (nh.bad) {\r
- return(to);\r
- }\r
- return(nh.basis.getVia() + "/" + x);\r
- }\r
- /**\r
- * Find routes from a specified origin to all of the nodes given a set of specified next hops.\r
- * @param origin where we start\r
- * @param nodes where we can go\r
- * @param hops detours along the way\r
- */\r
- public PathFinder(String origin, String[] nodes, NodeConfig.ProvHop[] hops) {\r
- HashSet<String> known = new HashSet<String>();\r
- Hashtable<String, Hashtable<String, Hop>> ht = new Hashtable<String, Hashtable<String, Hop>>();\r
- for (String n: nodes) {\r
- known.add(n);\r
- ht.put(n, new Hashtable<String, Hop>());\r
- }\r
- for (NodeConfig.ProvHop ph: hops) {\r
- if (!known.contains(ph.getFrom())) {\r
- errors.add(ph + " references unknown from node");\r
- continue;\r
- }\r
- if (!known.contains(ph.getTo())) {\r
- errors.add(ph + " references unknown destination node");\r
- continue;\r
- }\r
- Hashtable<String, Hop> ht2 = ht.get(ph.getTo());\r
- Hop h = ht2.get(ph.getFrom());\r
- if (h != null) {\r
- h.bad = true;\r
- errors.add(ph + " gives duplicate next hop - previous via was " + h.basis.getVia());\r
- continue;\r
- }\r
- h = new Hop();\r
- h.basis = ph;\r
- ht2.put(ph.getFrom(), h);\r
- if (!known.contains(ph.getVia())) {\r
- errors.add(ph + " references unknown via node");\r
- h.bad = true;\r
- continue;\r
- }\r
- if (ph.getVia().equals(ph.getTo())) {\r
- errors.add(ph + " gives destination as via");\r
- h.bad = true;\r
- continue;\r
- }\r
- }\r
- for (String n: known) {\r
- if (n.equals(origin)) {\r
- routes.put(n, "");\r
- }\r
- routes.put(n, plot(origin, n, ht.get(n)) + "/");\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+
+/**
+ * Given a set of node names and next hops, identify and ignore any cycles and figure out the sequence of next hops to get from this node to any other node
+ */
+
+public class PathFinder {
+ private static class Hop {
+ public boolean mark;
+ public boolean bad;
+ public NodeConfig.ProvHop basis;
+ }
+
+ private Vector<String> errors = new Vector<String>();
+ private Hashtable<String, String> routes = new Hashtable<String, String>();
+
+ /**
+ * Get list of errors encountered while finding paths
+ *
+ * @return array of error descriptions
+ */
+ public String[] getErrors() {
+ return (errors.toArray(new String[errors.size()]));
+ }
+
+ /**
+ * Get the route from this node to the specified node
+ *
+ * @param destination node
+ * @return list of node names separated by and ending with "/"
+ */
+ public String getPath(String destination) {
+ String ret = routes.get(destination);
+ if (ret == null) {
+ return ("");
+ }
+ return (ret);
+ }
+
+ private String plot(String from, String to, Hashtable<String, Hop> info) {
+ Hop nh = info.get(from);
+ if (nh == null || nh.bad) {
+ return (to);
+ }
+ if (nh.mark) {
+ // loop detected;
+ while (!nh.bad) {
+ nh.bad = true;
+ errors.add(nh.basis + " is part of a cycle");
+ nh = info.get(nh.basis.getVia());
+ }
+ return (to);
+ }
+ nh.mark = true;
+ String x = plot(nh.basis.getVia(), to, info);
+ nh.mark = false;
+ if (nh.bad) {
+ return (to);
+ }
+ return (nh.basis.getVia() + "/" + x);
+ }
+
+ /**
+ * Find routes from a specified origin to all of the nodes given a set of specified next hops.
+ *
+ * @param origin where we start
+ * @param nodes where we can go
+ * @param hops detours along the way
+ */
+ public PathFinder(String origin, String[] nodes, NodeConfig.ProvHop[] hops) {
+ HashSet<String> known = new HashSet<String>();
+ Hashtable<String, Hashtable<String, Hop>> ht = new Hashtable<String, Hashtable<String, Hop>>();
+ for (String n : nodes) {
+ known.add(n);
+ ht.put(n, new Hashtable<String, Hop>());
+ }
+ for (NodeConfig.ProvHop ph : hops) {
+ if (!known.contains(ph.getFrom())) {
+ errors.add(ph + " references unknown from node");
+ continue;
+ }
+ if (!known.contains(ph.getTo())) {
+ errors.add(ph + " references unknown destination node");
+ continue;
+ }
+ Hashtable<String, Hop> ht2 = ht.get(ph.getTo());
+ Hop h = ht2.get(ph.getFrom());
+ if (h != null) {
+ h.bad = true;
+ errors.add(ph + " gives duplicate next hop - previous via was " + h.basis.getVia());
+ continue;
+ }
+ h = new Hop();
+ h.basis = ph;
+ ht2.put(ph.getFrom(), h);
+ if (!known.contains(ph.getVia())) {
+ errors.add(ph + " references unknown via node");
+ h.bad = true;
+ continue;
+ }
+ if (ph.getVia().equals(ph.getTo())) {
+ errors.add(ph + " gives destination as via");
+ h.bad = true;
+ continue;
+ }
+ }
+ for (String n : known) {
+ if (n.equals(origin)) {
+ routes.put(n, "");
+ }
+ routes.put(n, plot(origin, n, ht.get(n)) + "/");
+ }
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.io.*;\r
-import java.util.*;\r
-import org.json.*;\r
-import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;\r
-import org.apache.log4j.Logger;\r
-\r
-import com.att.eelf.configuration.EELFLogger;\r
-import com.att.eelf.configuration.EELFManager;\r
-\r
-/**\r
- * Parser for provisioning data from the provisioning server.\r
- * <p>\r
- * The ProvData class uses a Reader for the text configuration from the\r
- * provisioning server to construct arrays of raw configuration entries.\r
- */\r
-public class ProvData {\r
- private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.ProvData");\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.ProvData");\r
- private NodeConfig.ProvNode[] pn;\r
- private NodeConfig.ProvParam[] pp;\r
- private NodeConfig.ProvFeed[] pf;\r
- private NodeConfig.ProvFeedUser[] pfu;\r
- private NodeConfig.ProvFeedSubnet[] pfsn;\r
- private NodeConfig.ProvSubscription[] ps;\r
- private NodeConfig.ProvForceIngress[] pfi;\r
- private NodeConfig.ProvForceEgress[] pfe;\r
- private NodeConfig.ProvHop[] ph;\r
- private static String[] gvasa(JSONArray a, int index) {\r
- return(gvasa(a.get(index)));\r
- }\r
- private static String[] gvasa(JSONObject o, String key) {\r
- return(gvasa(o.opt(key)));\r
- }\r
- private static String[] gvasa(Object o) {\r
- if (o instanceof JSONArray) {\r
- JSONArray a = (JSONArray)o;\r
- Vector<String> v = new Vector<String>();\r
- for (int i = 0; i < a.length(); i++) {\r
- String s = gvas(a, i);\r
- if (s != null) {\r
- v.add(s);\r
- }\r
- }\r
- return(v.toArray(new String[v.size()]));\r
- } else {\r
- String s = gvas(o);\r
- if (s == null) {\r
- return(new String[0]);\r
- } else {\r
- return(new String[] { s });\r
- }\r
- }\r
- }\r
- private static String gvas(JSONArray a, int index) {\r
- return(gvas(a.get(index)));\r
- }\r
- private static String gvas(JSONObject o, String key) {\r
- return(gvas(o.opt(key)));\r
- }\r
- private static String gvas(Object o) {\r
- if (o instanceof Boolean || o instanceof Number || o instanceof String) {\r
- return(o.toString());\r
- }\r
- return(null);\r
- }\r
- /**\r
- * Construct raw provisioing data entries from the text (JSON)\r
- * provisioning document received from the provisioning server\r
- * @param r The reader for the JSON text.\r
- */\r
- public ProvData(Reader r) throws IOException {\r
- Vector<NodeConfig.ProvNode> pnv = new Vector<NodeConfig.ProvNode>();\r
- Vector<NodeConfig.ProvParam> ppv = new Vector<NodeConfig.ProvParam>();\r
- Vector<NodeConfig.ProvFeed> pfv = new Vector<NodeConfig.ProvFeed>();\r
- Vector<NodeConfig.ProvFeedUser> pfuv = new Vector<NodeConfig.ProvFeedUser>();\r
- Vector<NodeConfig.ProvFeedSubnet> pfsnv = new Vector<NodeConfig.ProvFeedSubnet>();\r
- Vector<NodeConfig.ProvSubscription> psv = new Vector<NodeConfig.ProvSubscription>();\r
- Vector<NodeConfig.ProvForceIngress> pfiv = new Vector<NodeConfig.ProvForceIngress>();\r
- Vector<NodeConfig.ProvForceEgress> pfev = new Vector<NodeConfig.ProvForceEgress>();\r
- Vector<NodeConfig.ProvHop> phv = new Vector<NodeConfig.ProvHop>();\r
- try {\r
- JSONTokener jtx = new JSONTokener(r);\r
- JSONObject jcfg = new JSONObject(jtx);\r
- char c = jtx.nextClean();\r
- if (c != '\0') {\r
- throw new JSONException("Spurious characters following configuration");\r
- }\r
- r.close();\r
- JSONArray jfeeds = jcfg.optJSONArray("feeds");\r
- if (jfeeds != null) {\r
- for (int fx = 0; fx < jfeeds.length(); fx++) {\r
- JSONObject jfeed = jfeeds.getJSONObject(fx);\r
- String stat = null;\r
- if (jfeed.optBoolean("suspend", false)) {\r
- stat = "Feed is suspended";\r
- }\r
- if (jfeed.optBoolean("deleted", false)) {\r
- stat = "Feed is deleted";\r
- }\r
- String fid = gvas(jfeed, "feedid");\r
- String fname = gvas(jfeed, "name");\r
- String fver = gvas(jfeed, "version");\r
- pfv.add(new NodeConfig.ProvFeed(fid, fname + "//" + fver, stat));\r
- JSONObject jauth = jfeed.optJSONObject("authorization");\r
- if (jauth == null) {\r
- continue;\r
- }\r
- JSONArray jeids = jauth.optJSONArray("endpoint_ids");\r
- if (jeids != null) {\r
- for (int ux = 0; ux < jeids.length(); ux++) {\r
- JSONObject ju = jeids.getJSONObject(ux);\r
- String login = gvas(ju, "id");\r
- String password = gvas(ju, "password");\r
- pfuv.add(new NodeConfig.ProvFeedUser(fid, login, NodeUtils.getAuthHdr(login, password)));\r
- }\r
- }\r
- JSONArray jeips = jauth.optJSONArray("endpoint_addrs");\r
- if (jeips != null) {\r
- for (int ix = 0; ix < jeips.length(); ix++) {\r
- String sn = gvas(jeips, ix);\r
- pfsnv.add(new NodeConfig.ProvFeedSubnet(fid, sn));\r
- }\r
- }\r
- }\r
- }\r
- JSONArray jsubs = jcfg.optJSONArray("subscriptions");\r
- if (jsubs != null) {\r
- for (int sx = 0; sx < jsubs.length(); sx++) {\r
- JSONObject jsub = jsubs.getJSONObject(sx);\r
- if (jsub.optBoolean("suspend", false)) {\r
- continue;\r
- }\r
- String sid = gvas(jsub, "subid");\r
- String fid = gvas(jsub, "feedid");\r
- JSONObject jdel = jsub.getJSONObject("delivery");\r
- String delurl = gvas(jdel, "url");\r
- String id = gvas(jdel, "user");\r
- String password = gvas(jdel, "password");\r
- boolean monly = jsub.getBoolean("metadataOnly");\r
- boolean use100 = jdel.getBoolean("use100");\r
- psv.add(new NodeConfig.ProvSubscription(sid, fid, delurl, id, NodeUtils.getAuthHdr(id, password), monly, use100));\r
- }\r
- }\r
- JSONObject jparams = jcfg.optJSONObject("parameters");\r
- if (jparams != null) {\r
- for (String pname: JSONObject.getNames(jparams)) {\r
- String pvalue = gvas(jparams, pname);\r
- if (pvalue != null) {\r
- ppv.add(new NodeConfig.ProvParam(pname, pvalue));\r
- }\r
- }\r
- String sfx = gvas(jparams, "PROV_DOMAIN");\r
- JSONArray jnodes = jparams.optJSONArray("NODES");\r
- if (jnodes != null) {\r
- for (int nx = 0; nx < jnodes.length(); nx++) {\r
- String nn = gvas(jnodes, nx);\r
- if (nn.indexOf('.') == -1) {\r
- nn = nn + "." + sfx;\r
- }\r
- pnv.add(new NodeConfig.ProvNode(nn));\r
- }\r
- }\r
- }\r
- JSONArray jingresses = jcfg.optJSONArray("ingress");\r
- if (jingresses != null) {\r
- for (int fx = 0; fx < jingresses.length(); fx++) {\r
- JSONObject jingress = jingresses.getJSONObject(fx);\r
- String fid = gvas(jingress, "feedid");\r
- String subnet = gvas(jingress, "subnet");\r
- String user = gvas(jingress, "user");\r
- String[] nodes = gvasa(jingress, "node");\r
- if (fid == null || "".equals(fid)) {\r
- continue;\r
- }\r
- if ("".equals(subnet)) {\r
- subnet = null;\r
- }\r
- if ("".equals(user)) {\r
- user = null;\r
- }\r
- pfiv.add(new NodeConfig.ProvForceIngress(fid, subnet, user, nodes));\r
- }\r
- }\r
- JSONObject jegresses = jcfg.optJSONObject("egress");\r
- if (jegresses != null && JSONObject.getNames(jegresses) != null) {\r
- for (String esid: JSONObject.getNames(jegresses)) {\r
- String enode = gvas(jegresses, esid);\r
- if (esid != null && enode != null && !"".equals(esid) && !"".equals(enode)) {\r
- pfev.add(new NodeConfig.ProvForceEgress(esid, enode));\r
- }\r
- }\r
- }\r
- JSONArray jhops = jcfg.optJSONArray("routing");\r
- if (jhops != null) {\r
- for (int fx = 0; fx < jhops.length(); fx++) {\r
- JSONObject jhop = jhops.getJSONObject(fx);\r
- String from = gvas(jhop, "from");\r
- String to = gvas(jhop, "to");\r
- String via = gvas(jhop, "via");\r
- if (from == null || to == null || via == null || "".equals(from) || "".equals(to) || "".equals(via)) {\r
- continue;\r
- }\r
- phv.add(new NodeConfig.ProvHop(from, to, via));\r
- }\r
- }\r
- } catch (JSONException jse) {\r
- NodeUtils.setIpAndFqdnForEelf("ProvData");\r
- eelflogger.error(EelfMsgs.MESSAGE_PARSING_ERROR, jse.toString());\r
- logger.error("NODE0201 Error parsing configuration data from provisioning server " + jse.toString(), jse);\r
- throw new IOException(jse.toString(), jse);\r
- }\r
- pn = pnv.toArray(new NodeConfig.ProvNode[pnv.size()]);\r
- pp = ppv.toArray(new NodeConfig.ProvParam[ppv.size()]);\r
- pf = pfv.toArray(new NodeConfig.ProvFeed[pfv.size()]);\r
- pfu = pfuv.toArray(new NodeConfig.ProvFeedUser[pfuv.size()]);\r
- pfsn = pfsnv.toArray(new NodeConfig.ProvFeedSubnet[pfsnv.size()]);\r
- ps = psv.toArray(new NodeConfig.ProvSubscription[psv.size()]);\r
- pfi = pfiv.toArray(new NodeConfig.ProvForceIngress[pfiv.size()]);\r
- pfe = pfev.toArray(new NodeConfig.ProvForceEgress[pfev.size()]);\r
- ph = phv.toArray(new NodeConfig.ProvHop[phv.size()]);\r
- }\r
- /**\r
- * Get the raw node configuration entries\r
- */\r
- public NodeConfig.ProvNode[] getNodes() {\r
- return(pn);\r
- }\r
- /**\r
- * Get the raw parameter configuration entries\r
- */\r
- public NodeConfig.ProvParam[] getParams() {\r
- return(pp);\r
- }\r
- /**\r
- * Ge the raw feed configuration entries\r
- */\r
- public NodeConfig.ProvFeed[] getFeeds() {\r
- return(pf);\r
- }\r
- /**\r
- * Get the raw feed user configuration entries\r
- */\r
- public NodeConfig.ProvFeedUser[] getFeedUsers() {\r
- return(pfu);\r
- }\r
- /**\r
- * Get the raw feed subnet configuration entries\r
- */\r
- public NodeConfig.ProvFeedSubnet[] getFeedSubnets() {\r
- return(pfsn);\r
- }\r
- /**\r
- * Get the raw subscription entries\r
- */\r
- public NodeConfig.ProvSubscription[] getSubscriptions() {\r
- return(ps);\r
- }\r
- /**\r
- * Get the raw forced ingress entries\r
- */\r
- public NodeConfig.ProvForceIngress[] getForceIngress() {\r
- return(pfi);\r
- }\r
- /**\r
- * Get the raw forced egress entries\r
- */\r
- public NodeConfig.ProvForceEgress[] getForceEgress() {\r
- return(pfe);\r
- }\r
- /**\r
- * Get the raw next hop entries\r
- */\r
- public NodeConfig.ProvHop[] getHops() {\r
- return(ph);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.io.*;
+import java.util.*;
+
+import org.json.*;
+import org.onap.dmaap.datarouter.node.eelf.EelfMsgs;
+import org.apache.log4j.Logger;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+/**
+ * Parser for provisioning data from the provisioning server.
+ * <p>
+ * The ProvData class uses a Reader for the text configuration from the
+ * provisioning server to construct arrays of raw configuration entries.
+ */
+public class ProvData {
+ private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.node.ProvData");
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.node.ProvData");
+ private NodeConfig.ProvNode[] pn;
+ private NodeConfig.ProvParam[] pp;
+ private NodeConfig.ProvFeed[] pf;
+ private NodeConfig.ProvFeedUser[] pfu;
+ private NodeConfig.ProvFeedSubnet[] pfsn;
+ private NodeConfig.ProvSubscription[] ps;
+ private NodeConfig.ProvForceIngress[] pfi;
+ private NodeConfig.ProvForceEgress[] pfe;
+ private NodeConfig.ProvHop[] ph;
+
+ private static String[] gvasa(JSONArray a, int index) {
+ return (gvasa(a.get(index)));
+ }
+
+ private static String[] gvasa(JSONObject o, String key) {
+ return (gvasa(o.opt(key)));
+ }
+
+ private static String[] gvasa(Object o) {
+ if (o instanceof JSONArray) {
+ JSONArray a = (JSONArray) o;
+ Vector<String> v = new Vector<String>();
+ for (int i = 0; i < a.length(); i++) {
+ String s = gvas(a, i);
+ if (s != null) {
+ v.add(s);
+ }
+ }
+ return (v.toArray(new String[v.size()]));
+ } else {
+ String s = gvas(o);
+ if (s == null) {
+ return (new String[0]);
+ } else {
+ return (new String[]{s});
+ }
+ }
+ }
+
+ private static String gvas(JSONArray a, int index) {
+ return (gvas(a.get(index)));
+ }
+
+ private static String gvas(JSONObject o, String key) {
+ return (gvas(o.opt(key)));
+ }
+
+ private static String gvas(Object o) {
+ if (o instanceof Boolean || o instanceof Number || o instanceof String) {
+ return (o.toString());
+ }
+ return (null);
+ }
+
+ /**
+ * Construct raw provisioning data entries from the text (JSON)
+ * provisioning document received from the provisioning server
+ *
+ * @param r The reader for the JSON text.
+ */
+ public ProvData(Reader r) throws IOException {
+ Vector<NodeConfig.ProvNode> pnv = new Vector<NodeConfig.ProvNode>();
+ Vector<NodeConfig.ProvParam> ppv = new Vector<NodeConfig.ProvParam>();
+ Vector<NodeConfig.ProvFeed> pfv = new Vector<NodeConfig.ProvFeed>();
+ Vector<NodeConfig.ProvFeedUser> pfuv = new Vector<NodeConfig.ProvFeedUser>();
+ Vector<NodeConfig.ProvFeedSubnet> pfsnv = new Vector<NodeConfig.ProvFeedSubnet>();
+ Vector<NodeConfig.ProvSubscription> psv = new Vector<NodeConfig.ProvSubscription>();
+ Vector<NodeConfig.ProvForceIngress> pfiv = new Vector<NodeConfig.ProvForceIngress>();
+ Vector<NodeConfig.ProvForceEgress> pfev = new Vector<NodeConfig.ProvForceEgress>();
+ Vector<NodeConfig.ProvHop> phv = new Vector<NodeConfig.ProvHop>();
+ try {
+ JSONTokener jtx = new JSONTokener(r);
+ JSONObject jcfg = new JSONObject(jtx);
+ char c = jtx.nextClean();
+ if (c != '\0') {
+ throw new JSONException("Spurious characters following configuration");
+ }
+ r.close();
+ JSONArray jfeeds = jcfg.optJSONArray("feeds");
+ if (jfeeds != null) {
+ for (int fx = 0; fx < jfeeds.length(); fx++) {
+ JSONObject jfeed = jfeeds.getJSONObject(fx);
+ String stat = null;
+ if (jfeed.optBoolean("suspend", false)) {
+ stat = "Feed is suspended";
+ }
+ if (jfeed.optBoolean("deleted", false)) {
+ stat = "Feed is deleted";
+ }
+ String fid = gvas(jfeed, "feedid");
+ String fname = gvas(jfeed, "name");
+ String fver = gvas(jfeed, "version");
+ pfv.add(new NodeConfig.ProvFeed(fid, fname + "//" + fver, stat));
+ JSONObject jauth = jfeed.optJSONObject("authorization");
+ if (jauth == null) {
+ continue;
+ }
+ JSONArray jeids = jauth.optJSONArray("endpoint_ids");
+ if (jeids != null) {
+ for (int ux = 0; ux < jeids.length(); ux++) {
+ JSONObject ju = jeids.getJSONObject(ux);
+ String login = gvas(ju, "id");
+ String password = gvas(ju, "password");
+ pfuv.add(new NodeConfig.ProvFeedUser(fid, login, NodeUtils.getAuthHdr(login, password)));
+ }
+ }
+ JSONArray jeips = jauth.optJSONArray("endpoint_addrs");
+ if (jeips != null) {
+ for (int ix = 0; ix < jeips.length(); ix++) {
+ String sn = gvas(jeips, ix);
+ pfsnv.add(new NodeConfig.ProvFeedSubnet(fid, sn));
+ }
+ }
+ }
+ }
+ JSONArray jsubs = jcfg.optJSONArray("subscriptions");
+ if (jsubs != null) {
+ for (int sx = 0; sx < jsubs.length(); sx++) {
+ JSONObject jsub = jsubs.getJSONObject(sx);
+ if (jsub.optBoolean("suspend", false)) {
+ continue;
+ }
+ String sid = gvas(jsub, "subid");
+ String fid = gvas(jsub, "feedid");
+ JSONObject jdel = jsub.getJSONObject("delivery");
+ String delurl = gvas(jdel, "url");
+ String id = gvas(jdel, "user");
+ String password = gvas(jdel, "password");
+ boolean monly = jsub.getBoolean("metadataOnly");
+ boolean use100 = jdel.getBoolean("use100");
+ psv.add(new NodeConfig.ProvSubscription(sid, fid, delurl, id, NodeUtils.getAuthHdr(id, password), monly, use100));
+ }
+ }
+ JSONObject jparams = jcfg.optJSONObject("parameters");
+ if (jparams != null) {
+ for (String pname : JSONObject.getNames(jparams)) {
+ String pvalue = gvas(jparams, pname);
+ if (pvalue != null) {
+ ppv.add(new NodeConfig.ProvParam(pname, pvalue));
+ }
+ }
+ String sfx = gvas(jparams, "PROV_DOMAIN");
+ JSONArray jnodes = jparams.optJSONArray("NODES");
+ if (jnodes != null) {
+ for (int nx = 0; nx < jnodes.length(); nx++) {
+ String nn = gvas(jnodes, nx);
+ if (nn.indexOf('.') == -1) {
+ nn = nn + "." + sfx;
+ }
+ pnv.add(new NodeConfig.ProvNode(nn));
+ }
+ }
+ }
+ JSONArray jingresses = jcfg.optJSONArray("ingress");
+ if (jingresses != null) {
+ for (int fx = 0; fx < jingresses.length(); fx++) {
+ JSONObject jingress = jingresses.getJSONObject(fx);
+ String fid = gvas(jingress, "feedid");
+ String subnet = gvas(jingress, "subnet");
+ String user = gvas(jingress, "user");
+ String[] nodes = gvasa(jingress, "node");
+ if (fid == null || "".equals(fid)) {
+ continue;
+ }
+ if ("".equals(subnet)) {
+ subnet = null;
+ }
+ if ("".equals(user)) {
+ user = null;
+ }
+ pfiv.add(new NodeConfig.ProvForceIngress(fid, subnet, user, nodes));
+ }
+ }
+ JSONObject jegresses = jcfg.optJSONObject("egress");
+ if (jegresses != null && JSONObject.getNames(jegresses) != null) {
+ for (String esid : JSONObject.getNames(jegresses)) {
+ String enode = gvas(jegresses, esid);
+ if (esid != null && enode != null && !"".equals(esid) && !"".equals(enode)) {
+ pfev.add(new NodeConfig.ProvForceEgress(esid, enode));
+ }
+ }
+ }
+ JSONArray jhops = jcfg.optJSONArray("routing");
+ if (jhops != null) {
+ for (int fx = 0; fx < jhops.length(); fx++) {
+ JSONObject jhop = jhops.getJSONObject(fx);
+ String from = gvas(jhop, "from");
+ String to = gvas(jhop, "to");
+ String via = gvas(jhop, "via");
+ if (from == null || to == null || via == null || "".equals(from) || "".equals(to) || "".equals(via)) {
+ continue;
+ }
+ phv.add(new NodeConfig.ProvHop(from, to, via));
+ }
+ }
+ } catch (JSONException jse) {
+ NodeUtils.setIpAndFqdnForEelf("ProvData");
+ eelflogger.error(EelfMsgs.MESSAGE_PARSING_ERROR, jse.toString());
+ logger.error("NODE0201 Error parsing configuration data from provisioning server " + jse.toString(), jse);
+ throw new IOException(jse.toString(), jse);
+ }
+ pn = pnv.toArray(new NodeConfig.ProvNode[pnv.size()]);
+ pp = ppv.toArray(new NodeConfig.ProvParam[ppv.size()]);
+ pf = pfv.toArray(new NodeConfig.ProvFeed[pfv.size()]);
+ pfu = pfuv.toArray(new NodeConfig.ProvFeedUser[pfuv.size()]);
+ pfsn = pfsnv.toArray(new NodeConfig.ProvFeedSubnet[pfsnv.size()]);
+ ps = psv.toArray(new NodeConfig.ProvSubscription[psv.size()]);
+ pfi = pfiv.toArray(new NodeConfig.ProvForceIngress[pfiv.size()]);
+ pfe = pfev.toArray(new NodeConfig.ProvForceEgress[pfev.size()]);
+ ph = phv.toArray(new NodeConfig.ProvHop[phv.size()]);
+ }
+
+ /**
+ * Get the raw node configuration entries
+ */
+ public NodeConfig.ProvNode[] getNodes() {
+ return (pn);
+ }
+
+ /**
+ * Get the raw parameter configuration entries
+ */
+ public NodeConfig.ProvParam[] getParams() {
+ return (pp);
+ }
+
+ /**
+ * Get the raw feed configuration entries
+ */
+ public NodeConfig.ProvFeed[] getFeeds() {
+ return (pf);
+ }
+
+ /**
+ * Get the raw feed user configuration entries.
+ *
+ * @return the feed user entries parsed from the provisioning data (never null; may be empty)
+ */
+ public NodeConfig.ProvFeedUser[] getFeedUsers() {
+ return (pfu);
+ }
+
+ /**
+ * Get the raw feed subnet configuration entries.
+ *
+ * @return the feed subnet entries parsed from the provisioning data (never null; may be empty)
+ */
+ public NodeConfig.ProvFeedSubnet[] getFeedSubnets() {
+ return (pfsn);
+ }
+
+ /**
+ * Get the raw subscription entries.
+ *
+ * @return the subscription entries parsed from the provisioning data (never null; may be empty)
+ */
+ public NodeConfig.ProvSubscription[] getSubscriptions() {
+ return (ps);
+ }
+
+ /**
+ * Get the raw forced ingress entries.
+ *
+ * @return the forced ingress entries parsed from the provisioning data (never null; may be empty)
+ */
+ public NodeConfig.ProvForceIngress[] getForceIngress() {
+ return (pfi);
+ }
+
+ /**
+ * Get the raw forced egress entries.
+ *
+ * @return the forced egress entries parsed from the provisioning data (never null; may be empty)
+ */
+ public NodeConfig.ProvForceEgress[] getForceEgress() {
+ return (pfe);
+ }
+
+ /**
+ * Get the raw next hop (routing) entries.
+ *
+ * @return the routing hop entries parsed from the provisioning data (never null; may be empty)
+ */
+ public NodeConfig.ProvHop[] getHops() {
+ return (ph);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-/**\r
- * Generate publish IDs\r
- */\r
-public class PublishId {\r
- private long nextuid;\r
- private String myname;\r
-\r
- /**\r
- * Generate publish IDs for the specified name\r
- * @param myname Unique identifier for this publish ID generator (usually fqdn of server)\r
- */\r
- public PublishId(String myname) {\r
- this.myname = myname;\r
- }\r
- /**\r
- * Generate a Data Router Publish ID that uniquely identifies the particular invocation of the Publish API for log correlation purposes.\r
- */\r
- public synchronized String next() {\r
- long now = System.currentTimeMillis();\r
- if (now < nextuid) {\r
- now = nextuid;\r
- }\r
- nextuid = now + 1;\r
- return(now + "." + myname);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+/**
+ * Generate publish IDs of the form "&lt;millis&gt;.&lt;myname&gt;", guaranteed unique
+ * within this generator even when called more than once per millisecond.
+ */
+public class PublishId {
+ private long nextuid; // smallest timestamp value that has not yet been issued
+ private String myname; // server-unique suffix appended to every ID (usually the node's FQDN)
+
+ /**
+ * Generate publish IDs for the specified name
+ *
+ * @param myname Unique identifier for this publish ID generator (usually fqdn of server)
+ */
+ public PublishId(String myname) {
+ this.myname = myname;
+ }
+
+ /**
+ * Generate a Data Router Publish ID that uniquely identifies the particular invocation of the Publish API for log correlation purposes.
+ *
+ * @return the current time in milliseconds (bumped forward if needed so no value repeats), joined with myname by a '.'
+ */
+ public synchronized String next() {
+ long now = System.currentTimeMillis();
+ // If the clock has not advanced past the last issued value (rapid calls or
+ // clock step backwards), take the next unissued value instead.
+ if (now < nextuid) {
+ now = nextuid;
+ }
+ nextuid = now + 1;
+ return (now + "." + myname);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-\r
-/**\r
- * Execute an operation no more frequently than a specified interval\r
- */\r
-\r
-public abstract class RateLimitedOperation implements Runnable {\r
- private boolean marked; // a timer task exists\r
- private boolean executing; // the operation is currently in progress\r
- private boolean remark; // a request was made while the operation was in progress\r
- private Timer timer;\r
- private long last; // when the last operation started\r
- private long mininterval;\r
- /**\r
- * Create a rate limited operation\r
- * @param mininterval The minimum number of milliseconds after the last execution starts before a new execution can begin\r
- * @param timer The timer used to perform deferred executions\r
- */\r
- public RateLimitedOperation(long mininterval, Timer timer) {\r
- this.timer = timer;\r
- this.mininterval = mininterval;\r
- }\r
- private class deferred extends TimerTask {\r
- public void run() {\r
- execute();\r
- }\r
- }\r
- private synchronized void unmark() {\r
- marked = false;\r
- }\r
- private void execute() {\r
- unmark();\r
- request();\r
- }\r
- /**\r
- * Request that the operation be performed by this thread or at a later time by the timer\r
- */\r
- public void request() {\r
- if (premark()) {\r
- return;\r
- }\r
- do {\r
- run();\r
- } while (demark());\r
- }\r
- private synchronized boolean premark() {\r
- if (executing) {\r
- // currently executing - wait until it finishes\r
- remark = true;\r
- return(true);\r
- }\r
- if (marked) {\r
- // timer currently running - will run when it expires\r
- return(true);\r
- }\r
- long now = System.currentTimeMillis();\r
- if (last + mininterval > now) {\r
- // too soon - schedule a timer\r
- marked = true;\r
- timer.schedule(new deferred(), last + mininterval - now);\r
- return(true);\r
- }\r
- last = now;\r
- executing = true;\r
- // start execution\r
- return(false);\r
- }\r
- private synchronized boolean demark() {\r
- executing = false;\r
- if (remark) {\r
- remark = false;\r
- return(!premark());\r
- }\r
- return(false);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+
+/**
+ * Execute an operation no more frequently than a specified interval.
+ *
+ * Subclasses implement run() with the actual work; callers invoke request().
+ * A request made too soon after the previous execution is deferred via the
+ * supplied Timer; a request made while an execution is in progress is
+ * remembered (remark) and honored once the current execution finishes.
+ */
+
+public abstract class RateLimitedOperation implements Runnable {
+ private boolean marked; // a timer task exists
+ private boolean executing; // the operation is currently in progress
+ private boolean remark; // a request was made while the operation was in progress
+ private Timer timer; // timer used to schedule deferred executions
+ private long last; // when the last operation started
+ private long mininterval; // minimum milliseconds between execution starts
+
+ /**
+ * Create a rate limited operation
+ *
+ * @param mininterval The minimum number of milliseconds after the last execution starts before a new execution can begin
+ * @param timer The timer used to perform deferred executions
+ */
+ public RateLimitedOperation(long mininterval, Timer timer) {
+ this.timer = timer;
+ this.mininterval = mininterval;
+ }
+
+ // TimerTask used for deferred execution when a request arrives too soon.
+ private class deferred extends TimerTask {
+ public void run() {
+ execute();
+ }
+ }
+
+ // Clear the "timer task scheduled" flag under the monitor.
+ private synchronized void unmark() {
+ marked = false;
+ }
+
+ // Timer-thread entry point: clear the mark, then go through the normal
+ // request() path so the premark/demark bookkeeping is applied.
+ private void execute() {
+ unmark();
+ request();
+ }
+
+ /**
+ * Request that the operation be performed by this thread or at a later time by the timer
+ */
+ public void request() {
+ if (premark()) {
+ return;
+ }
+ // Keep running while demark() reports a request arrived mid-execution.
+ do {
+ run();
+ } while (demark());
+ }
+
+ // Decide whether this request may start executing now.
+ // Returns true if the request was deferred (already executing, timer
+ // pending, or too soon); returns false after claiming the right to execute.
+ private synchronized boolean premark() {
+ if (executing) {
+ // currently executing - wait until it finishes
+ remark = true;
+ return (true);
+ }
+ if (marked) {
+ // timer currently running - will run when it expires
+ return (true);
+ }
+ long now = System.currentTimeMillis();
+ if (last + mininterval > now) {
+ // too soon - schedule a timer
+ marked = true;
+ timer.schedule(new deferred(), last + mininterval - now);
+ return (true);
+ }
+ last = now;
+ executing = true;
+ // start execution
+ return (false);
+ }
+
+ // Finish an execution. Returns true if a request arrived while we were
+ // running AND we may immediately run again (premark() allowed it).
+ private synchronized boolean demark() {
+ executing = false;
+ if (remark) {
+ remark = false;
+ return (!premark());
+ }
+ return (false);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-import java.io.*;\r
-\r
-/**\r
- * Track redirections of subscriptions\r
- */\r
-public class RedirManager {\r
- private Hashtable<String, String> sid2primary = new Hashtable<String, String>();\r
- private Hashtable<String, String> sid2secondary = new Hashtable<String, String>();\r
- private String redirfile;\r
- RateLimitedOperation op;\r
- /**\r
- * Create a mechanism for maintaining subscription redirections.\r
- * @param redirfile The file to store the redirection information.\r
- * @param mininterval The minimum number of milliseconds between writes to the redirection information file.\r
- * @param timer The timer thread used to run delayed file writes.\r
- */\r
- public RedirManager(String redirfile, long mininterval, Timer timer) {\r
- this.redirfile = redirfile;\r
- op = new RateLimitedOperation(mininterval, timer) {\r
- public void run() {\r
- try {\r
- StringBuffer sb = new StringBuffer();\r
- for (String s: sid2primary.keySet()) {\r
- sb.append(s).append(' ').append(sid2primary.get(s)).append(' ').append(sid2secondary.get(s)).append('\n');\r
- }\r
- OutputStream os = new FileOutputStream(RedirManager.this.redirfile);\r
- os.write(sb.toString().getBytes());\r
- os.close();\r
- } catch (Exception e) {\r
- }\r
- }\r
- };\r
- try {\r
- String s;\r
- BufferedReader br = new BufferedReader(new FileReader(redirfile));\r
- while ((s = br.readLine()) != null) {\r
- s = s.trim();\r
- String[] sx = s.split(" ");\r
- if (s.startsWith("#") || sx.length != 3) {\r
- continue;\r
- }\r
- sid2primary.put(sx[0], sx[1]);\r
- sid2secondary.put(sx[0], sx[2]);\r
- }\r
- br.close();\r
- } catch (Exception e) {\r
- // missing file is normal\r
- }\r
- }\r
- /**\r
- * Set up redirection. If a request is to be sent to subscription ID sid, and that is configured to go to URL primary, instead, go to secondary.\r
- * @param sid The subscription ID to be redirected\r
- * @param primary The URL associated with that subscription ID\r
- * @param secondary The replacement URL to use instead\r
- */\r
- public synchronized void redirect(String sid, String primary, String secondary) {\r
- sid2primary.put(sid, primary);\r
- sid2secondary.put(sid, secondary);\r
- op.request();\r
- }\r
- /**\r
- * Cancel redirection. If a request is to be sent to subscription ID sid, send it to its primary URL.\r
- * @param sid The subscription ID to remove from the table.\r
- */\r
- public synchronized void forget(String sid) {\r
- sid2primary.remove(sid);\r
- sid2secondary.remove(sid);\r
- op.request();\r
- }\r
- /**\r
- * Look up where to send a subscription. If the primary has changed or there is no redirection, use the primary. Otherwise, redirect to the secondary URL.\r
- * @param sid The subscription ID to look up.\r
- * @param primary The configured primary URL.\r
- * @return The destination URL to really use.\r
- */\r
- public synchronized String lookup(String sid, String primary) {\r
- String oprim = sid2primary.get(sid);\r
- if (primary.equals(oprim)) {\r
- return(sid2secondary.get(sid));\r
- } else if (oprim != null) {\r
- forget(sid);\r
- } \r
- return(primary);\r
- }\r
- /**\r
- * Is a subscription redirected?\r
- */\r
- public synchronized boolean isRedirected(String sid) {\r
- return(sid != null && sid2secondary.get(sid) != null);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+import java.io.*;
+
+/**
+ * Track redirections of subscriptions.
+ *
+ * The redirection table is persisted to a file (one line per entry:
+ * "&lt;sid&gt; &lt;primary&gt; &lt;secondary&gt;", space separated; lines starting with '#'
+ * are ignored) and reloaded from that file on construction. Writes are
+ * throttled through a RateLimitedOperation.
+ */
+public class RedirManager {
+ private Hashtable<String, String> sid2primary = new Hashtable<String, String>(); // sid -> configured primary URL at the time of redirection
+ private Hashtable<String, String> sid2secondary = new Hashtable<String, String>(); // sid -> replacement URL to deliver to instead
+ private String redirfile; // path of the persistence file
+ RateLimitedOperation op; // rate-limited writer that saves the table to redirfile
+
+ /**
+ * Create a mechanism for maintaining subscription redirections.
+ *
+ * @param redirfile The file to store the redirection information.
+ * @param mininterval The minimum number of milliseconds between writes to the redirection information file.
+ * @param timer The timer thread used to run delayed file writes.
+ */
+ public RedirManager(String redirfile, long mininterval, Timer timer) {
+ this.redirfile = redirfile;
+ // Writer: serializes the whole table and rewrites the file from scratch.
+ op = new RateLimitedOperation(mininterval, timer) {
+ public void run() {
+ try {
+ StringBuffer sb = new StringBuffer();
+ for (String s : sid2primary.keySet()) {
+ sb.append(s).append(' ').append(sid2primary.get(s)).append(' ').append(sid2secondary.get(s)).append('\n');
+ }
+ OutputStream os = new FileOutputStream(RedirManager.this.redirfile);
+ os.write(sb.toString().getBytes());
+ os.close();
+ } catch (Exception e) {
+ // NOTE(review): write failures are silently swallowed, so the
+ // on-disk table may be stale after a restart - confirm intended.
+ }
+ }
+ };
+ // Reload any previously persisted redirections; malformed lines and
+ // '#' comment lines are skipped.
+ try {
+ String s;
+ BufferedReader br = new BufferedReader(new FileReader(redirfile));
+ while ((s = br.readLine()) != null) {
+ s = s.trim();
+ String[] sx = s.split(" ");
+ if (s.startsWith("#") || sx.length != 3) {
+ continue;
+ }
+ sid2primary.put(sx[0], sx[1]);
+ sid2secondary.put(sx[0], sx[2]);
+ }
+ br.close();
+ } catch (Exception e) {
+ // missing file is normal
+ }
+ }
+
+ /**
+ * Set up redirection. If a request is to be sent to subscription ID sid, and that is configured to go to URL primary, instead, go to secondary.
+ *
+ * @param sid The subscription ID to be redirected
+ * @param primary The URL associated with that subscription ID
+ * @param secondary The replacement URL to use instead
+ */
+ public synchronized void redirect(String sid, String primary, String secondary) {
+ sid2primary.put(sid, primary);
+ sid2secondary.put(sid, secondary);
+ op.request();
+ }
+
+ /**
+ * Cancel redirection. If a request is to be sent to subscription ID sid, send it to its primary URL.
+ *
+ * @param sid The subscription ID to remove from the table.
+ */
+ public synchronized void forget(String sid) {
+ sid2primary.remove(sid);
+ sid2secondary.remove(sid);
+ op.request();
+ }
+
+ /**
+ * Look up where to send a subscription. If the primary has changed or there is no redirection, use the primary. Otherwise, redirect to the secondary URL.
+ *
+ * @param sid The subscription ID to look up.
+ * @param primary The configured primary URL.
+ * @return The destination URL to really use.
+ */
+ public synchronized String lookup(String sid, String primary) {
+ String oprim = sid2primary.get(sid);
+ if (primary.equals(oprim)) {
+ return (sid2secondary.get(sid));
+ } else if (oprim != null) {
+ // Configured primary changed since the redirect was recorded:
+ // the redirect is stale, drop it and persist the removal.
+ forget(sid);
+ }
+ return (primary);
+ }
+
+ /**
+ * Is a subscription redirected?
+ *
+ * @param sid The subscription ID to check (may be null).
+ * @return true if a secondary URL is recorded for sid.
+ */
+ public synchronized boolean isRedirected(String sid) {
+ return (sid != null && sid2secondary.get(sid) != null);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.regex.*;\r
-import java.util.*;\r
-import java.io.*;\r
-import java.nio.file.*;\r
-import java.text.*;\r
-\r
-/**\r
- * Logging for data router delivery events (PUB/DEL/EXP)\r
- */\r
-public class StatusLog {\r
- private static StatusLog instance = new StatusLog();\r
- private HashSet<String> toship = new HashSet<String>();\r
- private SimpleDateFormat filedate;\r
- private String prefix = "logs/events";\r
- private String suffix = ".log";\r
- private String plainfile;\r
- private String curfile;\r
- private long nexttime;\r
- private OutputStream os;\r
- private long intvl;\r
- private NodeConfigManager config = NodeConfigManager.getInstance();\r
- {\r
- try { filedate = new SimpleDateFormat("-yyyyMMddHHmm"); } catch (Exception e) {}\r
- }\r
- /**\r
- * Parse an interval of the form xxhyymzzs and round it to the nearest whole fraction of 24 hours. If no units are specified, assume seconds.\r
- */\r
- public static long parseInterval(String interval, int def) {\r
- try {\r
- Matcher m = Pattern.compile("(?:(\\d+)[Hh])?(?:(\\d+)[Mm])?(?:(\\d+)[Ss]?)?").matcher(interval);\r
- if (m.matches()) {\r
- int dur = 0;\r
- String x = m.group(1);\r
- if (x != null) {\r
- dur += 3600 * Integer.parseInt(x);\r
- }\r
- x = m.group(2);\r
- if (x != null) {\r
- dur += 60 * Integer.parseInt(x);\r
- }\r
- x = m.group(3);\r
- if (x != null) {\r
- dur += Integer.parseInt(x);\r
- }\r
- if (dur < 60) {\r
- dur = 60;\r
- }\r
- int best = 86400;\r
- int dist = best - dur;\r
- if (dur > best) {\r
- dist = dur - best;\r
- }\r
- int base = 1;\r
- for (int i = 0; i < 8; i++) {\r
- int base2 = base;\r
- base *= 2;\r
- for (int j = 0; j < 4; j++) {\r
- int base3 = base2;\r
- base2 *= 3;\r
- for (int k = 0; k < 3; k++) {\r
- int cur = base3;\r
- base3 *= 5;\r
- int ndist = cur - dur;\r
- if (dur > cur) {\r
- ndist = dur - cur;\r
- }\r
- if (ndist < dist) {\r
- best = cur;\r
- dist = ndist;\r
- }\r
- }\r
- }\r
- }\r
- def = best * 1000;\r
- }\r
- } catch (Exception e) {\r
- }\r
- return(def);\r
- }\r
- private synchronized void checkRoll(long now) throws IOException {\r
- if (now >= nexttime) {\r
- if (os != null) {\r
- os.close();\r
- os = null;\r
- }\r
- intvl = parseInterval(config.getEventLogInterval(), 300000);\r
- prefix = config.getEventLogPrefix();\r
- suffix = config.getEventLogSuffix();\r
- nexttime = now - now % intvl + intvl;\r
- curfile = prefix + filedate.format(new Date(nexttime - intvl)) + suffix;\r
- plainfile = prefix + suffix;\r
- notify();\r
- }\r
- }\r
- /**\r
- * Get the name of the current log file\r
- * @return The full path name of the current event log file\r
- */\r
- public static synchronized String getCurLogFile() {\r
- try {\r
- instance.checkRoll(System.currentTimeMillis());\r
- } catch (Exception e) {\r
- }\r
- return(instance.curfile);\r
- }\r
- private synchronized void log(String s) {\r
- try {\r
- long now = System.currentTimeMillis();\r
- checkRoll(now);\r
- if (os == null) {\r
- os = new FileOutputStream(curfile, true);\r
- (new File(plainfile)).delete();\r
- Files.createLink(Paths.get(plainfile), Paths.get(curfile));\r
- }\r
- os.write((NodeUtils.logts(new Date(now)) + '|' + s + '\n').getBytes());\r
- os.flush();\r
- } catch (IOException ioe) {\r
- }\r
- }\r
- /**\r
- * Log a received publication attempt.\r
- * @param pubid The publish ID assigned by the node\r
- * @param feedid The feed id given by the publisher\r
- * @param requrl The URL of the received request\r
- * @param method The method (DELETE or PUT) in the received request\r
- * @param ctype The content type (if method is PUT and clen > 0)\r
- * @param clen The content length (if method is PUT)\r
- * @param srcip The IP address of the publisher\r
- * @param user The identity of the publisher\r
- * @param status The status returned to the publisher\r
- */\r
- public static void logPub(String pubid, String feedid, String requrl, String method, String ctype, long clen, String srcip, String user, int status) {\r
- instance.log("PUB|" + pubid + "|" + feedid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + srcip + "|" + user + "|" + status);\r
- }\r
- /**\r
- * Log a data transfer error receiving a publication attempt\r
- * @param pubid The publish ID assigned by the node\r
- * @param feedid The feed id given by the publisher\r
- * @param requrl The URL of the received request\r
- * @param method The method (DELETE or PUT) in the received request\r
- * @param ctype The content type (if method is PUT and clen > 0)\r
- * @param clen The expected content length (if method is PUT)\r
- * @param rcvd The content length received\r
- * @param srcip The IP address of the publisher\r
- * @param user The identity of the publisher\r
- * @param error The error message from the IO exception\r
- */\r
- public static void logPubFail(String pubid, String feedid, String requrl, String method, String ctype, long clen, long rcvd, String srcip, String user, String error) {\r
- instance.log("PBF|" + pubid + "|" + feedid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + rcvd + "|" + srcip + "|" + user + "|" + error);\r
- }\r
- /**\r
- * Log a delivery attempt.\r
- * @param pubid The publish ID assigned by the node\r
- * @param feedid The feed ID\r
- * @param subid The (space delimited list of) subscription ID\r
- * @param requrl The URL used in the attempt\r
- * @param method The method (DELETE or PUT) in the attempt\r
- * @param ctype The content type (if method is PUT, not metaonly, and clen > 0)\r
- * @param clen The content length (if PUT and not metaonly)\r
- * @param user The identity given to the subscriber\r
- * @param status The status returned by the subscriber or -1 if an exeception occured trying to connect\r
- * @param xpubid The publish ID returned by the subscriber\r
- */\r
- public static void logDel(String pubid, String feedid, String subid, String requrl, String method, String ctype, long clen, String user, int status, String xpubid) {\r
- if (feedid == null) {\r
- return;\r
- }\r
- instance.log("DEL|" + pubid + "|" + feedid + "|" + subid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + user + "|" + status + "|" + xpubid);\r
- }\r
- /**\r
- * Log delivery attempts expired\r
- * @param pubid The publish ID assigned by the node\r
- * @param feedid The feed ID\r
- * @param subid The (space delimited list of) subscription ID\r
- * @param requrl The URL that would be delivered to\r
- * @param method The method (DELETE or PUT) in the request\r
- * @param ctype The content type (if method is PUT, not metaonly, and clen > 0)\r
- * @param clen The content length (if PUT and not metaonly)\r
- * @param reason The reason the attempts were discontinued\r
- * @param attempts The number of attempts made\r
- */\r
- public static void logExp(String pubid, String feedid, String subid, String requrl, String method, String ctype, long clen, String reason, int attempts) {\r
- if (feedid == null) {\r
- return;\r
- }\r
- instance.log("EXP|" + pubid + "|" + feedid + "|" + subid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + reason + "|" + attempts);\r
- }\r
- /**\r
- * Log extra statistics about unsuccessful delivery attempts.\r
- * @param pubid The publish ID assigned by the node\r
- * @param feedid The feed ID\r
- * @param subid The (space delimited list of) subscription ID\r
- * @param clen The content length\r
- * @param sent The # of bytes sent or -1 if subscriber returned an error instead of 100 Continue, otherwise, the number of bytes sent before an error occurred.\r
- */\r
- public static void logDelExtra(String pubid, String feedid, String subid, long clen, long sent) {\r
- if (feedid == null) {\r
- return;\r
- }\r
- instance.log("DLX|" + pubid + "|" + feedid + "|" + subid + "|" + clen + "|" + sent);\r
- }\r
- private StatusLog() {\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.regex.*;
+import java.util.*;
+import java.io.*;
+import java.nio.file.*;
+import java.text.*;
+
+/**
+ * Logging for data router delivery events (PUB/DEL/EXP)
+ */
+public class StatusLog {
+ private static StatusLog instance = new StatusLog();
+ private HashSet<String> toship = new HashSet<String>();
+ private SimpleDateFormat filedate;
+ private String prefix = "logs/events";
+ private String suffix = ".log";
+ private String plainfile;
+ private String curfile;
+ private long nexttime;
+ private OutputStream os;
+ private long intvl;
+ private NodeConfigManager config = NodeConfigManager.getInstance();
+
+ // Instance initializer: build the timestamp suffix appended to rolled
+ // log file names. The catch is defensive only; this pattern is valid and
+ // construction is not expected to fail.
+ {
+ try {
+ filedate = new SimpleDateFormat("-yyyyMMddHHmm");
+ } catch (Exception e) {
+ }
+ }
+
+ /**
+ * Parse an interval of the form xxhyymzzs and round it to the nearest whole fraction of 24 hours. If no units are specified, assume seconds.
+ *
+ * @param interval interval specification, e.g. "1h30m", "90m", "300"
+ * @param def value returned unchanged if the specification cannot be parsed;
+ * presumably in milliseconds, matching the successful-parse return - the
+ * visible caller passes 300000 (5 minutes)
+ * @return the rounded interval in milliseconds, or def on a parse failure
+ */
+ public static long parseInterval(String interval, int def) {
+ try {
+ Matcher m = Pattern.compile("(?:(\\d+)[Hh])?(?:(\\d+)[Mm])?(?:(\\d+)[Ss]?)?").matcher(interval);
+ if (m.matches()) {
+ // Accumulate the requested duration in seconds.
+ int dur = 0;
+ String x = m.group(1);
+ if (x != null) {
+ dur += 3600 * Integer.parseInt(x);
+ }
+ x = m.group(2);
+ if (x != null) {
+ dur += 60 * Integer.parseInt(x);
+ }
+ x = m.group(3);
+ if (x != null) {
+ dur += Integer.parseInt(x);
+ }
+ // Enforce a one-minute floor.
+ if (dur < 60) {
+ dur = 60;
+ }
+ // Search all candidate durations of the form 2^i * 3^j * 5^k seconds
+ // (i<8, j<4, k<3) - i.e. the divisors of 86400 (one day) - and keep
+ // the candidate closest to the requested duration.
+ int best = 86400;
+ int dist = best - dur;
+ if (dur > best) {
+ dist = dur - best;
+ }
+ int base = 1;
+ for (int i = 0; i < 8; i++) {
+ int base2 = base;
+ base *= 2;
+ for (int j = 0; j < 4; j++) {
+ int base3 = base2;
+ base2 *= 3;
+ for (int k = 0; k < 3; k++) {
+ int cur = base3;
+ base3 *= 5;
+ int ndist = cur - dur;
+ if (dur > cur) {
+ ndist = dur - cur;
+ }
+ if (ndist < dist) {
+ best = cur;
+ dist = ndist;
+ }
+ }
+ }
+ }
+ // Convert the chosen duration from seconds to milliseconds.
+ def = best * 1000;
+ }
+ } catch (Exception e) {
+ // fall through and return def unchanged
+ }
+ return (def);
+ }
+
+ private synchronized void checkRoll(long now) throws IOException {
+ if (now >= nexttime) {
+ if (os != null) {
+ os.close();
+ os = null;
+ }
+ intvl = parseInterval(config.getEventLogInterval(), 300000);
+ prefix = config.getEventLogPrefix();
+ suffix = config.getEventLogSuffix();
+ nexttime = now - now % intvl + intvl;
+ curfile = prefix + filedate.format(new Date(nexttime - intvl)) + suffix;
+ plainfile = prefix + suffix;
+ notify();
+ }
+ }
+
+ /**
+ * Get the name of the current log file
+ *
+ * @return The full path name of the current event log file
+ */
+ public static synchronized String getCurLogFile() {
+ try {
+ instance.checkRoll(System.currentTimeMillis());
+ } catch (Exception e) {
+ }
+ return (instance.curfile);
+ }
+
+ private synchronized void log(String s) {
+ try {
+ long now = System.currentTimeMillis();
+ checkRoll(now);
+ if (os == null) {
+ os = new FileOutputStream(curfile, true);
+ (new File(plainfile)).delete();
+ Files.createLink(Paths.get(plainfile), Paths.get(curfile));
+ }
+ os.write((NodeUtils.logts(new Date(now)) + '|' + s + '\n').getBytes());
+ os.flush();
+ } catch (IOException ioe) {
+ }
+ }
+
+ /**
+ * Log a received publication attempt.
+ *
+ * @param pubid The publish ID assigned by the node
+ * @param feedid The feed id given by the publisher
+ * @param requrl The URL of the received request
+ * @param method The method (DELETE or PUT) in the received request
+ * @param ctype The content type (if method is PUT and clen > 0)
+ * @param clen The content length (if method is PUT)
+ * @param srcip The IP address of the publisher
+ * @param user The identity of the publisher
+ * @param status The status returned to the publisher
+ */
+ public static void logPub(String pubid, String feedid, String requrl, String method, String ctype, long clen, String srcip, String user, int status) {
+ instance.log("PUB|" + pubid + "|" + feedid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + srcip + "|" + user + "|" + status);
+ }
+
+ /**
+ * Log a data transfer error receiving a publication attempt
+ *
+ * @param pubid The publish ID assigned by the node
+ * @param feedid The feed id given by the publisher
+ * @param requrl The URL of the received request
+ * @param method The method (DELETE or PUT) in the received request
+ * @param ctype The content type (if method is PUT and clen > 0)
+ * @param clen The expected content length (if method is PUT)
+ * @param rcvd The content length received
+ * @param srcip The IP address of the publisher
+ * @param user The identity of the publisher
+ * @param error The error message from the IO exception
+ */
+ public static void logPubFail(String pubid, String feedid, String requrl, String method, String ctype, long clen, long rcvd, String srcip, String user, String error) {
+ instance.log("PBF|" + pubid + "|" + feedid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + rcvd + "|" + srcip + "|" + user + "|" + error);
+ }
+
+ /**
+ * Log a delivery attempt.
+ *
+ * @param pubid The publish ID assigned by the node
+ * @param feedid The feed ID
+ * @param subid The (space delimited list of) subscription ID
+ * @param requrl The URL used in the attempt
+ * @param method The method (DELETE or PUT) in the attempt
+ * @param ctype The content type (if method is PUT, not metaonly, and clen > 0)
+ * @param clen The content length (if PUT and not metaonly)
+ * @param user The identity given to the subscriber
+ * @param status The status returned by the subscriber or -1 if an exception occurred trying to connect
+ * @param xpubid The publish ID returned by the subscriber
+ */
+ public static void logDel(String pubid, String feedid, String subid, String requrl, String method, String ctype, long clen, String user, int status, String xpubid) {
+ if (feedid == null) {
+ return;
+ }
+ instance.log("DEL|" + pubid + "|" + feedid + "|" + subid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + user + "|" + status + "|" + xpubid);
+ }
+
+ /**
+ * Log delivery attempts expired
+ *
+ * @param pubid The publish ID assigned by the node
+ * @param feedid The feed ID
+ * @param subid The (space delimited list of) subscription ID
+ * @param requrl The URL that would be delivered to
+ * @param method The method (DELETE or PUT) in the request
+ * @param ctype The content type (if method is PUT, not metaonly, and clen > 0)
+ * @param clen The content length (if PUT and not metaonly)
+ * @param reason The reason the attempts were discontinued
+ * @param attempts The number of attempts made
+ */
+ public static void logExp(String pubid, String feedid, String subid, String requrl, String method, String ctype, long clen, String reason, int attempts) {
+ if (feedid == null) {
+ return;
+ }
+ instance.log("EXP|" + pubid + "|" + feedid + "|" + subid + "|" + requrl + "|" + method + "|" + ctype + "|" + clen + "|" + reason + "|" + attempts);
+ }
+
+ /**
+ * Log extra statistics about unsuccessful delivery attempts.
+ *
+ * @param pubid The publish ID assigned by the node
+ * @param feedid The feed ID
+ * @param subid The (space delimited list of) subscription ID
+ * @param clen The content length
+ * @param sent The # of bytes sent or -1 if subscriber returned an error instead of 100 Continue, otherwise, the number of bytes sent before an error occurred.
+ */
+ public static void logDelExtra(String pubid, String feedid, String subid, long clen, long sent) {
+ if (feedid == null) {
+ return;
+ }
+ instance.log("DLX|" + pubid + "|" + feedid + "|" + subid + "|" + clen + "|" + sent);
+ }
+
+ private StatusLog() {
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.net.*;\r
-\r
-/**\r
- * Compare IP addresses as byte arrays to a subnet specified as a CIDR\r
- */\r
-public class SubnetMatcher {\r
- private byte[] sn;\r
- private int len;\r
- private int mask;\r
- /**\r
- * Construct a subnet matcher given a CIDR\r
- * @param subnet The CIDR to match\r
- */\r
- public SubnetMatcher(String subnet) {\r
- int i = subnet.lastIndexOf('/');\r
- if (i == -1) {\r
- sn = NodeUtils.getInetAddress(subnet);\r
- len = sn.length;\r
- } else {\r
- len = Integer.parseInt(subnet.substring(i + 1));\r
- sn = NodeUtils.getInetAddress(subnet.substring(0, i));\r
- mask = ((0xff00) >> (len % 8)) & 0xff;\r
- len /= 8;\r
- }\r
- }\r
- /**\r
- * Is the IP address in the CIDR?\r
- * @param addr the IP address as bytes in network byte order\r
- * @return true if the IP address matches.\r
- */\r
- public boolean matches(byte[] addr) {\r
- if (addr.length != sn.length) {\r
- return(false);\r
- }\r
- for (int i = 0; i < len; i++) {\r
- if (addr[i] != sn[i]) {\r
- return(false);\r
- }\r
- }\r
- if (mask != 0 && ((addr[len] ^ sn[len]) & mask) != 0) {\r
- return(false);\r
- }\r
- return(true);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.net.*;
+
+/**
+ * Compare IP addresses as byte arrays to a subnet specified as a CIDR
+ */
+public class SubnetMatcher {
+ private byte[] sn;
+ private int len;
+ private int mask;
+
+ /**
+ * Construct a subnet matcher given a CIDR
+ *
+ * @param subnet The CIDR to match
+ */
+ public SubnetMatcher(String subnet) {
+ int i = subnet.lastIndexOf('/');
+ if (i == -1) {
+ sn = NodeUtils.getInetAddress(subnet);
+ len = sn.length;
+ } else {
+ len = Integer.parseInt(subnet.substring(i + 1));
+ sn = NodeUtils.getInetAddress(subnet.substring(0, i));
+ mask = ((0xff00) >> (len % 8)) & 0xff;
+ len /= 8;
+ }
+ }
+
+ /**
+ * Is the IP address in the CIDR?
+ *
+ * @param addr the IP address as bytes in network byte order
+ * @return true if the IP address matches.
+ */
+ public boolean matches(byte[] addr) {
+ if (addr.length != sn.length) {
+ return (false);
+ }
+ for (int i = 0; i < len; i++) {
+ if (addr[i] != sn[i]) {
+ return (false);
+ }
+ }
+ if (mask != 0 && ((addr[len] ^ sn[len]) & mask) != 0) {
+ return (false);
+ }
+ return (true);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-/**\r
- * A destination to deliver a message\r
- */\r
-public class Target {\r
- private DestInfo destinfo;\r
- private String routing;\r
- /**\r
- * A destination to deliver a message\r
- * @param destinfo Either info for a subscription ID or info for a node-to-node transfer\r
- * @param routing For a node-to-node transfer, what to do when it gets there.\r
- */\r
- public Target(DestInfo destinfo, String routing) {\r
- this.destinfo = destinfo;\r
- this.routing = routing;\r
- }\r
- /**\r
- * Add additional routing\r
- */\r
- public void addRouting(String routing) {\r
- this.routing = this.routing + " " + routing;\r
- }\r
- /**\r
- * Get the destination information for this target\r
- */\r
- public DestInfo getDestInfo() {\r
- return(destinfo);\r
- }\r
- /**\r
- * Get the next hop information for this target\r
- */\r
- public String getRouting() {\r
- return(routing);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+/**
+ * A destination to deliver a message
+ */
+public class Target {
+ private DestInfo destinfo;
+ private String routing;
+
+ /**
+ * A destination to deliver a message
+ *
+ * @param destinfo Either info for a subscription ID or info for a node-to-node transfer
+ * @param routing For a node-to-node transfer, what to do when it gets there.
+ */
+ public Target(DestInfo destinfo, String routing) {
+ this.destinfo = destinfo;
+ this.routing = routing;
+ }
+
+ /**
+ * Add additional routing
+ */
+ public void addRouting(String routing) {
+ this.routing = this.routing + " " + routing;
+ }
+
+ /**
+ * Get the destination information for this target
+ */
+ public DestInfo getDestInfo() {
+ return (destinfo);
+ }
+
+ /**
+ * Get the next hop information for this target
+ */
+ public String getRouting() {
+ return (routing);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.node;\r
-\r
-import java.util.*;\r
-\r
-/**\r
- * Manage a list of tasks to be executed when an event occurs.\r
- * This makes the following guarantees:\r
- * <ul>\r
- * <li>Tasks can be safely added and removed in the middle of a run.</li>\r
- * <li>No task will be returned more than once during a run.</li>\r
- * <li>No task will be returned when it is not, at that moment, in the list of tasks.</li>\r
- * <li>At the moment when next() returns null, all tasks on the list have been returned during the run.</li>\r
- * <li>Initially and once next() returns null during a run, next() will continue to return null until startRun() is called.\r
- * </ul>\r
- */\r
-public class TaskList {\r
- private Iterator<Runnable> runlist;\r
- private HashSet<Runnable> tasks = new HashSet<Runnable>();\r
- private HashSet<Runnable> togo;\r
- private HashSet<Runnable> sofar;\r
- private HashSet<Runnable> added;\r
- private HashSet<Runnable> removed;\r
- /**\r
- * Construct a new TaskList\r
- */\r
- public TaskList() {\r
- }\r
- /**\r
- * Start executing the sequence of tasks.\r
- */\r
- public synchronized void startRun() {\r
- sofar = new HashSet<Runnable>();\r
- added = new HashSet<Runnable>();\r
- removed = new HashSet<Runnable>();\r
- togo = new HashSet<Runnable>(tasks);\r
- runlist = togo.iterator();\r
- }\r
- /**\r
- * Get the next task to execute\r
- */\r
- public synchronized Runnable next() {\r
- while (runlist != null) {\r
- if (runlist.hasNext()) {\r
- Runnable task = runlist.next();\r
- if (removed.contains(task)) {\r
- continue;\r
- }\r
- if (sofar.contains(task)) {\r
- continue;\r
- }\r
- sofar.add(task);\r
- return(task);\r
- }\r
- if (added.size() != 0) {\r
- togo = added;\r
- added = new HashSet<Runnable>();\r
- removed.clear();\r
- runlist = togo.iterator();\r
- continue;\r
- }\r
- togo = null;\r
- added = null;\r
- removed = null;\r
- sofar = null;\r
- runlist = null;\r
- }\r
- return(null);\r
- }\r
- /**\r
- * Add a task to the list of tasks to run whenever the event occurs.\r
- */\r
- public synchronized void addTask(Runnable task) {\r
- if (runlist != null) {\r
- added.add(task);\r
- removed.remove(task);\r
- }\r
- tasks.add(task);\r
- }\r
- /**\r
- * Remove a task from the list of tasks to run whenever the event occurs.\r
- */\r
- public synchronized void removeTask(Runnable task) {\r
- if (runlist != null) {\r
- removed.add(task);\r
- added.remove(task);\r
- }\r
- tasks.remove(task);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.node;
+
+import java.util.*;
+
+/**
+ * Manage a list of tasks to be executed when an event occurs.
+ * This makes the following guarantees:
+ * <ul>
+ * <li>Tasks can be safely added and removed in the middle of a run.</li>
+ * <li>No task will be returned more than once during a run.</li>
+ * <li>No task will be returned when it is not, at that moment, in the list of tasks.</li>
+ * <li>At the moment when next() returns null, all tasks on the list have been returned during the run.</li>
+ * <li>Initially and once next() returns null during a run, next() will continue to return null until startRun() is called.
+ * </ul>
+ */
+public class TaskList {
+ private Iterator<Runnable> runlist;
+ private HashSet<Runnable> tasks = new HashSet<Runnable>();
+ private HashSet<Runnable> togo;
+ private HashSet<Runnable> sofar;
+ private HashSet<Runnable> added;
+ private HashSet<Runnable> removed;
+
+ /**
+ * Construct a new TaskList
+ */
+ public TaskList() {
+ }
+
+ /**
+ * Start executing the sequence of tasks.
+ */
+ public synchronized void startRun() {
+ sofar = new HashSet<Runnable>();
+ added = new HashSet<Runnable>();
+ removed = new HashSet<Runnable>();
+ togo = new HashSet<Runnable>(tasks);
+ runlist = togo.iterator();
+ }
+
+ /**
+ * Get the next task to execute
+ */
+ public synchronized Runnable next() {
+ while (runlist != null) {
+ if (runlist.hasNext()) {
+ Runnable task = runlist.next();
+ if (removed.contains(task)) {
+ continue;
+ }
+ if (sofar.contains(task)) {
+ continue;
+ }
+ sofar.add(task);
+ return (task);
+ }
+ if (added.size() != 0) {
+ togo = added;
+ added = new HashSet<Runnable>();
+ removed.clear();
+ runlist = togo.iterator();
+ continue;
+ }
+ togo = null;
+ added = null;
+ removed = null;
+ sofar = null;
+ runlist = null;
+ }
+ return (null);
+ }
+
+ /**
+ * Add a task to the list of tasks to run whenever the event occurs.
+ */
+ public synchronized void addTask(Runnable task) {
+ if (runlist != null) {
+ added.add(task);
+ removed.remove(task);
+ }
+ tasks.add(task);
+ }
+
+ /**
+ * Remove a task from the list of tasks to run whenever the event occurs.
+ */
+ public synchronized void removeTask(Runnable task) {
+ if (runlist != null) {
+ removed.add(task);
+ added.remove(task);
+ }
+ tasks.remove(task);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-package org.onap.dmaap.datarouter.node.eelf;\r
-\r
-import ch.qos.logback.classic.spi.ILoggingEvent;\r
-import ch.qos.logback.core.filter.Filter;\r
-import ch.qos.logback.core.spi.FilterReply;\r
-\r
-/*\r
- * When EELF functionality added it default started logging Jetty logs as well which in turn stopped existing functionality of logging jetty statements in node.log\r
- * added code in logback.xml to add jetty statements in node.log.\r
- * This class removes extran EELF statements from node.log since they are being logged in apicalls.log \r
- */\r
-public class EELFFilter extends Filter<ILoggingEvent>{\r
- @Override\r
- public FilterReply decide(ILoggingEvent event) { \r
- if (event.getMessage().contains("EELF")) {\r
- return FilterReply.DENY;\r
- } else {\r
- return FilterReply.ACCEPT;\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+package org.onap.dmaap.datarouter.node.eelf;
+
+import ch.qos.logback.classic.spi.ILoggingEvent;
+import ch.qos.logback.core.filter.Filter;
+import ch.qos.logback.core.spi.FilterReply;
+
+/*
+ * When EELF functionality was added, it by default also started logging Jetty logs, which in turn broke the existing behavior of logging Jetty statements in node.log;
+ * code was added in logback.xml to restore Jetty statements in node.log.
+ * This class removes extraneous EELF statements from node.log since they are already logged in apicalls.log
+ */
+public class EELFFilter extends Filter<ILoggingEvent> {
+ @Override
+ public FilterReply decide(ILoggingEvent event) {
+ if (event.getMessage().contains("EELF")) {
+ return FilterReply.DENY;
+ } else {
+ return FilterReply.ACCEPT;
+ }
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-package org.onap.dmaap.datarouter.node.eelf;\r
-\r
-import com.att.eelf.i18n.EELFResolvableErrorEnum;\r
-import com.att.eelf.i18n.EELFResourceManager;\r
-\r
-public enum EelfMsgs implements EELFResolvableErrorEnum {\r
- \r
- /**\r
- * Application message prints user (accepts one argument)\r
- */\r
- MESSAGE_WITH_BEHALF,\r
-\r
- /**\r
- * Application message prints user and FeedID (accepts two arguments)\r
- */\r
-\r
- MESSAGE_WITH_BEHALF_AND_FEEDID,\r
- \r
- /**\r
- * Application message prints keystore file error in EELF errors log\r
- */\r
-\r
- MESSAGE_KEYSTORE_LOAD_ERROR,\r
- \r
- /**\r
- * Application message prints Error extracting my name from my keystore file\r
- */\r
-\r
- MESSAGE_KEYSORE_NAME_ERROR, \r
- \r
- /**\r
- * Application message prints Error parsing configuration data from provisioning server.\r
- */\r
-\r
-\r
- MESSAGE_PARSING_ERROR, \r
- \r
- /**\r
- * Application message printsConfiguration failed\r
- */\r
-\r
-\r
- MESSAGE_CONF_FAILED, \r
- \r
- /**\r
- * Application message prints Bad provisioning server URL\r
- */\r
-\r
-\r
- MESSAGE_BAD_PROV_URL, \r
- \r
- /**\r
- * Application message prints Unable to fetch canonical name from keystore file\r
- */\r
-\r
-\r
- MESSAGE_KEYSTORE_FETCH_ERROR,\r
- \r
- /**\r
- * Application message prints Unable to load local configuration file.\r
- */\r
-\r
-\r
- MESSAGE_PROPERTIES_LOAD_ERROR;\r
-\r
- \r
- /**\r
- * Static initializer to ensure the resource bundles for this class are loaded...\r
- * Here this application loads messages from three bundles\r
- */\r
- static {\r
- EELFResourceManager.loadMessageBundle("EelfMessages");\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+package org.onap.dmaap.datarouter.node.eelf;
+
+import com.att.eelf.i18n.EELFResolvableErrorEnum;
+import com.att.eelf.i18n.EELFResourceManager;
+
+public enum EelfMsgs implements EELFResolvableErrorEnum {
+
+ /**
+ * Application message prints user (accepts one argument)
+ */
+ MESSAGE_WITH_BEHALF,
+
+ /**
+ * Application message prints user and FeedID (accepts two arguments)
+ */
+
+ MESSAGE_WITH_BEHALF_AND_FEEDID,
+
+ /**
+ * Application message prints keystore file error in EELF errors log
+ */
+
+ MESSAGE_KEYSTORE_LOAD_ERROR,
+
+ /**
+ * Application message prints Error extracting my name from my keystore file
+ */
+
+ MESSAGE_KEYSORE_NAME_ERROR,
+
+ /**
+ * Application message prints Error parsing configuration data from provisioning server.
+ */
+
+
+ MESSAGE_PARSING_ERROR,
+
+ /**
+ * Application message prints Configuration failed
+ */
+
+
+ MESSAGE_CONF_FAILED,
+
+ /**
+ * Application message prints Bad provisioning server URL
+ */
+
+
+ MESSAGE_BAD_PROV_URL,
+
+ /**
+ * Application message prints Unable to fetch canonical name from keystore file
+ */
+
+
+ MESSAGE_KEYSTORE_FETCH_ERROR,
+
+ /**
+ * Application message prints Unable to load local configuration file.
+ */
+
+
+ MESSAGE_PROPERTIES_LOAD_ERROR;
+
+
+ /**
+ * Static initializer to ensure the resource bundles for this class are loaded...
+ * Here this application loads messages from the EelfMessages bundle
+ */
+ static {
+ EELFResourceManager.loadMessageBundle("EelfMessages");
+ }
+}
-<!--\r
- ============LICENSE_START==================================================\r
- * org.onap.dmaap\r
- * ===========================================================================\r
- * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * ===========================================================================\r
- * Licensed under the Apache License, Version 2.0 (the "License");\r
- * you may not use this file except in compliance with the License.\r
- * You may obtain a copy of the License at\r
- * \r
- * http://www.apache.org/licenses/LICENSE-2.0\r
- * \r
- * Unless required by applicable law or agreed to in writing, software\r
- * distributed under the License is distributed on an "AS IS" BASIS,\r
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * See the License for the specific language governing permissions and\r
- * limitations under the License.\r
- * ============LICENSE_END====================================================\r
- *\r
- * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- *\r
--->\r
-<configuration scan="true" scanPeriod="3 seconds" debug="true">\r
- <!--<jmxConfigurator /> -->\r
- <!-- directory path for all other type logs -->\r
- <!-- property name="logDir" value="/home/eby/dr2/logs" / -->\r
- <property name="logDir" value="/opt/app/datartr/logs" />\r
- \r
- <!-- directory path for debugging type logs -->\r
- <!-- property name="debugDir" value="/home/eby/dr2/debug-logs" /-->\r
- \r
- <!-- specify the component name \r
- <ECOMP-component-name>::= "MSO" | "DCAE" | "ASDC " | "AAI" |"Policy" | "SDNC" | "AC" -->\r
- <!-- This creates the MSO directory in in the LogDir which is not needed, mentioned last directory of the path-->\r
- <!-- property name="componentName" value="logs"></property -->\r
- \r
- <!-- log file names -->\r
- <property name="generalLogName" value="apicalls" />\r
- <!-- name="securityLogName" value="security" -->\r
- <!-- name="performanceLogName" value="performance" -->\r
- <!-- name="serverLogName" value="server" -->\r
- <!-- name="policyLogName" value="policy"-->\r
- <property name="errorLogName" value="errors" />\r
- <!-- name="metricsLogName" value="metrics" -->\r
- <!-- name="auditLogName" value="audit" -->\r
- <!-- name="debugLogName" value="debug" -->\r
- <property name="jettyAndNodeLogName" value="node"></property> \r
- <property name="defaultPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|%msg%n" />\r
- <property name="jettyAndNodeLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%thread|%.-5level|%msg%n" />\r
- \r
- <property name="debugLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|[%caller{3}]|%msg%n" />\r
- \r
- <property name="logDirectory" value="${logDir}" />\r
- <!-- property name="debugLogDirectory" value="${debugDir}/${componentName}" /-->\r
- \r
- \r
- <!-- Example evaluator filter applied against console appender -->\r
- <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
-\r
- <!-- ============================================================================ -->\r
- <!-- EELF Appenders -->\r
- <!-- ============================================================================ -->\r
-\r
- <!-- The EELFAppender is used to record events to the general application \r
- log -->\r
- \r
- \r
- <appender name="EELF"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${generalLogName}.log</file>\r
- <filter class="ch.qos.logback.classic.filter.LevelFilter">\r
- <level>INFO</level>\r
- <onMatch>ACCEPT</onMatch>\r
- <onMismatch>DENY</onMismatch>\r
- </filter>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${generalLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELF" />\r
- </appender>\r
-\r
- <!-- EELF Security Appender. This appender is used to record security events \r
- to the security log file. Security events are separate from other loggers \r
- in EELF so that security log records can be captured and managed in a secure \r
- way separate from the other logs. This appender is set to never discard any \r
- events. -->\r
- <!--appender name="EELFSecurity"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${securityLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${securityLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELFSecurity" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <discardingThreshold>0</discardingThreshold>\r
- <appender-ref ref="EELFSecurity" />\r
- </appender-->\r
-\r
- <!-- EELF Performance Appender. This appender is used to record performance \r
- records. -->\r
- <!--appender name="EELFPerformance"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${performanceLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${performanceLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <outputPatternAsHeader>true</outputPatternAsHeader>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- <appender name="asyncEELFPerformance" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFPerformance" />\r
- </appender-->\r
-\r
- <!-- EELF Server Appender. This appender is used to record Server related \r
- logging events. The Server logger and appender are specializations of the \r
- EELF application root logger and appender. This can be used to segregate Server \r
- events from other components, or it can be eliminated to record these events \r
- as part of the application root log. -->\r
- <!--appender name="EELFServer"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${serverLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${serverLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- <appender name="asyncEELFServer" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFServer" />\r
- </appender-->\r
-\r
- \r
- <!-- EELF Policy Appender. This appender is used to record Policy engine \r
- related logging events. The Policy logger and appender are specializations \r
- of the EELF application root logger and appender. This can be used to segregate \r
- Policy engine events from other components, or it can be eliminated to record \r
- these events as part of the application root log. -->\r
- <!--appender name="EELFPolicy"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${policyLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${policyLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- <appender name="asyncEELFPolicy" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFPolicy" >\r
- </appender-->\r
- \r
- \r
- <!-- EELF Audit Appender. This appender is used to record audit engine \r
- related logging events. The audit logger and appender are specializations \r
- of the EELF application root logger and appender. This can be used to segregate \r
- Policy engine events from other components, or it can be eliminated to record \r
- these events as part of the application root log. -->\r
- \r
- <!--appender name="EELFAudit"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${auditLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${auditLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFAudit" />\r
- </appender-->\r
-\r
-<!--appender name="EELFMetrics"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${metricsLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${metricsLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder-->\r
- <!-- <pattern>"%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - \r
- %msg%n"</pattern> -->\r
- <!--pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- \r
- <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFMetrics"/>\r
- </appender-->\r
- \r
- <appender name="EELFError"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${errorLogName}.log</file>\r
- <filter class="ch.qos.logback.classic.filter.LevelFilter">\r
- <level>ERROR</level>\r
- <onMatch>ACCEPT</onMatch>\r
- <onMismatch>DENY</onMismatch>\r
- </filter>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${errorLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFError"/>\r
- </appender>\r
- \r
- <!-- ============================================================================ -->\r
- <appender name="jettyAndNodelog"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${jettyAndNodeLogName}.log</file>\r
- <filter class="org.onap.dmaap.datarouter.node.eelf.EELFFilter" />\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${jettyAndNodeLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${jettyAndNodeLoggerPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELFjettyAndNodelog" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="jettyAndNodelog" />\r
- <includeCallerData>true</includeCallerData>\r
- </appender>\r
- \r
- <!-- ============================================================================ -->\r
-\r
-\r
- <!--appender name="EELFDebug"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${debugLogDirectory}/${debugLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${debugLogDirectory}/${debugLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${debugLoggerPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFDebug" />\r
- <includeCallerData>true</includeCallerData>\r
- </appender-->\r
- \r
- \r
- <!-- ============================================================================ -->\r
- <!-- EELF loggers -->\r
- <!-- ============================================================================ -->\r
- <logger name="com.att.eelf" level="info" additivity="false">\r
- <appender-ref ref="asyncEELF" />\r
- </logger>\r
- \r
- <logger name="com.att.eelf.error" level="error" additivity="false">\r
- <appender-ref ref="asyncEELFError" />\r
- </logger>\r
- \r
- <logger name="log4j.logger.org.eclipse.jetty" additivity="false" level="info">\r
- <appender-ref ref="asyncEELFjettyAndNodelog"/>\r
- </logger> \r
- \r
- <!-- logger name="com.att.eelf.security" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFSecurity" /> \r
- </logger>\r
- <logger name="com.att.eelf.perf" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFPerformance" />\r
- </logger>\r
- <logger name="com.att.eelf.server" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFServer" />\r
- </logger>\r
- <logger name="com.att.eelf.policy" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFPolicy" />\r
- </logger>\r
-\r
- <logger name="com.att.eelf.audit" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFAudit" />\r
- </logger>\r
- \r
- <logger name="com.att.eelf.metrics" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFMetrics" />\r
- </logger>\r
- \r
- <logger name="com.att.eelf.debug" level="debug" additivity="false">\r
- <appender-ref ref="asyncEELFDebug" />\r
- </logger-->\r
-\r
- \r
-\r
- \r
- <root level="INFO">\r
- <appender-ref ref="asyncEELF" />\r
- <appender-ref ref="asyncEELFError" />\r
- <appender-ref ref="asyncEELFjettyAndNodelog" />\r
- </root>\r
-\r
-</configuration>\r
+<!--
+ ============LICENSE_START==================================================
+ * org.onap.dmaap
+ * ===========================================================================
+ * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * ===========================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END====================================================
+ *
+ * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ *
+-->
+<configuration scan="true" scanPeriod="3 seconds" debug="true">
+ <!--<jmxConfigurator /> -->
+ <!-- directory path for all other type logs -->
+ <!-- property name="logDir" value="/home/eby/dr2/logs" / -->
+ <property name="logDir" value="/opt/app/datartr/logs" />
+
+ <!-- directory path for debugging type logs -->
+ <!-- property name="debugDir" value="/home/eby/dr2/debug-logs" /-->
+
+ <!-- specify the component name
+ <ECOMP-component-name>::= "MSO" | "DCAE" | "ASDC " | "AAI" |"Policy" | "SDNC" | "AC" -->
+ <!-- This creates the MSO directory in the LogDir which is not needed; it is the last directory of the path-->
+ <!-- property name="componentName" value="logs"></property -->
+
+ <!-- log file names -->
+ <property name="generalLogName" value="apicalls" />
+ <!-- name="securityLogName" value="security" -->
+ <!-- name="performanceLogName" value="performance" -->
+ <!-- name="serverLogName" value="server" -->
+ <!-- name="policyLogName" value="policy"-->
+ <property name="errorLogName" value="errors" />
+ <!-- name="metricsLogName" value="metrics" -->
+ <!-- name="auditLogName" value="audit" -->
+ <!-- name="debugLogName" value="debug" -->
+ <property name="jettyAndNodeLogName" value="node"></property>
+ <property name="defaultPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|%msg%n" />
+ <property name="jettyAndNodeLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%thread|%.-5level|%msg%n" />
+
+ <property name="debugLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|[%caller{3}]|%msg%n" />
+
+ <property name="logDirectory" value="${logDir}" />
+ <!-- property name="debugLogDirectory" value="${debugDir}/${componentName}" /-->
+
+
+ <!-- Example evaluator filter applied against console appender -->
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <!-- ============================================================================ -->
+ <!-- EELF Appenders -->
+ <!-- ============================================================================ -->
+
+ <!-- The EELFAppender is used to record events to the general application
+ log -->
+
+
+ <appender name="EELF"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${generalLogName}.log</file>
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>INFO</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${generalLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELF" />
+ </appender>
+
+ <!-- EELF Security Appender. This appender is used to record security events
+ to the security log file. Security events are separate from other loggers
+ in EELF so that security log records can be captured and managed in a secure
+ way separate from the other logs. This appender is set to never discard any
+ events. -->
+ <!--appender name="EELFSecurity"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${securityLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${securityLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFSecurity" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <discardingThreshold>0</discardingThreshold>
+ <appender-ref ref="EELFSecurity" />
+ </appender-->
+
+ <!-- EELF Performance Appender. This appender is used to record performance
+ records. -->
+ <!--appender name="EELFPerformance"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${performanceLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${performanceLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <outputPatternAsHeader>true</outputPatternAsHeader>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFPerformance" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFPerformance" />
+ </appender-->
+
+ <!-- EELF Server Appender. This appender is used to record Server related
+ logging events. The Server logger and appender are specializations of the
+ EELF application root logger and appender. This can be used to segregate Server
+ events from other components, or it can be eliminated to record these events
+ as part of the application root log. -->
+ <!--appender name="EELFServer"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${serverLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${serverLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFServer" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFServer" />
+ </appender-->
+
+
+ <!-- EELF Policy Appender. This appender is used to record Policy engine
+ related logging events. The Policy logger and appender are specializations
+ of the EELF application root logger and appender. This can be used to segregate
+ Policy engine events from other components, or it can be eliminated to record
+ these events as part of the application root log. -->
+ <!--appender name="EELFPolicy"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${policyLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${policyLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFPolicy" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFPolicy" >
+ </appender-->
+
+
+ <!-- EELF Audit Appender. This appender is used to record audit engine
+ related logging events. The audit logger and appender are specializations
+ of the EELF application root logger and appender. This can be used to segregate
+ Policy engine events from other components, or it can be eliminated to record
+ these events as part of the application root log. -->
+
+ <!--appender name="EELFAudit"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${auditLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${auditLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFAudit" />
+ </appender-->
+
+<!--appender name="EELFMetrics"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${metricsLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${metricsLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder-->
+ <!-- <pattern>"%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} -
+ %msg%n"</pattern> -->
+ <!--pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+
+ <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFMetrics"/>
+ </appender-->
+
+ <appender name="EELFError"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${errorLogName}.log</file>
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>ERROR</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${errorLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFError"/>
+ </appender>
+
+ <!-- ============================================================================ -->
+ <appender name="jettyAndNodelog"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${jettyAndNodeLogName}.log</file>
+ <filter class="org.onap.dmaap.datarouter.node.eelf.EELFFilter" />
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${jettyAndNodeLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${jettyAndNodeLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFjettyAndNodelog" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="jettyAndNodelog" />
+ <includeCallerData>true</includeCallerData>
+ </appender>
+
+ <!-- ============================================================================ -->
+
+
+ <!--appender name="EELFDebug"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${debugLogDirectory}/${debugLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${debugLogDirectory}/${debugLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${debugLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFDebug" />
+ <includeCallerData>true</includeCallerData>
+ </appender-->
+
+
+ <!-- ============================================================================ -->
+ <!-- EELF loggers -->
+ <!-- ============================================================================ -->
+ <logger name="com.att.eelf" level="info" additivity="false">
+ <appender-ref ref="asyncEELF" />
+ </logger>
+
+ <logger name="com.att.eelf.error" level="error" additivity="false">
+ <appender-ref ref="asyncEELFError" />
+ </logger>
+
+ <logger name="log4j.logger.org.eclipse.jetty" additivity="false" level="info">
+ <appender-ref ref="asyncEELFjettyAndNodelog"/>
+ </logger>
+
+ <!-- logger name="com.att.eelf.security" level="info" additivity="false">
+ <appender-ref ref="asyncEELFSecurity" />
+ </logger>
+ <logger name="com.att.eelf.perf" level="info" additivity="false">
+ <appender-ref ref="asyncEELFPerformance" />
+ </logger>
+ <logger name="com.att.eelf.server" level="info" additivity="false">
+ <appender-ref ref="asyncEELFServer" />
+ </logger>
+ <logger name="com.att.eelf.policy" level="info" additivity="false">
+ <appender-ref ref="asyncEELFPolicy" />
+ </logger>
+
+ <logger name="com.att.eelf.audit" level="info" additivity="false">
+ <appender-ref ref="asyncEELFAudit" />
+ </logger>
+
+ <logger name="com.att.eelf.metrics" level="info" additivity="false">
+ <appender-ref ref="asyncEELFMetrics" />
+ </logger>
+
+ <logger name="com.att.eelf.debug" level="debug" additivity="false">
+ <appender-ref ref="asyncEELFDebug" />
+ </logger-->
+
+
+
+
+ <root level="INFO">
+ <appender-ref ref="asyncEELF" />
+ <appender-ref ref="asyncEELFError" />
+ <appender-ref ref="asyncEELFjettyAndNodelog" />
+ </root>
+
+</configuration>
-<?xml version="1.0" encoding="utf-8"?>\r
-<!--\r
- ============LICENSE_START==================================================\r
- * org.onap.dmaap\r
- * ===========================================================================\r
- * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * ===========================================================================\r
- * Licensed under the Apache License, Version 2.0 (the "License");\r
- * you may not use this file except in compliance with the License.\r
- * You may obtain a copy of the License at\r
- * \r
- * http://www.apache.org/licenses/LICENSE-2.0\r
- * \r
- * Unless required by applicable law or agreed to in writing, software\r
- * distributed under the License is distributed on an "AS IS" BASIS,\r
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * See the License for the specific language governing permissions and\r
- * limitations under the License.\r
- * ============LICENSE_END====================================================\r
- *\r
- * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- *\r
--->\r
-<descriptor version="1" xmlns="http://aft.att.com/swm/descriptor">\r
- <platforms>\r
- <platform os="Linux" osVersions="*" architecture="*"/>\r
- </platforms>\r
- <paths>\r
- <path name="/opt/app/datartr" user="datartr" group="datartr" permissions="755,644" recursive="true"/>\r
- <path name="/opt/app/platform/init.d/drtrnode" user="datartr" group="datartr" permissions="755"/>\r
- </paths>\r
- <actions>\r
- <action type="INIT">\r
- <proc stage="POST" user="datartr" group="datartr"/>\r
- </action>\r
- <action type="FALL">\r
- <proc stage="PRE" user="datartr" group="datartr"/>\r
- <proc stage="POST" user="datartr" group="datartr"/>\r
- </action>\r
- <action type="INST">\r
- <proc stage="PRE" user="datartr" group="datartr"/>\r
- <proc stage="POST" user="datartr" group="datartr"/>\r
- </action>\r
- <action type="DINST">\r
- <proc stage="PRE" user="datartr" group="datartr"/>\r
- </action>\r
- </actions>\r
- <dependencies>\r
- <dependencyFilter componentName="com.att.java:jdk8lin" versions="[1.8.0.77-02]" sequence="1"/>\r
- <dependencyFilter componentName="com.att.platform:initd" versions="[1.0.15,)" sequence="2"/>\r
- <dependencyFilter componentName="com.att.dmaap.datarouter:util" versions="[1.0.7,)" sequence="3"/>\r
- </dependencies>\r
-</descriptor>\r
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ ============LICENSE_START==================================================
+ * org.onap.dmaap
+ * ===========================================================================
+ * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * ===========================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END====================================================
+ *
+ * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ *
+-->
+<descriptor version="1" xmlns="http://aft.att.com/swm/descriptor">
+ <platforms>
+ <platform os="Linux" osVersions="*" architecture="*"/>
+ </platforms>
+ <paths>
+ <path name="/opt/app/datartr" user="datartr" group="datartr" permissions="755,644" recursive="true"/>
+ <path name="/opt/app/platform/init.d/drtrnode" user="datartr" group="datartr" permissions="755"/>
+ </paths>
+ <actions>
+ <action type="INIT">
+ <proc stage="POST" user="datartr" group="datartr"/>
+ </action>
+ <action type="FALL">
+ <proc stage="PRE" user="datartr" group="datartr"/>
+ <proc stage="POST" user="datartr" group="datartr"/>
+ </action>
+ <action type="INST">
+ <proc stage="PRE" user="datartr" group="datartr"/>
+ <proc stage="POST" user="datartr" group="datartr"/>
+ </action>
+ <action type="DINST">
+ <proc stage="PRE" user="datartr" group="datartr"/>
+ </action>
+ </actions>
+ <dependencies>
+ <dependencyFilter componentName="com.att.java:jdk8lin" versions="[1.8.0.77-02]" sequence="1"/>
+ <dependencyFilter componentName="com.att.platform:initd" versions="[1.0.15,)" sequence="2"/>
+ <dependencyFilter componentName="com.att.dmaap.datarouter:util" versions="[1.0.7,)" sequence="3"/>
+ </dependencies>
+</descriptor>
do
case "$action" in
'backup')
- cp log4j.properties log4j.properties.save 2>/dev/null
- cp node.properties node.properties.save 2>/dev/null
- cp havecert havecert.save 2>/dev/null
- ;;
+ cp log4j.properties log4j.properties.save 2>/dev/null
+ cp node.properties node.properties.save 2>/dev/null
+ cp havecert havecert.save 2>/dev/null
+ ;;
'stop')
- /opt/app/platform/init.d/drtrnode stop
- ;;
+ /opt/app/platform/init.d/drtrnode stop
+ ;;
'start')
- /opt/app/platform/init.d/drtrnode start || exit 1
- ;;
+ /opt/app/platform/init.d/drtrnode start || exit 1
+ ;;
'config')
- /bin/bash log4j.properties.tmpl >log4j.properties
- /bin/bash node.properties.tmpl >node.properties
- /bin/bash havecert.tmpl >havecert
- echo "$AFTSWM_ACTION_NEW_VERSION" >VERSION.node
- chmod +x havecert
- rm -f /opt/app/platform/rc.d/K90drtrnode /opt/app/platform/rc.d/S10drtrnode
- ln -s ../init.d/drtrnode /opt/app/platform/rc.d/K90drtrnode
- ln -s ../init.d/drtrnode /opt/app/platform/rc.d/S10drtrnode
- ;;
+ /bin/bash log4j.properties.tmpl >log4j.properties
+ /bin/bash node.properties.tmpl >node.properties
+ /bin/bash havecert.tmpl >havecert
+ echo "$AFTSWM_ACTION_NEW_VERSION" >VERSION.node
+ chmod +x havecert
+ rm -f /opt/app/platform/rc.d/K90drtrnode /opt/app/platform/rc.d/S10drtrnode
+ ln -s ../init.d/drtrnode /opt/app/platform/rc.d/K90drtrnode
+ ln -s ../init.d/drtrnode /opt/app/platform/rc.d/S10drtrnode
+ ;;
'restore')
- cp log4j.properties.save log4j.properties 2>/dev/null
- cp node.properties.save node.properties 2>/dev/null
- cp havecert.save havecert 2>/dev/null
- ;;
+ cp log4j.properties.save log4j.properties 2>/dev/null
+ cp node.properties.save node.properties 2>/dev/null
+ cp havecert.save havecert 2>/dev/null
+ ;;
'clean')
- rm -f log4j.properties node.properties havecert log4j.properties.save node.properties.save havecert.save SHUTDOWN redirections.dat VERSION.node
- rm -f /opt/app/platform/rc.d/K90drtrnode /opt/app/platform/rc.d/S10drtrnode
- ;;
+ rm -f log4j.properties node.properties havecert log4j.properties.save node.properties.save havecert.save SHUTDOWN redirections.dat VERSION.node
+ rm -f /opt/app/platform/rc.d/K90drtrnode /opt/app/platform/rc.d/S10drtrnode
+ ;;
*)
- exit 1
- ;;
+ exit 1
+ ;;
esac
done
exit 0
export TZ
PATH=/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/java/jdk/jdk180/bin
export PATH
-CLASSPATH=`echo /opt/app/datartr/etc /opt/app/datartr/lib/*.jar | tr ' ' ':'`
+CLASSPATH=`echo /opt/app/datartr/etc /opt/app/datartr/lib/*.jar | tr ' ' ':'`
export CLASSPATH
pids() {
- ps -ef | grep java | grep node.NodeMain | sed -e 's/[^ ]* *//' -e 's/ .*//'
+ ps -ef | grep java | grep node.NodeMain | sed -e 's/[^ ]* *//' -e 's/ .*//'
}
start() {
- ID=`id -n -u`
- GRP=`id -n -g`
- if [ "$ID" != "root" ]
- then
- echo drtrnode must be started as user datartr not $ID
- exit 1
- fi
- if [ "$GRP" != "datartr" ]
- then
- echo drtrnode must be started as group datartr not $GRP
- exit 1
- fi
- cd /opt/app/datartr
- if etc/havecert
- then
- echo >/dev/null
- else
- echo No certificate file available. Cannot start
- exit 0
- fi
- PIDS=`pids`
- if [ "$PIDS" != "" ]
- then
- echo drtrnode already running
- exit 0
- fi
+ ID=`id -n -u`
+ GRP=`id -n -g`
+ if [ "$ID" != "root" ]
+ then
+ echo drtrnode must be started as user datartr not $ID
+ exit 1
+ fi
+ if [ "$GRP" != "datartr" ]
+ then
+ echo drtrnode must be started as group datartr not $GRP
+ exit 1
+ fi
+ cd /opt/app/datartr
+ if etc/havecert
+ then
+ echo >/dev/null
+ else
+ echo No certificate file available. Cannot start
+ exit 0
+ fi
+ PIDS=`pids`
+ if [ "$PIDS" != "" ]
+ then
+ echo drtrnode already running
+ exit 0
+ fi
- mkdir -p /opt/app/datartr/spool/s
- chmod 755 /opt/app/datartr/spool/s
+ mkdir -p /opt/app/datartr/spool/s
+ chmod 755 /opt/app/datartr/spool/s
- rm -f /opt/app/datartr/etc/SHUTDOWN
- nohup java org.onap.dmaap.datarouter.node.NodeMain </dev/null >/dev/null 2>&1 &
- sleep 5
- PIDS=`pids`
- if [ "$PIDS" = "" ]
- then
- echo drtrnode startup failed
- else
- echo drtrnode started
- fi
+ rm -f /opt/app/datartr/etc/SHUTDOWN
+ nohup java org.onap.dmaap.datarouter.node.NodeMain </dev/null >/dev/null 2>&1 &
+ sleep 5
+ PIDS=`pids`
+ if [ "$PIDS" = "" ]
+ then
+ echo drtrnode startup failed
+ else
+ echo drtrnode started
+ fi
}
stop() {
- ID=`id -n -u`
- GRP=`id -n -g`
- if [ "$ID" != "datartr" ]
- then
- echo drtrnode must be stopped as user datartr not $ID
- exit 1
- fi
- if [ "$GRP" != "datartr" ]
- then
- echo drtrnode must be stopped as group datartr not $GRP
- exit 1
- fi
- touch /opt/app/datartr/etc/SHUTDOWN
- PIDS=`pids`
- if [ "$PIDS" != "" ]
- then
- sleep 5
- kill -9 $PIDS
- sleep 5
- echo drtrnode stopped
- else
- echo drtrnode not running
- fi
+ ID=`id -n -u`
+ GRP=`id -n -g`
+ if [ "$ID" != "datartr" ]
+ then
+ echo drtrnode must be stopped as user datartr not $ID
+ exit 1
+ fi
+ if [ "$GRP" != "datartr" ]
+ then
+ echo drtrnode must be stopped as group datartr not $GRP
+ exit 1
+ fi
+ touch /opt/app/datartr/etc/SHUTDOWN
+ PIDS=`pids`
+ if [ "$PIDS" != "" ]
+ then
+ sleep 5
+ kill -9 $PIDS
+ sleep 5
+ echo drtrnode stopped
+ else
+ echo drtrnode not running
+ fi
}
status() {
- PIDS=`pids`
- if [ "$PIDS" != "" ]
- then
- echo drtrnode running
- else
- echo drtrnode not running
- fi
+ PIDS=`pids`
+ if [ "$PIDS" != "" ]
+ then
+ echo drtrnode running
+ else
+ echo drtrnode not running
+ fi
}
case "$1" in
'start')
- start
- ;;
+ start
+ ;;
'stop')
- stop
- ;;
+ stop
+ ;;
'restart')
- stop
- sleep 20
- start
- ;;
+ stop
+ sleep 20
+ start
+ ;;
'status')
- status
- ;;
+ status
+ ;;
*)
- echo "Usage: $0 { start | stop | restart }"
- exit 1
- ;;
+ echo "Usage: $0 { start | stop | restart }"
+ exit 1
+ ;;
esac
exit 0
cd /opt/app/datartr;
if [ -f ${DRTR_NODE_KSTOREFILE:-etc/keystore} ]
then
- exit 0
+ exit 0
fi
echo `date '+%F %T,000'` WARN Certificate file "${DRTR_NODE_KSTOREFILE:-etc/keystore}" is missing >>${DRTR_NODE_LOGS:-logs}/node.log
exit 1
This component is for the Data Router Node software.
The following pre-requisite components should already be present:
- com.att.aft.swm:swm-cli
- com.att.aft.swm:swm-node
- - SWM Variables: AFTSWM_AUTOLINK_PARENTS=/opt/app:/opt/app/workload,/opt/app/aft:/opt/app/workload/aft
- com.att.platform:uam-auto
- com.att.java:jdk8lin
- com.att.platform:initd
- com.att.platform:port-fwd
- - SWM Variables: PLATFORM_PORT_FWD=80,8080|443,8443
- com.att.dmaap.datarouter:util
+ com.att.aft.swm:swm-cli
+ com.att.aft.swm:swm-node
+ - SWM Variables: AFTSWM_AUTOLINK_PARENTS=/opt/app:/opt/app/workload,/opt/app/aft:/opt/app/workload/aft
+ com.att.platform:uam-auto
+ com.att.java:jdk8lin
+ com.att.platform:initd
+ com.att.platform:port-fwd
+ - SWM Variables: PLATFORM_PORT_FWD=80,8080|443,8443
+ com.att.dmaap.datarouter:util
In a non-production environment, the URL for fetching provisioning data from
the provisioning server must be overridden. This can be done by setting a SWM
variable prior to installing this component. The production (default) value for
this variable is:
- DRTR_PROV_INTURL=https://feeds-drtr.web.att.com/internal/prov
+ DRTR_PROV_INTURL=https://feeds-drtr.web.att.com/internal/prov
Similarly, the URL for uploading event logs to the log server must be overridden. This can also be done by setting a SWM variable. The production (default) value is:
- DRTR_LOG_URL=https://feeds-drtr.web.att.com/internal/logs
+ DRTR_LOG_URL=https://feeds-drtr.web.att.com/internal/logs
Other SWM variables that can be set are:
DRTR_NODE_INTHTTPPORT (default 8080)
- The TCP/IP port number the component should listen on for "go fetch"
- requests from the provisioning server
+ The TCP/IP port number the component should listen on for "go fetch"
+ requests from the provisioning server
DRTR_NODE_INTHTTPSPORT (default 8443)
- The TCP/IP port number the component should listen on for publish
- requests from feed publishers and other nodes
+ The TCP/IP port number the component should listen on for publish
+ requests from feed publishers and other nodes
DRTR_NODE_EXTHTTPSPORT (default 443)
- The TCP/IP port number the component should use for node-to-node
- transfers and for sending redirect requests back to publishers
+ The TCP/IP port number the component should use for node-to-node
+ transfers and for sending redirect requests back to publishers
DRTR_NODE_SPOOL (default /opt/app/datartr/spool)
- The directory where data files should be saved while in transit
+ The directory where data files should be saved while in transit
DRTR_NODE_LOGS (default /opt/app/datartr/logs)
- The directory where log files should be kept
+ The directory where log files should be kept
DRTR_NODE_LOG_RETENTION (default 30)
- How long a log file is kept before being deleted
+ How long a log file is kept before being deleted
DRTR_NODE_KSTOREFILE (default /opt/app/datartr/etc/keystore)
- The java keystore file containing the server certificate and private key
- for this server
+ The java keystore file containing the server certificate and private key
+ for this server
DRTR_NODE_KSTOREPASS (default changeit)
- The password for the keystore file
+ The password for the keystore file
DRTR_NODE_PVTKEYPASS (default changeit)
- The password for the private key in the keystore file
+ The password for the private key in the keystore file
DRTR_NODE_TSTOREFILE (by default, use the truststore from the Java JDK)
- The java keystore file containing the trusted certificate authority
- certificates
+ The java keystore file containing the trusted certificate authority
+ certificates
DRTR_NODE_TSTOREPASS (default changeit)
- The password for the trust store file. Only applies if a trust store
- file is specified.
+ The password for the trust store file. Only applies if a trust store
+ file is specified.
-\r
-{\r
- "name": "Jettydemo",\r
- "version": "m1.0",\r
- "description": "Jettydemo",\r
- "business_description": "Jettydemo",\r
- "suspend": false,\r
- "deleted": false,\r
- "changeowner": true,\r
- "authorization": {\r
- "classification": "unclassified",\r
- "endpoint_addrs": [\r
- "172.18.0.3",\r
- ],\r
- "endpoint_ids": [\r
- {\r
- "password": "rs873m",\r
- "id": "rs873m"\r
- }\r
- ]\r
- },\r
-}\r
-\r
+
+{
+ "name": "Jettydemo",
+ "version": "m1.0",
+ "description": "Jettydemo",
+ "business_description": "Jettydemo",
+ "suspend": false,
+ "deleted": false,
+ "changeowner": true,
+ "authorization": {
+ "classification": "unclassified",
+ "endpoint_addrs": [
+ "172.18.0.3",
+ ],
+ "endpoint_ids": [
+ {
+ "password": "rs873m",
+ "id": "rs873m"
+ }
+ ]
+ },
+}
+
-\r
-{ \r
- "delivery" : \r
- \r
- { \r
- "url" : "http://172.18.0.3:7070/", \r
- "user" : "LOGIN", \r
- "password" : "PASSWORD", \r
- "use100" : true \r
- },\r
- "metadataOnly" : false, \r
- "suspend" : false, \r
- "groupid" : 29,\r
- "subscriber" : "sg481n"\r
-}\r
+
+{
+ "delivery" :
+
+ {
+ "url" : "http://172.18.0.3:7070/",
+ "user" : "LOGIN",
+ "password" : "PASSWORD",
+ "use100" : true
+ },
+ "metadataOnly" : false,
+ "suspend" : false,
+ "groupid" : 29,
+ "subscriber" : "sg481n"
+}
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* The <code>AuthorizationResponse</code> interface gives the caller access to information about an authorization\r
* decision. This information includes the permit/deny decision itself, along with supplementary information in the form of\r
* advice and obligations. (The advice and obligations will not be used in Data Router R1.)\r
- * \r
+ *\r
* @author J. F. Lucas\r
*\r
*/\r
public interface AuthorizationResponse {\r
- /**\r
- * Indicates whether the request is authorized or not.\r
- * \r
- * @return a boolean flag that is <code>true</code> if the request is permitted, and <code>false</code> otherwise.\r
- */\r
- public boolean isAuthorized();\r
- \r
- /**\r
- * Returns any advice elements that were included in the authorization response.\r
- * \r
- * @return A list of objects implementing the <code>AuthorizationResponseSupplement</code> interface, with each object representing an\r
- * advice element from the authorization response.\r
- */\r
- public List<AuthorizationResponseSupplement> getAdvice();\r
- \r
- /**\r
- * Returns any obligation elements that were included in the authorization response.\r
- * \r
- * @return A list of objects implementing the <code>AuthorizationResponseSupplement</code> interface, with each object representing an\r
- * obligation element from the authorization response.\r
- */\r
- public List<AuthorizationResponseSupplement> getObligations();\r
+ /**\r
+ * Indicates whether the request is authorized or not.\r
+ *\r
+ * @return a boolean flag that is <code>true</code> if the request is permitted, and <code>false</code> otherwise.\r
+ */\r
+ public boolean isAuthorized();\r
+\r
+ /**\r
+ * Returns any advice elements that were included in the authorization response.\r
+ *\r
+ * @return A list of objects implementing the <code>AuthorizationResponseSupplement</code> interface, with each object representing an\r
+ * advice element from the authorization response.\r
+ */\r
+ public List<AuthorizationResponseSupplement> getAdvice();\r
+\r
+ /**\r
+ * Returns any obligation elements that were included in the authorization response.\r
+ *\r
+ * @return A list of objects implementing the <code>AuthorizationResponseSupplement</code> interface, with each object representing an\r
+ * obligation element from the authorization response.\r
+ */\r
+ public List<AuthorizationResponseSupplement> getObligations();\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* obligation elements. The structure of a XACML advice element and a XACML obligation element are similar: each has an identifier and\r
* a set of attributes (name-value) pairs. (The difference between a XACML advice element and a XACML obligation element is in\r
* how the recipient of the response--the Policy Enforcement Point, in XACML terminology--handles the element.)\r
- * \r
+ *\r
* @author J. F. Lucas\r
*\r
*/\r
public interface AuthorizationResponseSupplement {\r
- /** Return the identifier for the supplementary information element.\r
- * \r
- * @return a <code>String</code> containing the identifier.\r
- */\r
- public String getId();\r
- \r
- /** Return the attributes for the supplementary information element, as a <code>Map</code> in which\r
- * keys represent attribute identifiers and values represent attribute values.\r
- * \r
- * @return attributes for the supplementary information element.\r
- */\r
- public Map<String, String> getAttributes();\r
+ /** Return the identifier for the supplementary information element.\r
+ *\r
+ * @return a <code>String</code> containing the identifier.\r
+ */\r
+ public String getId();\r
+\r
+ /** Return the attributes for the supplementary information element, as a <code>Map</code> in which\r
+ * keys represent attribute identifiers and values represent attribute values.\r
+ *\r
+ * @return attributes for the supplementary information element.\r
+ */\r
+ public Map<String, String> getAttributes();\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* request is permitted. In Data Router R1, the classes that implement the <code>Authorizer</code> interface will have\r
* local logic that makes the authorization decision. After R1, these classes will instead have logic that creates XACML\r
* authorization requests, sends these requests to a Policy Decision Point (PDP), and parses the XACML responses.\r
- * \r
+ *\r
* @author J. F. Lucas\r
*\r
*/\r
public interface Authorizer {\r
- /**\r
- * Determine if the API request carried in the <code>request</code> parameter is permitted.\r
- * \r
- * @param request the HTTP request for which an authorization decision is needed\r
- * @return an object implementing the <code>AuthorizationResponse</code> interface. This object includes the\r
- * permit/deny decision for the request and (after R1) supplemental information related to the response in the form\r
- * of advice and obligations.\r
- */\r
- public AuthorizationResponse decide(HttpServletRequest request);\r
- \r
- /**\r
- * Determine if the API request carried in the <code>request</code> parameter, with additional attributes provided in\r
- * the <code>additionalAttrs</code> parameter, is permitted.\r
- * \r
- * @param request the HTTP request for which an authorization decision is needed\r
- * @param additionalAttrs additional attributes that the <code>Authorizer</code> can in making an authorization decision\r
- * @return an object implementing the <code>AuthorizationResponse</code> interface. This object includes the\r
- * permit/deny decision for the request and (after R1) supplemental information related to the response in the form\r
- * of advice and obligations.\r
- */\r
- public AuthorizationResponse decide(HttpServletRequest request, Map<String,String> additionalAttrs);\r
+ /**\r
+ * Determine if the API request carried in the <code>request</code> parameter is permitted.\r
+ *\r
+ * @param request the HTTP request for which an authorization decision is needed\r
+ * @return an object implementing the <code>AuthorizationResponse</code> interface. This object includes the\r
+ * permit/deny decision for the request and (after R1) supplemental information related to the response in the form\r
+ * of advice and obligations.\r
+ */\r
+ public AuthorizationResponse decide(HttpServletRequest request);\r
+\r
+ /**\r
+ * Determine if the API request carried in the <code>request</code> parameter, with additional attributes provided in\r
+ * the <code>additionalAttrs</code> parameter, is permitted.\r
+ *\r
+ * @param request the HTTP request for which an authorization decision is needed\r
+ * @param additionalAttrs additional attributes that the <code>Authorizer</code> can in making an authorization decision\r
+ * @return an object implementing the <code>AuthorizationResponse</code> interface. This object includes the\r
+ * permit/deny decision for the request and (after R1) supplemental information related to the response in the form\r
+ * of advice and obligations.\r
+ */\r
+ public AuthorizationResponse decide(HttpServletRequest request, Map<String,String> additionalAttrs);\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
*\r
*/\r
public class AuthRespImpl implements AuthorizationResponse {\r
- private boolean authorized;\r
- private List<AuthorizationResponseSupplement> advice;\r
- private List<AuthorizationResponseSupplement> obligations;\r
- \r
- /** Constructor. This version will not be used in Data Router R1 since we will not have advice and obligations.\r
- * \r
- * @param authorized flag indicating whether the response carried a permit response (<code>true</code>) \r
- * or something else (<code>false</code>).\r
- * @param advice list of advice elements returned in the response.\r
- * @param obligations list of obligation elements returned in the response.\r
- */\r
- public AuthRespImpl(boolean authorized, List<AuthorizationResponseSupplement> advice, List<AuthorizationResponseSupplement> obligations) {\r
- this.authorized = authorized;\r
- this.advice = (advice == null ? null : new ArrayList<AuthorizationResponseSupplement> (advice));\r
- this.obligations = (obligations == null ? null : new ArrayList<AuthorizationResponseSupplement> (obligations));\r
- }\r
- \r
- /** Constructor. Simple version for authorization responses that have no advice and no obligations.\r
- * \r
- * @param authorized flag indicating whether the response carried a permit (<code>true</code>) or something else (<code>false</code>).\r
- */\r
- public AuthRespImpl(boolean authorized) {\r
- this(authorized, null, null);\r
- }\r
+ private boolean authorized;\r
+ private List<AuthorizationResponseSupplement> advice;\r
+ private List<AuthorizationResponseSupplement> obligations;\r
+\r
+ /** Constructor. This version will not be used in Data Router R1 since we will not have advice and obligations.\r
+ *\r
+ * @param authorized flag indicating whether the response carried a permit response (<code>true</code>)\r
+ * or something else (<code>false</code>).\r
+ * @param advice list of advice elements returned in the response.\r
+ * @param obligations list of obligation elements returned in the response.\r
+ */\r
+ public AuthRespImpl(boolean authorized, List<AuthorizationResponseSupplement> advice, List<AuthorizationResponseSupplement> obligations) {\r
+ this.authorized = authorized;\r
+ this.advice = (advice == null ? null : new ArrayList<AuthorizationResponseSupplement> (advice));\r
+ this.obligations = (obligations == null ? null : new ArrayList<AuthorizationResponseSupplement> (obligations));\r
+ }\r
+\r
+ /** Constructor. Simple version for authorization responses that have no advice and no obligations.\r
+ *\r
+ * @param authorized flag indicating whether the response carried a permit (<code>true</code>) or something else (<code>false</code>).\r
+ */\r
+ public AuthRespImpl(boolean authorized) {\r
+ this(authorized, null, null);\r
+ }\r
\r
- /**\r
- * Indicates whether the request is authorized or not.\r
- * \r
- * @return a boolean flag that is <code>true</code> if the request is permitted, and <code>false</code> otherwise.\r
- */\r
- @Override\r
- public boolean isAuthorized() {\r
- return authorized;\r
- }\r
+ /**\r
+ * Indicates whether the request is authorized or not.\r
+ *\r
+ * @return a boolean flag that is <code>true</code> if the request is permitted, and <code>false</code> otherwise.\r
+ */\r
+ @Override\r
+ public boolean isAuthorized() {\r
+ return authorized;\r
+ }\r
\r
- /**\r
- * Returns any advice elements that were included in the authorization response.\r
- * \r
- * @return A list of objects implementing the <code>AuthorizationResponseSupplement</code> interface, with each object representing an\r
- * advice element from the authorization response.\r
- */\r
- @Override\r
- public List<AuthorizationResponseSupplement> getAdvice() {\r
- return advice;\r
- }\r
+ /**\r
+ * Returns any advice elements that were included in the authorization response.\r
+ *\r
+ * @return A list of objects implementing the <code>AuthorizationResponseSupplement</code> interface, with each object representing an\r
+ * advice element from the authorization response.\r
+ */\r
+ @Override\r
+ public List<AuthorizationResponseSupplement> getAdvice() {\r
+ return advice;\r
+ }\r
\r
- /**\r
- * Returns any obligation elements that were included in the authorization response.\r
- * \r
- * @return A list of objects implementing the <code>AuthorizationResponseSupplement</code> interface, with each object representing an\r
- * obligation element from the authorization response.\r
- */\r
- @Override\r
- public List<AuthorizationResponseSupplement> getObligations() {\r
- return obligations;\r
- }\r
+ /**\r
+ * Returns any obligation elements that were included in the authorization response.\r
+ *\r
+ * @return A list of objects implementing the <code>AuthorizationResponseSupplement</code> interface, with each object representing an\r
+ * obligation element from the authorization response.\r
+ */\r
+ @Override\r
+ public List<AuthorizationResponseSupplement> getObligations() {\r
+ return obligations;\r
+ }\r
\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
*\r
*/\r
public class AuthRespSupplementImpl implements AuthorizationResponseSupplement {\r
- \r
- private String id = null;\r
- private Map<String, String> attributes = null;\r
\r
- /** Constructor, available within the package.\r
- * \r
- * @param id The identifier for the advice or obligation element\r
- * @param attributes The attributes (name-value pairs) for the advice or obligation element.\r
- */\r
- AuthRespSupplementImpl (String id, Map<String, String> attributes) {\r
- this.id = id;\r
- this.attributes = new HashMap<String,String>(attributes);\r
- }\r
+ private String id = null;\r
+ private Map<String, String> attributes = null;\r
+\r
+ /** Constructor, available within the package.\r
+ *\r
+ * @param id The identifier for the advice or obligation element\r
+ * @param attributes The attributes (name-value pairs) for the advice or obligation element.\r
+ */\r
+ AuthRespSupplementImpl (String id, Map<String, String> attributes) {\r
+ this.id = id;\r
+ this.attributes = new HashMap<String,String>(attributes);\r
+ }\r
\r
- /** Return the identifier for the supplementary information element.\r
- * \r
- * @return a <code>String</code> containing the identifier.\r
- */\r
- @Override\r
- public String getId() {\r
- return id;\r
- }\r
+ /** Return the identifier for the supplementary information element.\r
+ *\r
+ * @return a <code>String</code> containing the identifier.\r
+ */\r
+ @Override\r
+ public String getId() {\r
+ return id;\r
+ }\r
\r
- /** Return the attributes for the supplementary information element, as a <code>Map</code> in which\r
- * keys represent attribute identifiers and values represent attribute values.\r
- * \r
- * @return attributes for the supplementary information element.\r
- */\r
- @Override\r
- public Map<String, String> getAttributes() {\r
- return attributes;\r
- }\r
+ /** Return the attributes for the supplementary information element, as a <code>Map</code> in which\r
+ * keys represent attribute identifiers and values represent attribute values.\r
+ *\r
+ * @return attributes for the supplementary information element.\r
+ */\r
+ @Override\r
+ public Map<String, String> getAttributes() {\r
+ return attributes;\r
+ }\r
\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* <li>a subscription resource, the target of GET, PUT, and DELETE requests used to manage an existing subscription.\r
* Each subscription has a unique subscription ID.\r
* </li>\r
- * \r
+ *\r
* @author J. F. Lucas\r
*\r
*/\r
public class AuthzResource {\r
- private ResourceType type = null;\r
- private String id = "";\r
+ private ResourceType type = null;\r
+ private String id = "";\r
+\r
+ /* Construct an AuthzResource by matching a request URI against the various patterns */\r
+ public AuthzResource(String rURI) {\r
+ if (rURI != null) {\r
+ for (ResourceType t : ResourceType.values()) {\r
+ Matcher m = t.getPattern().matcher(rURI);\r
+ if (m.find(0)) {\r
+ this.type = t;\r
+ if (m.group("id") != null) {\r
+ this.id = m.group("id");\r
+ }\r
+ break;\r
+ }\r
+ }\r
+ }\r
+ }\r
+\r
+ public ResourceType getType() {\r
+ return this.type;\r
+ }\r
+\r
+ public String getId() {\r
+ return this.id;\r
+ }\r
+\r
+ /* Enumeration that helps turn a request URI into something more useful for\r
+ * authorization purposes by giving a type name and a pattern for determining if the URI\r
+ * represents that resource type.\r
+ * Highly dependent on the URL scheme, could be parameterized.\r
+ */\r
+ public enum ResourceType {\r
+ FEEDS_COLLECTION("((://[^/]+/)|(^/))(?<id>)$"),\r
+ SUBS_COLLECTION ("((://[^/]+/)|(^/{0,1}))subscribe/(?<id>[^/]+)$"),\r
+ FEED("((://[^/]+/)|(^/{0,1}))feed/(?<id>[^/]+)$"),\r
+ SUB("((://[^/]+/)|(^/{0,1}))subs/(?<id>[^/]+)$");\r
+\r
+ private Pattern uriPattern;\r
+\r
+ private ResourceType(String patternString) {\r
+ this.uriPattern = Pattern.compile(patternString);\r
+ }\r
\r
- /* Construct an AuthzResource by matching a request URI against the various patterns */\r
- public AuthzResource(String rURI) {\r
- if (rURI != null) {\r
- for (ResourceType t : ResourceType.values()) {\r
- Matcher m = t.getPattern().matcher(rURI);\r
- if (m.find(0)) {\r
- this.type = t;\r
- if (m.group("id") != null) {\r
- this.id = m.group("id");\r
- }\r
- break;\r
- }\r
- }\r
- }\r
- }\r
- \r
- public ResourceType getType() {\r
- return this.type;\r
- }\r
- \r
- public String getId() {\r
- return this.id;\r
- }\r
- \r
- /* Enumeration that helps turn a request URI into something more useful for\r
- * authorization purposes by given a type name and a pattern for determining if the URI\r
- * represents that resource type.\r
- * Highly dependent on the URL scheme, could be parameterized.\r
- */\r
- public enum ResourceType { \r
- FEEDS_COLLECTION("((://[^/]+/)|(^/))(?<id>)$"), \r
- SUBS_COLLECTION ("((://[^/]+/)|(^/{0,1}))subscribe/(?<id>[^/]+)$"),\r
- FEED("((://[^/]+/)|(^/{0,1}))feed/(?<id>[^/]+)$"),\r
- SUB("((://[^/]+/)|(^/{0,1}))subs/(?<id>[^/]+)$");\r
- \r
- private Pattern uriPattern;\r
- \r
- private ResourceType(String patternString) {\r
- this.uriPattern = Pattern.compile(patternString);\r
- }\r
- \r
- Pattern getPattern() {\r
- return this.uriPattern;\r
- }\r
- }\r
+ Pattern getPattern() {\r
+ return this.uriPattern;\r
+ }\r
+ }\r
}\r
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-package org.onap.dmaap.datarouter.authz.impl;\r
-\r
-import java.util.Map;\r
-\r
-import javax.servlet.http.HttpServletRequest;\r
-\r
-import org.apache.log4j.Logger;\r
-import org.onap.dmaap.datarouter.authz.AuthorizationResponse;\r
-import org.onap.dmaap.datarouter.authz.Authorizer;\r
-import org.onap.dmaap.datarouter.authz.impl.AuthzResource.ResourceType;\r
-\r
-/** Authorizer for the provisioning API for Data Router R1\r
- * \r
- * @author J. F. Lucas\r
- *\r
- */\r
-public class ProvAuthorizer implements Authorizer {\r
- \r
- private Logger log;\r
- private ProvDataProvider provData;\r
- \r
- private static final String SUBJECT_HEADER = "X-ATT-DR-ON-BEHALF-OF"; // HTTP header carrying requester identity\r
- private static final String SUBJECT_HEADER_GROUP = "X-ATT-DR-ON-BEHALF-OF-GROUP"; // HTTP header carrying requester identity by group Rally : US708115\r
- /** Constructor. For the moment, do nothing special. Make it a singleton? \r
- * \r
- */\r
- public ProvAuthorizer(ProvDataProvider provData) {\r
- this.provData = provData;\r
- this.log = Logger.getLogger(this.getClass());\r
- }\r
- \r
- /**\r
- * Determine if the API request carried in the <code>request</code> parameter is permitted.\r
- * \r
- * @param request the HTTP request for which an authorization decision is needed\r
- * @return an object implementing the <code>AuthorizationResponse</code> interface. This object includes the\r
- * permit/deny decision for the request and (after R1) supplemental information related to the response in the form\r
- * of advice and obligations.\r
- */\r
- @Override\r
- public AuthorizationResponse decide(HttpServletRequest request) {\r
- return this.decide(request, null);\r
- }\r
- \r
- /**\r
- * Determine if the API request carried in the <code>request</code> parameter, with additional attributes provided in\r
- * the <code>additionalAttrs</code> parameter, is permitted. <code>additionalAttrs</code> isn't used in R1.\r
- * \r
- * @param request the HTTP request for which an authorization decision is needed\r
- * @param additionalAttrs additional attributes that the <code>Authorizer</code> can in making an authorization decision\r
- * @return an object implementing the <code>AuthorizationResponse</code> interface. This object includes the\r
- * permit/deny decision for the request and (after R1) supplemental information related to the response in the form\r
- * of advice and obligations.\r
- */\r
- @Override\r
- public AuthorizationResponse decide(HttpServletRequest request,\r
- Map<String, String> additionalAttrs) {\r
- log.trace ("Entering decide()");\r
- \r
- boolean decision = false;\r
- \r
- // Extract interesting parts of the HTTP request\r
- String method = request.getMethod();\r
- AuthzResource resource = new AuthzResource(request.getRequestURI());\r
- String subject = (request.getHeader(SUBJECT_HEADER)); // identity of the requester\r
- String subjectgroup = (request.getHeader(SUBJECT_HEADER_GROUP)); // identity of the requester by group Rally : US708115\r
-\r
- log.trace("Method: " + method + " -- Type: " + resource.getType() + " -- Id: " + resource.getId() + \r
- " -- Subject: " + subject);\r
- \r
- // Choose authorization method based on the resource type\r
- ResourceType resourceType = resource.getType();\r
- if (resourceType != null) {\r
-\r
- switch (resourceType) {\r
-\r
- case FEEDS_COLLECTION:\r
- decision = allowFeedsCollectionAccess(resource, method, subject, subjectgroup);\r
- break;\r
-\r
- case SUBS_COLLECTION:\r
- decision = allowSubsCollectionAccess(resource, method, subject, subjectgroup);\r
- break;\r
-\r
- case FEED:\r
- decision = allowFeedAccess(resource, method, subject, subjectgroup);\r
- break;\r
-\r
- case SUB:\r
- decision = allowSubAccess(resource, method, subject, subjectgroup);\r
- break;\r
-\r
- default:\r
- decision = false;\r
- break;\r
- }\r
- }\r
- log.debug("Exit decide(): " + method + "|" + resourceType + "|" + resource.getId() + "|" + subject + " ==> " + decision);\r
- \r
- return new AuthRespImpl(decision);\r
- }\r
- \r
- private boolean allowFeedsCollectionAccess(AuthzResource resource, String method, String subject, String subjectgroup) {\r
- \r
- // Allow GET or POST unconditionally\r
- return method != null && (method.equalsIgnoreCase("GET") || method.equalsIgnoreCase("POST"));\r
- }\r
- \r
- private boolean allowSubsCollectionAccess(AuthzResource resource, String method, String subject, String subjectgroup) {\r
- \r
- // Allow GET or POST unconditionally\r
- return method != null && (method.equalsIgnoreCase("GET") || method.equalsIgnoreCase("POST"));\r
- }\r
- \r
- private boolean allowFeedAccess(AuthzResource resource, String method, String subject, String subjectgroup) {\r
- boolean decision = false;\r
- \r
- // Allow GET, PUT, or DELETE if requester (subject) is the owner (publisher) of the feed\r
- if ( method != null && (method.equalsIgnoreCase("GET") || method.equalsIgnoreCase("PUT") ||\r
- method.equalsIgnoreCase("DELETE"))) {\r
- \r
- String owner = provData.getFeedOwner(resource.getId());\r
- decision = (owner != null) && owner.equals(subject);\r
- \r
- //Verifying by group Rally : US708115\r
- if(subjectgroup != null) { \r
- String feedowner = provData.getGroupByFeedGroupId(subject, resource.getId());\r
- decision = (feedowner != null) && feedowner.equals(subjectgroup);\r
- }\r
- }\r
- \r
- return decision;\r
- }\r
- \r
- private boolean allowSubAccess(AuthzResource resource, String method, String subject, String subjectgroup) {\r
- boolean decision = false;\r
- \r
- // Allow GET, PUT, or DELETE if requester (subject) is the owner of the subscription (subscriber)\r
- if (method != null && (method.equalsIgnoreCase("GET") || method.equalsIgnoreCase("PUT") || \r
- method.equalsIgnoreCase("DELETE") || method.equalsIgnoreCase("POST"))) {\r
- \r
- String owner = provData.getSubscriptionOwner(resource.getId());\r
- decision = (owner != null) && owner.equals(subject);\r
- \r
- //Verifying by group Rally : US708115\r
- if(subjectgroup != null) {\r
- String feedowner = provData.getGroupBySubGroupId(subject, resource.getId());\r
- decision = (feedowner != null) && feedowner.equals(subjectgroup);\r
- }\r
- }\r
- \r
- return decision;\r
- }\r
-\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package org.onap.dmaap.datarouter.authz.impl;
+
+import java.util.Map;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.log4j.Logger;
+import org.onap.dmaap.datarouter.authz.AuthorizationResponse;
+import org.onap.dmaap.datarouter.authz.Authorizer;
+import org.onap.dmaap.datarouter.authz.impl.AuthzResource.ResourceType;
+
+/** Authorizer for the provisioning API for Data Router R1
+ *
+ * @author J. F. Lucas
+ *
+ */
+public class ProvAuthorizer implements Authorizer {
+
+ private Logger log;
+ private ProvDataProvider provData;
+
+ private static final String SUBJECT_HEADER = "X-ATT-DR-ON-BEHALF-OF"; // HTTP header carrying requester identity
+ private static final String SUBJECT_HEADER_GROUP = "X-ATT-DR-ON-BEHALF-OF-GROUP"; // HTTP header carrying requester identity by group Rally : US708115
+ /** Constructor. For the moment, do nothing special. Make it a singleton?
+ *
+ */
+ public ProvAuthorizer(ProvDataProvider provData) {
+ this.provData = provData;
+ this.log = Logger.getLogger(this.getClass());
+ }
+
+ /**
+ * Determine if the API request carried in the <code>request</code> parameter is permitted.
+ *
+ * @param request the HTTP request for which an authorization decision is needed
+ * @return an object implementing the <code>AuthorizationResponse</code> interface. This object includes the
+ * permit/deny decision for the request and (after R1) supplemental information related to the response in the form
+ * of advice and obligations.
+ */
+ @Override
+ public AuthorizationResponse decide(HttpServletRequest request) {
+ return this.decide(request, null);
+ }
+
+ /**
+ * Determine if the API request carried in the <code>request</code> parameter, with additional attributes provided in
+ * the <code>additionalAttrs</code> parameter, is permitted. <code>additionalAttrs</code> isn't used in R1.
+ *
+ * @param request the HTTP request for which an authorization decision is needed
+ * @param additionalAttrs additional attributes that the <code>Authorizer</code> can use in making an authorization decision
+ * @return an object implementing the <code>AuthorizationResponse</code> interface. This object includes the
+ * permit/deny decision for the request and (after R1) supplemental information related to the response in the form
+ * of advice and obligations.
+ */
+ @Override
+ public AuthorizationResponse decide(HttpServletRequest request,
+ Map<String, String> additionalAttrs) {
+ log.trace ("Entering decide()");
+
+ boolean decision = false;
+
+ // Extract interesting parts of the HTTP request
+ String method = request.getMethod();
+ AuthzResource resource = new AuthzResource(request.getRequestURI());
+ String subject = (request.getHeader(SUBJECT_HEADER)); // identity of the requester
+ String subjectgroup = (request.getHeader(SUBJECT_HEADER_GROUP)); // identity of the requester by group Rally : US708115
+
+ log.trace("Method: " + method + " -- Type: " + resource.getType() + " -- Id: " + resource.getId() +
+ " -- Subject: " + subject);
+
+ // Choose authorization method based on the resource type
+ ResourceType resourceType = resource.getType();
+ if (resourceType != null) {
+
+ switch (resourceType) {
+
+ case FEEDS_COLLECTION:
+ decision = allowFeedsCollectionAccess(resource, method, subject, subjectgroup);
+ break;
+
+ case SUBS_COLLECTION:
+ decision = allowSubsCollectionAccess(resource, method, subject, subjectgroup);
+ break;
+
+ case FEED:
+ decision = allowFeedAccess(resource, method, subject, subjectgroup);
+ break;
+
+ case SUB:
+ decision = allowSubAccess(resource, method, subject, subjectgroup);
+ break;
+
+ default:
+ decision = false;
+ break;
+ }
+ }
+ log.debug("Exit decide(): " + method + "|" + resourceType + "|" + resource.getId() + "|" + subject + " ==> " + decision);
+
+ return new AuthRespImpl(decision);
+ }
+
+ private boolean allowFeedsCollectionAccess(AuthzResource resource, String method, String subject, String subjectgroup) {
+
+ // Allow GET or POST unconditionally
+ return method != null && (method.equalsIgnoreCase("GET") || method.equalsIgnoreCase("POST"));
+ }
+
+ private boolean allowSubsCollectionAccess(AuthzResource resource, String method, String subject, String subjectgroup) {
+
+ // Allow GET or POST unconditionally
+ return method != null && (method.equalsIgnoreCase("GET") || method.equalsIgnoreCase("POST"));
+ }
+
+ private boolean allowFeedAccess(AuthzResource resource, String method, String subject, String subjectgroup) {
+ boolean decision = false;
+
+ // Allow GET, PUT, or DELETE if requester (subject) is the owner (publisher) of the feed
+ if ( method != null && (method.equalsIgnoreCase("GET") || method.equalsIgnoreCase("PUT") ||
+ method.equalsIgnoreCase("DELETE"))) {
+
+ String owner = provData.getFeedOwner(resource.getId());
+ decision = (owner != null) && owner.equals(subject);
+
+ //Verifying by group Rally : US708115
+ if(subjectgroup != null) {
+ String feedowner = provData.getGroupByFeedGroupId(subject, resource.getId());
+ decision = (feedowner != null) && feedowner.equals(subjectgroup);
+ }
+ }
+
+ return decision;
+ }
+
+ private boolean allowSubAccess(AuthzResource resource, String method, String subject, String subjectgroup) {
+ boolean decision = false;
+
+ // Allow GET, PUT, or DELETE if requester (subject) is the owner of the subscription (subscriber)
+ if (method != null && (method.equalsIgnoreCase("GET") || method.equalsIgnoreCase("PUT") ||
+ method.equalsIgnoreCase("DELETE") || method.equalsIgnoreCase("POST"))) {
+
+ String owner = provData.getSubscriptionOwner(resource.getId());
+ decision = (owner != null) && owner.equals(subject);
+
+ //Verifying by group Rally : US708115
+ if(subjectgroup != null) {
+ String feedowner = provData.getGroupBySubGroupId(subject, resource.getId());
+ decision = (feedowner != null) && feedowner.equals(subjectgroup);
+ }
+ }
+
+ return decision;
+ }
+
+}
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
******************************************************************************/\r
package org.onap.dmaap.datarouter.authz.impl;\r
\r
-/** Interface to access data about subscriptions and feeds. A software component that \r
+/** Interface to access data about subscriptions and feeds. A software component that\r
* uses the <code>ProvAuthorizer</code> needs to supply an implementation of this interface.\r
* @author J. F. Lucas\r
*\r
*/\r
public interface ProvDataProvider {\r
- \r
- /** Get the identity of the owner of a feed.\r
- * \r
- * @param feedId the feed ID of the feed whose owner is being looked up.\r
- * @return the feed owner's identity\r
- */\r
- public String getFeedOwner(String feedId);\r
- \r
- /** Get the security classification of a feed.\r
- * \r
- * @param feedId the ID of the feed whose classification is being looked up.\r
- * @return the classification of the feed.\r
- */\r
- public String getFeedClassification(String feedId);\r
- \r
- /** Get the identity of the owner of a feed\r
- * \r
- * @param subId the ID of the subscripition whose owner is being looked up.\r
- * @return the subscription owner's identity.\r
- */\r
- public String getSubscriptionOwner(String subId);\r
\r
- /** Get the identity of the owner of a feed by group id - Rally : US708115\r
- * \r
- * @param feedId, user the ID of the feed whose owner is being looked up.\r
- * @return the feed owner's identity by group.\r
- */\r
- public String getGroupByFeedGroupId(String owner, String feedId);\r
- \r
- /** Get the identity of the owner of a sub by group id Rally : US708115\r
- * \r
- * @param subId, user the ID of the feed whose owner is being looked up.\r
- * @return the feed owner's identity by group.\r
- */\r
- public String getGroupBySubGroupId(String owner, String subId);\r
+ /** Get the identity of the owner of a feed.\r
+ *\r
+ * @param feedId the feed ID of the feed whose owner is being looked up.\r
+ * @return the feed owner's identity\r
+ */\r
+ public String getFeedOwner(String feedId);\r
+\r
+ /** Get the security classification of a feed.\r
+ *\r
+ * @param feedId the ID of the feed whose classification is being looked up.\r
+ * @return the classification of the feed.\r
+ */\r
+ public String getFeedClassification(String feedId);\r
+\r
+ /** Get the identity of the owner of a subscription\r
+ *\r
+ * @param subId the ID of the subscription whose owner is being looked up.\r
+ * @return the subscription owner's identity.\r
+ */\r
+ public String getSubscriptionOwner(String subId);\r
+\r
+ /** Get the identity of the owner of a feed by group id - Rally : US708115\r
+ *\r
+ * @param feedId, user the ID of the feed whose owner is being looked up.\r
+ * @return the feed owner's identity by group.\r
+ */\r
+ public String getGroupByFeedGroupId(String owner, String feedId);\r
+\r
+ /** Get the identity of the owner of a sub by group id Rally : US708115\r
+ *\r
+ * @param subId, user the ID of the feed whose owner is being looked up.\r
+ * @return the feed owner's identity by group.\r
+ */\r
+ public String getGroupBySubGroupId(String owner, String subId);\r
}\r
# ============LICENSE_START==================================================\r
# * org.onap.dmaap\r
# * ===========================================================================\r
-# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
# * ===========================================================================\r
# * Licensed under the Apache License, Version 2.0 (the "License");\r
# * you may not use this file except in compliance with the License.\r
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.provisioning;\r
-\r
-import static com.att.eelf.configuration.Configuration.MDC_SERVER_FQDN;\r
-\r
-import static com.att.eelf.configuration.Configuration.MDC_SERVER_IP_ADDRESS;\r
-import static com.att.eelf.configuration.Configuration.MDC_SERVICE_NAME;\r
-\r
-import java.io.IOException;\r
-import java.io.InputStream;\r
-import java.net.InetAddress;\r
-import java.net.UnknownHostException;\r
-import java.security.cert.X509Certificate;\r
-import java.sql.Connection;\r
-import java.sql.SQLException;\r
-import java.util.HashMap;\r
-import java.util.HashSet;\r
-import java.util.Map;\r
-import java.util.Set;\r
-import java.util.List;\r
-import java.util.ArrayList;\r
-\r
-import javax.servlet.ServletConfig;\r
-import javax.servlet.ServletException;\r
-import javax.servlet.http.HttpServlet;\r
-import javax.servlet.http.HttpServletRequest;\r
-\r
-import org.apache.log4j.Logger;\r
-import org.json.JSONObject;\r
-import org.json.JSONTokener;\r
-import org.onap.dmaap.datarouter.authz.Authorizer;\r
-import org.onap.dmaap.datarouter.authz.impl.ProvAuthorizer;\r
-import org.onap.dmaap.datarouter.authz.impl.ProvDataProvider;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Deleteable;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Feed;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Group;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Insertable;\r
-import org.onap.dmaap.datarouter.provisioning.beans.NodeClass;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Parameters;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Subscription;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Updateable;\r
-import org.onap.dmaap.datarouter.provisioning.utils.DB;\r
-import org.onap.dmaap.datarouter.provisioning.utils.ThrottleFilter;\r
-import org.json.JSONException; \r
-import org.slf4j.MDC;\r
-\r
-import java.util.Properties;\r
-import java.util.regex.Pattern;\r
-import javax.mail.Message;\r
-import javax.mail.MessagingException;\r
-import javax.mail.Multipart;\r
-import javax.mail.Session;\r
-import javax.mail.Transport;\r
-import javax.mail.internet.AddressException;\r
-import javax.mail.internet.InternetAddress;\r
-import javax.mail.internet.MimeBodyPart;\r
-import javax.mail.internet.MimeMessage;\r
-import javax.mail.internet.MimeMultipart;\r
-/**\r
- * This is the base class for all Servlets in the provisioning code.\r
- * It provides standard constants and some common methods.\r
- *\r
- * @author Robert Eby\r
- * @version $Id: BaseServlet.java,v 1.16 2014/03/12 19:45:40 eby Exp $\r
- */\r
-@SuppressWarnings("serial")\r
-public class BaseServlet extends HttpServlet implements ProvDataProvider {\r
- public static final String BEHALF_HEADER = "X-ATT-DR-ON-BEHALF-OF";\r
- public static final String FEED_BASECONTENT_TYPE = "application/vnd.att-dr.feed";\r
- public static final String FEED_CONTENT_TYPE = "application/vnd.att-dr.feed; version=2.0";\r
- public static final String FEEDFULL_CONTENT_TYPE = "application/vnd.att-dr.feed-full; version=2.0";\r
- public static final String FEEDLIST_CONTENT_TYPE = "application/vnd.att-dr.feed-list; version=1.0";\r
- public static final String SUB_BASECONTENT_TYPE = "application/vnd.att-dr.subscription";\r
- public static final String SUB_CONTENT_TYPE = "application/vnd.att-dr.subscription; version=2.0";\r
- public static final String SUBFULL_CONTENT_TYPE = "application/vnd.att-dr.subscription-full; version=2.0";\r
- public static final String SUBLIST_CONTENT_TYPE = "application/vnd.att-dr.subscription-list; version=1.0";\r
-\r
- \r
- //Adding groups functionality, ...1610\r
- public static final String GROUP_BASECONTENT_TYPE = "application/vnd.att-dr.group";\r
- public static final String GROUP_CONTENT_TYPE = "application/vnd.att-dr.group; version=2.0";\r
- public static final String GROUPFULL_CONTENT_TYPE = "application/vnd.att-dr.group-full; version=2.0";\r
- public static final String GROUPLIST_CONTENT_TYPE = "application/vnd.att-dr.fegrouped-list; version=1.0";\r
-\r
-\r
- public static final String LOGLIST_CONTENT_TYPE = "application/vnd.att-dr.log-list; version=1.0";\r
- public static final String PROVFULL_CONTENT_TYPE1 = "application/vnd.att-dr.provfeed-full; version=1.0";\r
- public static final String PROVFULL_CONTENT_TYPE2 = "application/vnd.att-dr.provfeed-full; version=2.0";\r
- public static final String CERT_ATTRIBUTE = "javax.servlet.request.X509Certificate";\r
-\r
- public static final String DB_PROBLEM_MSG = "There has been a problem with the DB. It is suggested you try the operation again.";\r
-\r
- public static final int DEFAULT_MAX_FEEDS = 10000;\r
- public static final int DEFAULT_MAX_SUBS = 100000;\r
- public static final int DEFAULT_POKETIMER1 = 5;\r
- public static final int DEFAULT_POKETIMER2 = 30;\r
- public static final String DEFAULT_DOMAIN = "web.att.com";\r
- public static final String DEFAULT_PROVSRVR_NAME = "feeds-drtr.web.att.com";\r
- public static final String RESEARCH_SUBNET = "135.207.136.128/25";\r
- public static final String STATIC_ROUTING_NODES = ""; //Adding new param for static Routing - Rally:US664862-1610\r
-\r
- /** A boolean to trigger one time "provisioning changed" event on startup */\r
- private static boolean startmsg_flag = true;\r
- /** This POD should require SSL connections from clients; pulled from the DB (PROV_REQUIRE_SECURE) */\r
- private static boolean require_secure = true;\r
- /** This POD should require signed, recognized certificates from clients; pulled from the DB (PROV_REQUIRE_CERT) */\r
- private static boolean require_cert = true;\r
- /** The set of authorized addresses and networks; pulled from the DB (PROV_AUTH_ADDRESSES) */\r
- private static Set<String> authorizedAddressesAndNetworks = new HashSet<String>();\r
- /** The set of authorized names; pulled from the DB (PROV_AUTH_SUBJECTS) */\r
- private static Set<String> authorizedNames = new HashSet<String>();\r
- /** The FQDN of the initially "active" provisioning server in this Data Router ecosystem */\r
- private static String initial_active_pod;\r
- /** The FQDN of the initially "standby" provisioning server in this Data Router ecosystem */\r
- private static String initial_standby_pod;\r
- /** The FQDN of this provisioning server in this Data Router ecosystem */\r
- private static String this_pod;\r
- /** "Timer 1" - used to determine when to notify nodes of provisioning changes */\r
- private static long poke_timer1;\r
- /** "Timer 2" - used to determine when to notify nodes of provisioning changes */\r
- private static long poke_timer2;\r
- /** Array of nodes names and/or FQDNs */\r
- private static String[] nodes = new String[0];\r
- /** Array of node IP addresses */\r
- private static InetAddress[] nodeAddresses = new InetAddress[0];\r
- /** Array of POD IP addresses */\r
- private static InetAddress[] podAddresses = new InetAddress[0];\r
- /** The maximum number of feeds allowed; pulled from the DB (PROV_MAXFEED_COUNT) */\r
- protected static int max_feeds = 0;\r
- /** The maximum number of subscriptions allowed; pulled from the DB (PROV_MAXSUB_COUNT) */\r
- protected static int max_subs = 0;\r
- /** The current number of feeds in the system */\r
- protected static int active_feeds = 0;\r
- /** The current number of subscriptions in the system */\r
- protected static int active_subs = 0;\r
- /** The domain used to generate a FQDN from the "bare" node names */\r
- public static String prov_domain = "web.att.com";\r
- /** The standard FQDN of the provisioning server in this Data Router ecosystem */\r
- public static String prov_name = "feeds-drtr.web.att.com";\r
- /** The standard FQDN of the ACTIVE provisioning server in this Data Router ecosystem */\r
- public static String active_prov_name = "feeds-drtr.web.att.com";\r
- /** Special subnet that is allowed access to /internal */\r
- protected static String special_subnet = RESEARCH_SUBNET;\r
-\r
- /** Special subnet that is allowed access to /internal to Lab Machine */\r
- protected static String special_subnet_secondary = RESEARCH_SUBNET;\r
- protected static String static_routing_nodes = STATIC_ROUTING_NODES; //Adding new param for static Routing - Rally:US664862-1610\r
-\r
- /** This logger is used to log provisioning events */\r
- protected static Logger eventlogger;\r
- /** This logger is used to log internal events (errors, etc.) */\r
- protected static Logger intlogger;\r
- /** Authorizer - interface to the Policy Engine */\r
- protected static Authorizer authz;\r
- /** The Synchronizer used to sync active DB to standby one */\r
- protected static SynchronizerTask synctask = null;\r
- \r
- //Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.\r
- private InetAddress thishost;\r
- private InetAddress loopback;\r
- private static Boolean mailSendFlag = false;\r
-\r
- public static final String MAILCONFIG_FILE = "mail.properties";\r
- private static Properties mailprops;\r
- /**\r
- * Initialize data common to all the provisioning server servlets.\r
- */\r
- protected BaseServlet() {\r
- if (eventlogger == null)\r
- eventlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.events");\r
- if (intlogger == null)\r
- intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
- if (authz == null)\r
- authz = new ProvAuthorizer(this);\r
- if (startmsg_flag) {\r
- startmsg_flag = false;\r
- provisioningParametersChanged();\r
- }\r
- if (synctask == null) {\r
- synctask = SynchronizerTask.getSynchronizer();\r
- }\r
- String name = this.getClass().getName();\r
- intlogger.info("PROV0002 Servlet "+name+" started.");\r
- }\r
- @Override\r
- public void init(ServletConfig config) throws ServletException {\r
- super.init(config);\r
- try {\r
- thishost = InetAddress.getLocalHost();\r
- loopback = InetAddress.getLoopbackAddress();\r
- checkHttpsRelaxation(); //Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.\r
- } catch (UnknownHostException e) {\r
- // ignore\r
- }\r
- }\r
- protected int getIdFromPath(HttpServletRequest req) {\r
- String path = req.getPathInfo();\r
- if (path == null || path.length() < 2)\r
- return -1;\r
- try {\r
- return Integer.parseInt(path.substring(1));\r
- } catch (NumberFormatException e) {\r
- return -1;\r
- }\r
- }\r
- /**\r
- * Read the request's input stream and return a JSONObject from it\r
- * @param req the HTTP request\r
- * @return the JSONObject, or null if the stream cannot be parsed\r
- */\r
- protected JSONObject getJSONfromInput(HttpServletRequest req) {\r
- JSONObject jo = null;\r
- try {\r
- jo = new JSONObject(new JSONTokener(req.getInputStream()));\r
- if (intlogger.isDebugEnabled())\r
- intlogger.debug("JSON: "+jo.toString());\r
- } catch (Exception e) {\r
- intlogger.info("Error reading JSON: "+e);\r
- }\r
- return jo;\r
- }\r
- /**\r
- * Check if the remote host is authorized to perform provisioning.\r
- * Is the request secure?\r
- * Is it coming from an authorized IP address or network (configured via PROV_AUTH_ADDRESSES)?\r
- * Does it have a valid client certificate (configured via PROV_AUTH_SUBJECTS)?\r
- * @param request the request\r
- * @return an error string, or null if all is OK\r
- */\r
- protected String isAuthorizedForProvisioning(HttpServletRequest request) {\r
- // Is the request https?\r
- if (require_secure && !request.isSecure()) {\r
- return "Request must be made over an HTTPS connection.";\r
- }\r
-\r
- // Is remote IP authorized?\r
- String remote = request.getRemoteAddr();\r
- try {\r
- boolean found = false;\r
- InetAddress ip = InetAddress.getByName(remote);\r
- for (String addrnet : authorizedAddressesAndNetworks) {\r
- found |= addressMatchesNetwork(ip, addrnet);\r
- }\r
- if (!found) {\r
- return "Unauthorized address: "+remote;\r
- }\r
- } catch (UnknownHostException e) {\r
- return "Unauthorized address: "+remote;\r
- }\r
-\r
- // Does remote have a valid certificate?\r
- if (require_cert) {\r
- X509Certificate certs[] = (X509Certificate[]) request.getAttribute(CERT_ATTRIBUTE);\r
- if (certs == null || certs.length == 0) {\r
- return "Client certificate is missing.";\r
- }\r
- // cert[0] is the client cert\r
- // see http://www.proto.research.att.com/java/java7/api/javax/net/ssl/SSLSession.html#getPeerCertificates()\r
- String name = certs[0].getSubjectX500Principal().getName();\r
- if (!authorizedNames.contains(name)) {\r
- return "No authorized certificate found.";\r
- }\r
- }\r
-\r
- // No problems!\r
- return null;\r
- }\r
- /**\r
- * Check if the remote IP address is authorized to see the /internal URL tree.\r
- * @param request the HTTP request\r
- * @return true iff authorized\r
- */\r
- protected boolean isAuthorizedForInternal(HttpServletRequest request) {\r
- try {\r
- InetAddress ip = InetAddress.getByName(request.getRemoteAddr());\r
- for (InetAddress node : getNodeAddresses()) {\r
- if (node != null && ip.equals(node))\r
- return true;\r
- }\r
- for (InetAddress pod : getPodAddresses()) {\r
- if (pod != null && ip.equals(pod))\r
- return true;\r
- }\r
- if (thishost != null && ip.equals(thishost))\r
- return true;\r
- if (loopback != null && ip.equals(loopback))\r
- return true;\r
- // Also allow the "special subnet" access\r
- if (addressMatchesNetwork(ip, special_subnet_secondary))\r
- return true;\r
- if (addressMatchesNetwork(ip, special_subnet))\r
- return true;\r
- } catch (UnknownHostException e) {\r
- // ignore\r
- }\r
- return false;\r
- }\r
- /**\r
- * Check if an IP address matches a network address.\r
- * @param ip the IP address\r
- * @param s the network address; a bare IP address may be matched also\r
- * @return true if they intersect\r
- */\r
- protected static boolean addressMatchesNetwork(InetAddress ip, String s) {\r
- int mlen = -1;\r
- int n = s.indexOf("/");\r
- if (n >= 0) {\r
- mlen = Integer.parseInt(s.substring(n+1));\r
- s = s.substring(0, n);\r
- }\r
- try {\r
- InetAddress i2 = InetAddress.getByName(s);\r
- byte[] b1 = ip.getAddress();\r
- byte[] b2 = i2.getAddress();\r
- if (b1.length != b2.length)\r
- return false;\r
- if (mlen > 0) {\r
- byte[] masks = {\r
- (byte)0x00, (byte)0x80, (byte)0xC0, (byte)0xE0,\r
- (byte)0xF0, (byte)0xF8, (byte)0xFC, (byte)0xFE\r
- };\r
- byte mask = masks[mlen%8];\r
- for (n = mlen/8; n < b1.length; n++) {\r
- b1[n] &= mask;\r
- b2[n] &= mask;\r
- mask = 0;\r
- }\r
- }\r
- for (n = 0; n < b1.length; n++)\r
- if (b1[n] != b2[n])\r
- return false;\r
- } catch (UnknownHostException e) {\r
- return false;\r
- }\r
- return true;\r
- }\r
- /**\r
- * Something has changed in the provisioning data.\r
- * Start the timers that will cause the pre-packaged JSON string to be regenerated,\r
- * and cause nodes and the other provisioning server to be notified.\r
- */\r
- public static void provisioningDataChanged() {\r
- long now = System.currentTimeMillis();\r
- Poker p = Poker.getPoker();\r
- p.setTimers(now + (poke_timer1 * 1000L), now + (poke_timer2 * 1000L));\r
- }\r
- /**\r
- * Something in the parameters has changed, reload all parameters from the DB.\r
- */\r
- public static void provisioningParametersChanged() {\r
- Map<String,String> map = Parameters.getParameters();\r
- require_secure = getBoolean(map, Parameters.PROV_REQUIRE_SECURE);\r
- require_cert = getBoolean(map, Parameters.PROV_REQUIRE_CERT);\r
- authorizedAddressesAndNetworks = getSet(map, Parameters.PROV_AUTH_ADDRESSES);\r
- authorizedNames = getSet (map, Parameters.PROV_AUTH_SUBJECTS);\r
- nodes = getSet (map, Parameters.NODES).toArray(new String[0]);\r
- max_feeds = getInt (map, Parameters.PROV_MAXFEED_COUNT, DEFAULT_MAX_FEEDS);\r
- max_subs = getInt (map, Parameters.PROV_MAXSUB_COUNT, DEFAULT_MAX_SUBS);\r
- poke_timer1 = getInt (map, Parameters.PROV_POKETIMER1, DEFAULT_POKETIMER1);\r
- poke_timer2 = getInt (map, Parameters.PROV_POKETIMER2, DEFAULT_POKETIMER2);\r
- prov_domain = getString (map, Parameters.PROV_DOMAIN, DEFAULT_DOMAIN);\r
- prov_name = getString (map, Parameters.PROV_NAME, DEFAULT_PROVSRVR_NAME);\r
- active_prov_name = getString (map, Parameters.PROV_ACTIVE_NAME, prov_name);\r
- special_subnet = getString (map, Parameters.PROV_SPECIAL_SUBNET, RESEARCH_SUBNET);\r
- static_routing_nodes = getString (map, Parameters.STATIC_ROUTING_NODES, ""); //Adding new param for static Routing - Rally:US664862-1610\r
- initial_active_pod = getString (map, Parameters.ACTIVE_POD, "");\r
- initial_standby_pod = getString (map, Parameters.STANDBY_POD, "");\r
- static_routing_nodes = getString (map, Parameters.STATIC_ROUTING_NODES, ""); //Adding new param for static Routing - Rally:US664862-1610\r
- active_feeds = Feed.countActiveFeeds();\r
- active_subs = Subscription.countActiveSubscriptions();\r
- try {\r
- this_pod = InetAddress.getLocalHost().getHostName();\r
- } catch (UnknownHostException e) {\r
- this_pod = "";\r
- intlogger.warn("PROV0014 Cannot determine the name of this provisioning server.");\r
- }\r
-\r
- // Normalize the nodes, and fill in nodeAddresses\r
- InetAddress[] na = new InetAddress[nodes.length];\r
- for (int i = 0; i < nodes.length; i++) {\r
- if (nodes[i].indexOf('.') < 0)\r
- nodes[i] += "." + prov_domain;\r
- try {\r
- na[i] = InetAddress.getByName(nodes[i]);\r
- intlogger.debug("PROV0003 DNS lookup: "+nodes[i]+" => "+na[i].toString());\r
- } catch (UnknownHostException e) {\r
- na[i] = null;\r
- intlogger.warn("PROV0004 Cannot lookup "+nodes[i]+": "+e);\r
- }\r
- }\r
-\r
- //Reset Nodes arr after - removing static routing Nodes, Rally Userstory - US664862 . \r
- List<String> filterNodes = new ArrayList<>(); \r
- for (int i = 0; i < nodes.length; i++) { \r
- if(!static_routing_nodes.contains(nodes[i])){ \r
- filterNodes.add(nodes[i]); \r
- } \r
- } \r
- String [] filteredNodes = filterNodes.toArray(new String[filterNodes.size()]); \r
- nodes = filteredNodes;\r
-\r
- nodeAddresses = na;\r
- NodeClass.setNodes(nodes); // update NODES table\r
-\r
- // Normalize the PODs, and fill in podAddresses\r
- String[] pods = getPods();\r
- na = new InetAddress[pods.length];\r
- for (int i = 0; i < pods.length; i++) {\r
- if (pods[i].indexOf('.') < 0)\r
- pods[i] += "." + prov_domain;\r
- try {\r
- na[i] = InetAddress.getByName(pods[i]);\r
- intlogger.debug("PROV0003 DNS lookup: "+pods[i]+" => "+na[i].toString());\r
- } catch (UnknownHostException e) {\r
- na[i] = null;\r
- intlogger.warn("PROV0004 Cannot lookup "+pods[i]+": "+e);\r
- }\r
- }\r
- podAddresses = na;\r
-\r
- // Update ThrottleFilter\r
- ThrottleFilter.configure();\r
-\r
- // Check if we are active or standby POD\r
- if (!isInitialActivePOD() && !isInitialStandbyPOD())\r
- intlogger.warn("PROV0015 This machine is neither the active nor the standby POD.");\r
- }\r
-\r
-\r
- /**Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.\r
- * Load mail properties.\r
- * @author vs215k\r
- * \r
- **/\r
- private void loadMailProperties() {\r
- if (mailprops == null) {\r
- mailprops = new Properties();\r
- InputStream inStream = getClass().getClassLoader().getResourceAsStream(MAILCONFIG_FILE);\r
- try {\r
- mailprops.load(inStream);\r
- } catch (IOException e) {\r
- intlogger.fatal("PROV9003 Opening properties: "+e.getMessage());\r
- e.printStackTrace();\r
- System.exit(1);\r
- }\r
- finally {\r
- try {\r
- inStream.close();\r
- } \r
- catch (IOException e) {\r
- }\r
- }\r
- }\r
- }\r
- \r
- /**Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.\r
- * Check if HTTPS Relexaction is enabled \r
- * @author vs215k\r
- * \r
- **/\r
- private void checkHttpsRelaxation() {\r
- if(mailSendFlag == false) {\r
- Properties p = (new DB()).getProperties();\r
- intlogger.info("HTTPS relaxatio: "+p.get("org.onap.dmaap.datarouter.provserver.https.relaxation"));\r
- \r
- if(p.get("org.onap.dmaap.datarouter.provserver.https.relaxation").equals("true")) {\r
- try {\r
- notifyPSTeam(p.get("org.onap.dmaap.datarouter.provserver.https.relax.notify").toString());\r
- } \r
- catch (Exception e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- mailSendFlag = true;\r
- }\r
- }\r
- \r
- /**Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.\r
- * @author vs215k\r
- * @param email - list of email ids to notify if HTTP relexcation is enabled. \r
- **/\r
- private void notifyPSTeam(String email) throws Exception {\r
- loadMailProperties(); //Load HTTPS Relex mail properties.\r
- String[] emails = email.split(Pattern.quote("|"));\r
- \r
- Properties mailproperties = new Properties();\r
- mailproperties.put("mail.smtp.host", mailprops.get("com.att.dmaap.datarouter.mail.server"));\r
- mailproperties.put("mail.transport.protocol", mailprops.get("com.att.dmaap.datarouter.mail.protocol"));\r
- \r
- Session session = Session.getDefaultInstance(mailproperties, null);\r
- Multipart mp = new MimeMultipart();\r
- MimeBodyPart htmlPart = new MimeBodyPart();\r
- \r
- try {\r
- \r
- Message msg = new MimeMessage(session);\r
- msg.setFrom(new InternetAddress(mailprops.get("com.att.dmaap.datarouter.mail.from").toString()));\r
- \r
- InternetAddress[] addressTo = new InternetAddress[emails.length];\r
- for ( int x =0 ; x < emails.length; x++) {\r
- addressTo[x] = new InternetAddress(emails[x]);\r
- }\r
- \r
- msg.addRecipients(Message.RecipientType.TO, addressTo);\r
- msg.setSubject(mailprops.get("com.att.dmaap.datarouter.mail.subject").toString());\r
- htmlPart.setContent(mailprops.get("com.att.dmaap.datarouter.mail.body").toString().replace("[SERVER]", InetAddress.getLocalHost().getHostName()), "text/html");\r
- mp.addBodyPart(htmlPart);\r
- msg.setContent(mp);\r
- \r
- System.out.println(mailprops.get("com.att.dmaap.datarouter.mail.body").toString().replace("[SERVER]", InetAddress.getLocalHost().getHostName()));\r
- \r
- Transport.send(msg);\r
- intlogger.info("HTTPS relaxation mail is sent to - : "+email);\r
- \r
- } catch (AddressException e) {\r
- intlogger.error("Invalid email address, unable to send https relaxation mail to - : "+email);\r
- } catch (MessagingException e) {\r
- intlogger.error("Invalid email address, unable to send https relaxation mail to - : "+email);\r
- } \r
- }\r
-\r
-\r
- /**\r
- * Get an array of all node names in the DR network.\r
- * @return an array of Strings\r
- */\r
- public static String[] getNodes() {\r
- return nodes;\r
- }\r
- /**\r
- * Get an array of all node InetAddresses in the DR network.\r
- * @return an array of InetAddresses\r
- */\r
- public static InetAddress[] getNodeAddresses() {\r
- return nodeAddresses;\r
- }\r
- /**\r
- * Get an array of all POD names in the DR network.\r
- * @return an array of Strings\r
- */\r
- public static String[] getPods() {\r
- return new String[] { initial_active_pod, initial_standby_pod };\r
- }\r
- /**\r
- * Get an array of all POD InetAddresses in the DR network.\r
- * @return an array of InetAddresses\r
- */\r
- public static InetAddress[] getPodAddresses() {\r
- return podAddresses;\r
- }\r
- /**\r
- * Gets the FQDN of the initially ACTIVE provisioning server (POD).\r
- * Note: this used to be called isActivePOD(), however, that is a misnomer, as the active status\r
- * could shift to the standby POD without these parameters changing. Hence, the function names\r
- * have been changed to more accurately reflect their purpose.\r
- * @return the FQDN\r
- */\r
- public static boolean isInitialActivePOD() {\r
- return this_pod.equals(initial_active_pod);\r
- }\r
- /**\r
- * Gets the FQDN of the initially STANDBY provisioning server (POD).\r
- * Note: this used to be called isStandbyPOD(), however, that is a misnomer, as the standby status\r
- * could shift to the active POD without these parameters changing. Hence, the function names\r
- * have been changed to more accurately reflect their purpose.\r
- * @return the FQDN\r
- */\r
- public static boolean isInitialStandbyPOD() {\r
- return this_pod.equals(initial_standby_pod);\r
- }\r
- /**\r
- * INSERT an {@link Insertable} bean into the database.\r
- * @param bean the bean representing a row to insert\r
- * @return true if the INSERT was successful\r
- */\r
- protected boolean doInsert(Insertable bean) {\r
- boolean rv = false;\r
- DB db = new DB();\r
- Connection conn = null;\r
- try {\r
- conn = db.getConnection();\r
- rv = bean.doInsert(conn);\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0005 doInsert: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- if (conn != null)\r
- db.release(conn);\r
- }\r
- return rv;\r
- }\r
- /**\r
- * UPDATE an {@link Updateable} bean in the database.\r
- * @param bean the bean representing a row to update\r
- * @return true if the UPDATE was successful\r
- */\r
- protected boolean doUpdate(Updateable bean) {\r
- boolean rv = false;\r
- DB db = new DB();\r
- Connection conn = null;\r
- try {\r
- conn = db.getConnection();\r
- rv = bean.doUpdate(conn);\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0006 doUpdate: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- if (conn != null)\r
- db.release(conn);\r
- }\r
- return rv;\r
- }\r
- /**\r
- * DELETE an {@link Deleteable} bean from the database.\r
- * @param bean the bean representing a row to delete\r
- * @return true if the DELETE was successful\r
- */\r
- protected boolean doDelete(Deleteable bean) {\r
- boolean rv = false;\r
- DB db = new DB();\r
- Connection conn = null;\r
- try {\r
- conn = db.getConnection();\r
- rv = bean.doDelete(conn);\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0007 doDelete: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- if (conn != null)\r
- db.release(conn);\r
- }\r
- return rv;\r
- }\r
- private static boolean getBoolean(Map<String,String> map, String name) {\r
- String s = map.get(name);\r
- return (s != null) && s.equalsIgnoreCase("true");\r
- }\r
- private static String getString(Map<String,String> map, String name, String dflt) {\r
- String s = map.get(name);\r
- return (s != null) ? s : dflt;\r
- }\r
- private static int getInt(Map<String,String> map, String name, int dflt) {\r
- try {\r
- String s = map.get(name);\r
- return Integer.parseInt(s);\r
- } catch (NumberFormatException e) {\r
- return dflt;\r
- }\r
- }\r
- private static Set<String> getSet(Map<String,String> map, String name) {\r
- Set<String> set = new HashSet<String>();\r
- String s = map.get(name);\r
- if (s != null) {\r
- String[] pp = s.split("\\|");\r
- if (pp != null) {\r
- for (String t : pp) {\r
- String t2 = t.trim();\r
- if (t2.length() > 0)\r
- set.add(t2);\r
- }\r
- }\r
- }\r
- return set;\r
- }\r
-\r
- /**\r
- * A class used to encapsulate a Content-type header, separating out the "version" attribute\r
- * (which defaults to "1.0" if missing).\r
- */\r
- public class ContentHeader {\r
- private String type = "";\r
- private Map<String, String> map = new HashMap<String, String>();\r
- public ContentHeader() {\r
- this("", "1.0");\r
- }\r
- public ContentHeader(String t, String v) {\r
- type = t.trim();\r
- map.put("version", v);\r
- }\r
- public String getType() {\r
- return type;\r
- }\r
- public String getAttribute(String key) {\r
- String s = map.get(key);\r
- if (s == null)\r
- s = "";\r
- return s;\r
- }\r
- }\r
-\r
- /**\r
- * Get the ContentHeader from an HTTP request.\r
- * @param req the request\r
- * @return the header, encapsulated in a ContentHeader object\r
- */\r
- public ContentHeader getContentHeader(HttpServletRequest req) {\r
- ContentHeader ch = new ContentHeader();\r
- String s = req.getHeader("Content-Type");\r
- if (s != null) {\r
- String[] pp = s.split(";");\r
- ch.type = pp[0].trim();\r
- for (int i = 1; i < pp.length; i++) {\r
- int ix = pp[i].indexOf('=');\r
- if (ix > 0) {\r
- String k = pp[i].substring(0, ix).trim();\r
- String v = pp[i].substring(ix+1).trim();\r
- ch.map.put(k, v);\r
- } else {\r
- ch.map.put(pp[i].trim(), "");\r
- }\r
- }\r
- }\r
- return ch;\r
- }\r
- // Methods for the Policy Engine classes - ProvDataProvider interface\r
- @Override\r
- public String getFeedOwner(String feedId) {\r
- try {\r
- int n = Integer.parseInt(feedId);\r
- Feed f = Feed.getFeedById(n);\r
- if (f != null)\r
- return f.getPublisher();\r
- } catch (NumberFormatException e) {\r
- // ignore\r
- }\r
- return null;\r
- }\r
- @Override\r
- public String getFeedClassification(String feedId) {\r
- try {\r
- int n = Integer.parseInt(feedId);\r
- Feed f = Feed.getFeedById(n);\r
- if (f != null)\r
- return f.getAuthorization().getClassification();\r
- } catch (NumberFormatException e) {\r
- // ignore\r
- }\r
- return null;\r
- }\r
- @Override\r
- public String getSubscriptionOwner(String subId) {\r
- try {\r
- int n = Integer.parseInt(subId);\r
- Subscription s = Subscription.getSubscriptionById(n);\r
- if (s != null)\r
- return s.getSubscriber();\r
- } catch (NumberFormatException e) {\r
- // ignore\r
- }\r
- return null;\r
- }\r
-\r
- /*\r
- * @Method - isUserMemberOfGroup - Rally:US708115 \r
- * @Params - group object and user to check if exists in given group\r
- * @return - boolean value /true/false\r
- */\r
- private boolean isUserMemberOfGroup(Group group, String user) {\r
- \r
- String groupdetails = group.getMembers().replace("]", "").replace("[", "");\r
- String s[] = groupdetails.split("},");\r
- \r
- for(int i=0; i < s.length; i++) {\r
- JSONObject jsonObj = null;\r
- try {\r
- jsonObj = new JSONObject(s[i]+"}");\r
- if(jsonObj.get("id").equals(user))\r
- return true;\r
- } catch (JSONException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return false;\r
- \r
- }\r
- \r
- /*\r
- * @Method - getGroupByFeedGroupId- Rally:US708115 \r
- * @Params - User to check in group and feedid which is assigned the group.\r
- * @return - string value grupid/null\r
- */\r
- @Override\r
- public String getGroupByFeedGroupId(String owner, String feedId) {\r
- try {\r
- int n = Integer.parseInt(feedId);\r
- Feed f = Feed.getFeedById(n);\r
- if (f != null) {\r
- int groupid = f.getGroupid();\r
- if(groupid > 0) {\r
- Group group = Group.getGroupById(groupid);\r
- if(isUserMemberOfGroup(group, owner)) {\r
- return group.getAuthid();\r
- }\r
- }\r
- }\r
- } catch (NumberFormatException e) {\r
- // ignore\r
- }\r
- return null;\r
- }\r
- \r
- /*\r
- * @Method - getGroupBySubGroupId - Rally:US708115 \r
- * @Params - User to check in group and subid which is assigned the group.\r
- * @return - string value grupid/null\r
- */\r
- @Override\r
- public String getGroupBySubGroupId(String owner, String subId) {\r
- try {\r
- int n = Integer.parseInt(subId);\r
- Subscription s = Subscription.getSubscriptionById(n);\r
- if (s != null) {\r
- int groupid = s.getGroupid();\r
- if(groupid > 0) {\r
- Group group = Group.getGroupById(groupid);\r
- if(isUserMemberOfGroup(group, owner)) {\r
- return group.getAuthid();\r
- }\r
- }\r
- }\r
- } catch (NumberFormatException e) {\r
- // ignore\r
- }\r
- return null;\r
- }\r
- \r
- /*\r
- * @Method - setIpAndFqdnForEelf - Rally:US664892 \r
- * @Params - method, prints method name in EELF log.\r
- */ \r
- protected void setIpAndFqdnForEelf(String method) {\r
- MDC.clear();\r
- MDC.put(MDC_SERVICE_NAME, method);\r
- try {\r
- MDC.put(MDC_SERVER_FQDN, InetAddress.getLocalHost().getHostName());\r
- MDC.put(MDC_SERVER_IP_ADDRESS, InetAddress.getLocalHost().getHostAddress());\r
- } catch (Exception e) {\r
- e.printStackTrace();\r
- }\r
-\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.provisioning;
+
+import static com.att.eelf.configuration.Configuration.MDC_SERVER_FQDN;
+
+import static com.att.eelf.configuration.Configuration.MDC_SERVER_IP_ADDRESS;
+import static com.att.eelf.configuration.Configuration.MDC_SERVICE_NAME;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.security.cert.X509Certificate;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.List;
+import java.util.ArrayList;
+
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.log4j.Logger;
+import org.json.JSONObject;
+import org.json.JSONTokener;
+import org.onap.dmaap.datarouter.authz.Authorizer;
+import org.onap.dmaap.datarouter.authz.impl.ProvAuthorizer;
+import org.onap.dmaap.datarouter.authz.impl.ProvDataProvider;
+import org.onap.dmaap.datarouter.provisioning.beans.Deleteable;
+import org.onap.dmaap.datarouter.provisioning.beans.Feed;
+import org.onap.dmaap.datarouter.provisioning.beans.Group;
+import org.onap.dmaap.datarouter.provisioning.beans.Insertable;
+import org.onap.dmaap.datarouter.provisioning.beans.NodeClass;
+import org.onap.dmaap.datarouter.provisioning.beans.Parameters;
+import org.onap.dmaap.datarouter.provisioning.beans.Subscription;
+import org.onap.dmaap.datarouter.provisioning.beans.Updateable;
+import org.onap.dmaap.datarouter.provisioning.utils.DB;
+import org.onap.dmaap.datarouter.provisioning.utils.ThrottleFilter;
+import org.json.JSONException;
+import org.slf4j.MDC;
+
+import java.util.Properties;
+import java.util.regex.Pattern;
+import javax.mail.Message;
+import javax.mail.MessagingException;
+import javax.mail.Multipart;
+import javax.mail.Session;
+import javax.mail.Transport;
+import javax.mail.internet.AddressException;
+import javax.mail.internet.InternetAddress;
+import javax.mail.internet.MimeBodyPart;
+import javax.mail.internet.MimeMessage;
+import javax.mail.internet.MimeMultipart;
+/**
+ * This is the base class for all Servlets in the provisioning code.
+ * It provides standard constants and some common methods.
+ *
+ * @author Robert Eby
+ * @version $Id: BaseServlet.java,v 1.16 2014/03/12 19:45:40 eby Exp $
+ */
+@SuppressWarnings("serial")
+public class BaseServlet extends HttpServlet implements ProvDataProvider {
+ public static final String BEHALF_HEADER = "X-ATT-DR-ON-BEHALF-OF";
+ public static final String FEED_BASECONTENT_TYPE = "application/vnd.att-dr.feed";
+ public static final String FEED_CONTENT_TYPE = "application/vnd.att-dr.feed; version=2.0";
+ public static final String FEEDFULL_CONTENT_TYPE = "application/vnd.att-dr.feed-full; version=2.0";
+ public static final String FEEDLIST_CONTENT_TYPE = "application/vnd.att-dr.feed-list; version=1.0";
+ public static final String SUB_BASECONTENT_TYPE = "application/vnd.att-dr.subscription";
+ public static final String SUB_CONTENT_TYPE = "application/vnd.att-dr.subscription; version=2.0";
+ public static final String SUBFULL_CONTENT_TYPE = "application/vnd.att-dr.subscription-full; version=2.0";
+ public static final String SUBLIST_CONTENT_TYPE = "application/vnd.att-dr.subscription-list; version=1.0";
+
+
+ //Adding groups functionality, ...1610
+ public static final String GROUP_BASECONTENT_TYPE = "application/vnd.att-dr.group";
+ public static final String GROUP_CONTENT_TYPE = "application/vnd.att-dr.group; version=2.0";
+ public static final String GROUPFULL_CONTENT_TYPE = "application/vnd.att-dr.group-full; version=2.0";
+ public static final String GROUPLIST_CONTENT_TYPE = "application/vnd.att-dr.fegrouped-list; version=1.0";
+
+
+ public static final String LOGLIST_CONTENT_TYPE = "application/vnd.att-dr.log-list; version=1.0";
+ public static final String PROVFULL_CONTENT_TYPE1 = "application/vnd.att-dr.provfeed-full; version=1.0";
+ public static final String PROVFULL_CONTENT_TYPE2 = "application/vnd.att-dr.provfeed-full; version=2.0";
+ public static final String CERT_ATTRIBUTE = "javax.servlet.request.X509Certificate";
+
+ public static final String DB_PROBLEM_MSG = "There has been a problem with the DB. It is suggested you try the operation again.";
+
+ public static final int DEFAULT_MAX_FEEDS = 10000;
+ public static final int DEFAULT_MAX_SUBS = 100000;
+ public static final int DEFAULT_POKETIMER1 = 5;
+ public static final int DEFAULT_POKETIMER2 = 30;
+ public static final String DEFAULT_DOMAIN = "web.att.com";
+ public static final String DEFAULT_PROVSRVR_NAME = "feeds-drtr.web.att.com";
+ public static final String RESEARCH_SUBNET = "135.207.136.128/25";
+ public static final String STATIC_ROUTING_NODES = ""; //Adding new param for static Routing - Rally:US664862-1610
+
+ /** A boolean to trigger one time "provisioning changed" event on startup */
+ private static boolean startmsg_flag = true;
+ /** This POD should require SSL connections from clients; pulled from the DB (PROV_REQUIRE_SECURE) */
+ private static boolean require_secure = true;
+ /** This POD should require signed, recognized certificates from clients; pulled from the DB (PROV_REQUIRE_CERT) */
+ private static boolean require_cert = true;
+ /** The set of authorized addresses and networks; pulled from the DB (PROV_AUTH_ADDRESSES) */
+ private static Set<String> authorizedAddressesAndNetworks = new HashSet<String>();
+ /** The set of authorized names; pulled from the DB (PROV_AUTH_SUBJECTS) */
+ private static Set<String> authorizedNames = new HashSet<String>();
+ /** The FQDN of the initially "active" provisioning server in this Data Router ecosystem */
+ private static String initial_active_pod;
+ /** The FQDN of the initially "standby" provisioning server in this Data Router ecosystem */
+ private static String initial_standby_pod;
+ /** The FQDN of this provisioning server in this Data Router ecosystem */
+ private static String this_pod;
+ /** "Timer 1" - used to determine when to notify nodes of provisioning changes */
+ private static long poke_timer1;
+ /** "Timer 2" - used to determine when to notify nodes of provisioning changes */
+ private static long poke_timer2;
+ /** Array of nodes names and/or FQDNs */
+ private static String[] nodes = new String[0];
+ /** Array of node IP addresses */
+ private static InetAddress[] nodeAddresses = new InetAddress[0];
+ /** Array of POD IP addresses */
+ private static InetAddress[] podAddresses = new InetAddress[0];
+ /** The maximum number of feeds allowed; pulled from the DB (PROV_MAXFEED_COUNT) */
+ protected static int max_feeds = 0;
+ /** The maximum number of subscriptions allowed; pulled from the DB (PROV_MAXSUB_COUNT) */
+ protected static int max_subs = 0;
+ /** The current number of feeds in the system */
+ protected static int active_feeds = 0;
+ /** The current number of subscriptions in the system */
+ protected static int active_subs = 0;
+ /** The domain used to generate a FQDN from the "bare" node names */
+ public static String prov_domain = "web.att.com";
+ /** The standard FQDN of the provisioning server in this Data Router ecosystem */
+ public static String prov_name = "feeds-drtr.web.att.com";
+ /** The standard FQDN of the ACTIVE provisioning server in this Data Router ecosystem */
+ public static String active_prov_name = "feeds-drtr.web.att.com";
+ /** Special subnet that is allowed access to /internal */
+ protected static String special_subnet = RESEARCH_SUBNET;
+
+ /** Special subnet that is allowed access to /internal to Lab Machine */
+ protected static String special_subnet_secondary = RESEARCH_SUBNET;
+ protected static String static_routing_nodes = STATIC_ROUTING_NODES; //Adding new param for static Routing - Rally:US664862-1610
+
+ /** This logger is used to log provisioning events */
+ protected static Logger eventlogger;
+ /** This logger is used to log internal events (errors, etc.) */
+ protected static Logger intlogger;
+ /** Authorizer - interface to the Policy Engine */
+ protected static Authorizer authz;
+ /** The Synchronizer used to sync active DB to standby one */
+ protected static SynchronizerTask synctask = null;
+
+ //Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.
+ private InetAddress thishost;
+ private InetAddress loopback;
+ private static Boolean mailSendFlag = false;
+
+ public static final String MAILCONFIG_FILE = "mail.properties";
+ private static Properties mailprops;
    /**
     * Initialize data common to all the provisioning server servlets:
     * lazily create the shared static loggers, the Authorizer and the
     * Synchronizer, and (once per JVM) load the provisioning parameters.
     */
    protected BaseServlet() {
        // NOTE(review): these null-checked static initializations are unsynchronized;
        // servlet construction is assumed effectively single-threaded at startup — confirm.
        if (eventlogger == null)
            eventlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.events");
        if (intlogger == null)
            intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");
        if (authz == null)
            authz = new ProvAuthorizer(this);
        if (startmsg_flag) {
            // First servlet constructed: pull parameters from the DB once.
            startmsg_flag = false;
            provisioningParametersChanged();
        }
        if (synctask == null) {
            synctask = SynchronizerTask.getSynchronizer();
        }
        String name = this.getClass().getName();
        intlogger.info("PROV0002 Servlet "+name+" started.");
    }
    /**
     * Standard servlet initialization: cache this host's address and the
     * loopback address (used by isAuthorizedForInternal()), then run the
     * one-time HTTPS relaxation check.
     */
    @Override
    public void init(ServletConfig config) throws ServletException {
        super.init(config);
        try {
            thishost = InetAddress.getLocalHost();
            loopback = InetAddress.getLoopbackAddress();
            checkHttpsRelaxation(); //Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.
        } catch (UnknownHostException e) {
            // If the local host cannot be resolved, thishost stays null;
            // isAuthorizedForInternal() tolerates a null thishost/loopback.
        }
    }
+ protected int getIdFromPath(HttpServletRequest req) {
+ String path = req.getPathInfo();
+ if (path == null || path.length() < 2)
+ return -1;
+ try {
+ return Integer.parseInt(path.substring(1));
+ } catch (NumberFormatException e) {
+ return -1;
+ }
+ }
    /**
     * Read the request's input stream and return a JSONObject from it.
     * @param req the HTTP request
     * @return the JSONObject, or null if the stream cannot be read or parsed
     */
    protected JSONObject getJSONfromInput(HttpServletRequest req) {
        JSONObject jo = null;
        try {
            jo = new JSONObject(new JSONTokener(req.getInputStream()));
            if (intlogger.isDebugEnabled())
                intlogger.debug("JSON: "+jo.toString());
        } catch (Exception e) {
            // Any IO or JSON parse failure yields null; callers treat null as "bad JSON".
            intlogger.info("Error reading JSON: "+e);
        }
        return jo;
    }
    /**
     * Check if the remote host is authorized to perform provisioning.
     * Is the request secure?
     * Is it coming from an authorized IP address or network (configured via PROV_AUTH_ADDRESSES)?
     * Does it have a valid client certificate (configured via PROV_AUTH_SUBJECTS)?
     * @param request the request
     * @return an error string, or null if all is OK
     */
    protected String isAuthorizedForProvisioning(HttpServletRequest request) {
        // Is the request https?
        if (require_secure && !request.isSecure()) {
            return "Request must be made over an HTTPS connection.";
        }

        // Is the remote IP in any authorized address/network?
        String remote = request.getRemoteAddr();
        try {
            boolean found = false;
            InetAddress ip = InetAddress.getByName(remote);
            for (String addrnet : authorizedAddressesAndNetworks) {
                found |= addressMatchesNetwork(ip, addrnet);
            }
            if (!found) {
                return "Unauthorized address: "+remote;
            }
        } catch (UnknownHostException e) {
            // An unresolvable remote address is treated as unauthorized.
            return "Unauthorized address: "+remote;
        }

        // Does the remote end present a valid client certificate?
        if (require_cert) {
            X509Certificate certs[] = (X509Certificate[]) request.getAttribute(CERT_ATTRIBUTE);
            if (certs == null || certs.length == 0) {
                return "Client certificate is missing.";
            }
            // cert[0] is the client cert
            // see http://www.proto.research.att.com/java/java7/api/javax/net/ssl/SSLSession.html#getPeerCertificates()
            String name = certs[0].getSubjectX500Principal().getName();
            if (!authorizedNames.contains(name)) {
                return "No authorized certificate found.";
            }
        }

        // No problems!
        return null;
    }
    /**
     * Check if the remote IP address is authorized to see the /internal URL tree.
     * Authorized callers are: any DR node, either POD, this host itself, the
     * loopback address, and the two "special" subnets.
     * @param request the HTTP request
     * @return true iff authorized
     */
    protected boolean isAuthorizedForInternal(HttpServletRequest request) {
        try {
            InetAddress ip = InetAddress.getByName(request.getRemoteAddr());
            for (InetAddress node : getNodeAddresses()) {
                // entries can be null when a node name failed DNS resolution
                if (node != null && ip.equals(node))
                    return true;
            }
            for (InetAddress pod : getPodAddresses()) {
                if (pod != null && ip.equals(pod))
                    return true;
            }
            if (thishost != null && ip.equals(thishost))
                return true;
            if (loopback != null && ip.equals(loopback))
                return true;
            // Also allow the "special subnet" access
            if (addressMatchesNetwork(ip, special_subnet_secondary))
                return true;
            if (addressMatchesNetwork(ip, special_subnet))
                return true;
        } catch (UnknownHostException e) {
            // unresolvable address => not authorized
        }
        return false;
    }
+ /**
+ * Check if an IP address matches a network address.
+ * @param ip the IP address
+ * @param s the network address; a bare IP address may be matched also
+ * @return true if they intersect
+ */
+ protected static boolean addressMatchesNetwork(InetAddress ip, String s) {
+ int mlen = -1;
+ int n = s.indexOf("/");
+ if (n >= 0) {
+ mlen = Integer.parseInt(s.substring(n+1));
+ s = s.substring(0, n);
+ }
+ try {
+ InetAddress i2 = InetAddress.getByName(s);
+ byte[] b1 = ip.getAddress();
+ byte[] b2 = i2.getAddress();
+ if (b1.length != b2.length)
+ return false;
+ if (mlen > 0) {
+ byte[] masks = {
+ (byte)0x00, (byte)0x80, (byte)0xC0, (byte)0xE0,
+ (byte)0xF0, (byte)0xF8, (byte)0xFC, (byte)0xFE
+ };
+ byte mask = masks[mlen%8];
+ for (n = mlen/8; n < b1.length; n++) {
+ b1[n] &= mask;
+ b2[n] &= mask;
+ mask = 0;
+ }
+ }
+ for (n = 0; n < b1.length; n++)
+ if (b1[n] != b2[n])
+ return false;
+ } catch (UnknownHostException e) {
+ return false;
+ }
+ return true;
+ }
+ /**
+ * Something has changed in the provisioning data.
+ * Start the timers that will cause the pre-packaged JSON string to be regenerated,
+ * and cause nodes and the other provisioning server to be notified.
+ */
+ public static void provisioningDataChanged() {
+ long now = System.currentTimeMillis();
+ Poker p = Poker.getPoker();
+ p.setTimers(now + (poke_timer1 * 1000L), now + (poke_timer2 * 1000L));
+ }
+ /**
+ * Something in the parameters has changed, reload all parameters from the DB.
+ */
+ public static void provisioningParametersChanged() {
+ Map<String,String> map = Parameters.getParameters();
+ require_secure = getBoolean(map, Parameters.PROV_REQUIRE_SECURE);
+ require_cert = getBoolean(map, Parameters.PROV_REQUIRE_CERT);
+ authorizedAddressesAndNetworks = getSet(map, Parameters.PROV_AUTH_ADDRESSES);
+ authorizedNames = getSet (map, Parameters.PROV_AUTH_SUBJECTS);
+ nodes = getSet (map, Parameters.NODES).toArray(new String[0]);
+ max_feeds = getInt (map, Parameters.PROV_MAXFEED_COUNT, DEFAULT_MAX_FEEDS);
+ max_subs = getInt (map, Parameters.PROV_MAXSUB_COUNT, DEFAULT_MAX_SUBS);
+ poke_timer1 = getInt (map, Parameters.PROV_POKETIMER1, DEFAULT_POKETIMER1);
+ poke_timer2 = getInt (map, Parameters.PROV_POKETIMER2, DEFAULT_POKETIMER2);
+ prov_domain = getString (map, Parameters.PROV_DOMAIN, DEFAULT_DOMAIN);
+ prov_name = getString (map, Parameters.PROV_NAME, DEFAULT_PROVSRVR_NAME);
+ active_prov_name = getString (map, Parameters.PROV_ACTIVE_NAME, prov_name);
+ special_subnet = getString (map, Parameters.PROV_SPECIAL_SUBNET, RESEARCH_SUBNET);
+ static_routing_nodes = getString (map, Parameters.STATIC_ROUTING_NODES, ""); //Adding new param for static Routing - Rally:US664862-1610
+ initial_active_pod = getString (map, Parameters.ACTIVE_POD, "");
+ initial_standby_pod = getString (map, Parameters.STANDBY_POD, "");
+ static_routing_nodes = getString (map, Parameters.STATIC_ROUTING_NODES, ""); //Adding new param for static Routing - Rally:US664862-1610
+ active_feeds = Feed.countActiveFeeds();
+ active_subs = Subscription.countActiveSubscriptions();
+ try {
+ this_pod = InetAddress.getLocalHost().getHostName();
+ } catch (UnknownHostException e) {
+ this_pod = "";
+ intlogger.warn("PROV0014 Cannot determine the name of this provisioning server.");
+ }
+
+ // Normalize the nodes, and fill in nodeAddresses
+ InetAddress[] na = new InetAddress[nodes.length];
+ for (int i = 0; i < nodes.length; i++) {
+ if (nodes[i].indexOf('.') < 0)
+ nodes[i] += "." + prov_domain;
+ try {
+ na[i] = InetAddress.getByName(nodes[i]);
+ intlogger.debug("PROV0003 DNS lookup: "+nodes[i]+" => "+na[i].toString());
+ } catch (UnknownHostException e) {
+ na[i] = null;
+ intlogger.warn("PROV0004 Cannot lookup "+nodes[i]+": "+e);
+ }
+ }
+
+ //Reset Nodes arr after - removing static routing Nodes, Rally Userstory - US664862 .
+ List<String> filterNodes = new ArrayList<>();
+ for (int i = 0; i < nodes.length; i++) {
+ if(!static_routing_nodes.contains(nodes[i])){
+ filterNodes.add(nodes[i]);
+ }
+ }
+ String [] filteredNodes = filterNodes.toArray(new String[filterNodes.size()]);
+ nodes = filteredNodes;
+
+ nodeAddresses = na;
+ NodeClass.setNodes(nodes); // update NODES table
+
+ // Normalize the PODs, and fill in podAddresses
+ String[] pods = getPods();
+ na = new InetAddress[pods.length];
+ for (int i = 0; i < pods.length; i++) {
+ if (pods[i].indexOf('.') < 0)
+ pods[i] += "." + prov_domain;
+ try {
+ na[i] = InetAddress.getByName(pods[i]);
+ intlogger.debug("PROV0003 DNS lookup: "+pods[i]+" => "+na[i].toString());
+ } catch (UnknownHostException e) {
+ na[i] = null;
+ intlogger.warn("PROV0004 Cannot lookup "+pods[i]+": "+e);
+ }
+ }
+ podAddresses = na;
+
+ // Update ThrottleFilter
+ ThrottleFilter.configure();
+
+ // Check if we are active or standby POD
+ if (!isInitialActivePOD() && !isInitialStandbyPOD())
+ intlogger.warn("PROV0015 This machine is neither the active nor the standby POD.");
+ }
+
+
+ /**Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.
+ * Load mail properties.
+ * @author vs215k
+ *
+ **/
+ private void loadMailProperties() {
+ if (mailprops == null) {
+ mailprops = new Properties();
+ InputStream inStream = getClass().getClassLoader().getResourceAsStream(MAILCONFIG_FILE);
+ try {
+ mailprops.load(inStream);
+ } catch (IOException e) {
+ intlogger.fatal("PROV9003 Opening properties: "+e.getMessage());
+ e.printStackTrace();
+ System.exit(1);
+ }
+ finally {
+ try {
+ inStream.close();
+ }
+ catch (IOException e) {
+ }
+ }
+ }
+ }
+
+ /**Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.
+ * Check if HTTPS Relexaction is enabled
+ * @author vs215k
+ *
+ **/
+ private void checkHttpsRelaxation() {
+ if(mailSendFlag == false) {
+ Properties p = (new DB()).getProperties();
+ intlogger.info("HTTPS relaxatio: "+p.get("org.onap.dmaap.datarouter.provserver.https.relaxation"));
+
+ if(p.get("org.onap.dmaap.datarouter.provserver.https.relaxation").equals("true")) {
+ try {
+ notifyPSTeam(p.get("org.onap.dmaap.datarouter.provserver.https.relax.notify").toString());
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ mailSendFlag = true;
+ }
+ }
+
+ /**Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.
+ * @author vs215k
+ * @param email - list of email ids to notify if HTTP relexcation is enabled.
+ **/
+ private void notifyPSTeam(String email) throws Exception {
+ loadMailProperties(); //Load HTTPS Relex mail properties.
+ String[] emails = email.split(Pattern.quote("|"));
+
+ Properties mailproperties = new Properties();
+ mailproperties.put("mail.smtp.host", mailprops.get("com.att.dmaap.datarouter.mail.server"));
+ mailproperties.put("mail.transport.protocol", mailprops.get("com.att.dmaap.datarouter.mail.protocol"));
+
+ Session session = Session.getDefaultInstance(mailproperties, null);
+ Multipart mp = new MimeMultipart();
+ MimeBodyPart htmlPart = new MimeBodyPart();
+
+ try {
+
+ Message msg = new MimeMessage(session);
+ msg.setFrom(new InternetAddress(mailprops.get("com.att.dmaap.datarouter.mail.from").toString()));
+
+ InternetAddress[] addressTo = new InternetAddress[emails.length];
+ for ( int x =0 ; x < emails.length; x++) {
+ addressTo[x] = new InternetAddress(emails[x]);
+ }
+
+ msg.addRecipients(Message.RecipientType.TO, addressTo);
+ msg.setSubject(mailprops.get("com.att.dmaap.datarouter.mail.subject").toString());
+ htmlPart.setContent(mailprops.get("com.att.dmaap.datarouter.mail.body").toString().replace("[SERVER]", InetAddress.getLocalHost().getHostName()), "text/html");
+ mp.addBodyPart(htmlPart);
+ msg.setContent(mp);
+
+ System.out.println(mailprops.get("com.att.dmaap.datarouter.mail.body").toString().replace("[SERVER]", InetAddress.getLocalHost().getHostName()));
+
+ Transport.send(msg);
+ intlogger.info("HTTPS relaxation mail is sent to - : "+email);
+
+ } catch (AddressException e) {
+ intlogger.error("Invalid email address, unable to send https relaxation mail to - : "+email);
+ } catch (MessagingException e) {
+ intlogger.error("Invalid email address, unable to send https relaxation mail to - : "+email);
+ }
+ }
+
+
+ /**
+ * Get an array of all node names in the DR network.
+ * @return an array of Strings
+ */
+ public static String[] getNodes() {
+ return nodes;
+ }
+ /**
+ * Get an array of all node InetAddresses in the DR network.
+ * @return an array of InetAddresses
+ */
+ public static InetAddress[] getNodeAddresses() {
+ return nodeAddresses;
+ }
+ /**
+ * Get an array of all POD names in the DR network.
+ * @return an array of Strings
+ */
+ public static String[] getPods() {
+ return new String[] { initial_active_pod, initial_standby_pod };
+ }
+ /**
+ * Get an array of all POD InetAddresses in the DR network.
+ * @return an array of InetAddresses
+ */
+ public static InetAddress[] getPodAddresses() {
+ return podAddresses;
+ }
    /**
     * Is this host the initially-ACTIVE provisioning server (POD)?
     * Note: this used to be called isActivePOD(); that was a misnomer, as the active status
     * could shift to the standby POD without these parameters changing. Hence, the function names
     * have been changed to more accurately reflect their purpose.
     * @return true iff this host's name equals the configured ACTIVE_POD parameter
     */
    public static boolean isInitialActivePOD() {
        return this_pod.equals(initial_active_pod);
    }
    /**
     * Is this host the initially-STANDBY provisioning server (POD)?
     * Note: this used to be called isStandbyPOD(); that was a misnomer, as the standby status
     * could shift to the active POD without these parameters changing. Hence, the function names
     * have been changed to more accurately reflect their purpose.
     * @return true iff this host's name equals the configured STANDBY_POD parameter
     */
    public static boolean isInitialStandbyPOD() {
        return this_pod.equals(initial_standby_pod);
    }
    /**
     * INSERT an {@link Insertable} bean into the database.
     * The connection is always returned to the pool, even on failure.
     * @param bean the bean representing a row to insert
     * @return true if the INSERT was successful
     */
    protected boolean doInsert(Insertable bean) {
        boolean rv = false;
        DB db = new DB();
        Connection conn = null;
        try {
            conn = db.getConnection();
            rv = bean.doInsert(conn);
        } catch (SQLException e) {
            rv = false;
            intlogger.warn("PROV0005 doInsert: "+e.getMessage());
            e.printStackTrace();
        } finally {
            if (conn != null)
                db.release(conn);
        }
        return rv;
    }
    /**
     * UPDATE an {@link Updateable} bean in the database.
     * The connection is always returned to the pool, even on failure.
     * @param bean the bean representing a row to update
     * @return true if the UPDATE was successful
     */
    protected boolean doUpdate(Updateable bean) {
        boolean rv = false;
        DB db = new DB();
        Connection conn = null;
        try {
            conn = db.getConnection();
            rv = bean.doUpdate(conn);
        } catch (SQLException e) {
            rv = false;
            intlogger.warn("PROV0006 doUpdate: "+e.getMessage());
            e.printStackTrace();
        } finally {
            if (conn != null)
                db.release(conn);
        }
        return rv;
    }
    /**
     * DELETE an {@link Deleteable} bean from the database.
     * The connection is always returned to the pool, even on failure.
     * @param bean the bean representing a row to delete
     * @return true if the DELETE was successful
     */
    protected boolean doDelete(Deleteable bean) {
        boolean rv = false;
        DB db = new DB();
        Connection conn = null;
        try {
            conn = db.getConnection();
            rv = bean.doDelete(conn);
        } catch (SQLException e) {
            rv = false;
            intlogger.warn("PROV0007 doDelete: "+e.getMessage());
            e.printStackTrace();
        } finally {
            if (conn != null)
                db.release(conn);
        }
        return rv;
    }
+ private static boolean getBoolean(Map<String,String> map, String name) {
+ String s = map.get(name);
+ return (s != null) && s.equalsIgnoreCase("true");
+ }
+ private static String getString(Map<String,String> map, String name, String dflt) {
+ String s = map.get(name);
+ return (s != null) ? s : dflt;
+ }
+ private static int getInt(Map<String,String> map, String name, int dflt) {
+ try {
+ String s = map.get(name);
+ return Integer.parseInt(s);
+ } catch (NumberFormatException e) {
+ return dflt;
+ }
+ }
+ private static Set<String> getSet(Map<String,String> map, String name) {
+ Set<String> set = new HashSet<String>();
+ String s = map.get(name);
+ if (s != null) {
+ String[] pp = s.split("\\|");
+ if (pp != null) {
+ for (String t : pp) {
+ String t2 = t.trim();
+ if (t2.length() > 0)
+ set.add(t2);
+ }
+ }
+ }
+ return set;
+ }
+
+ /**
+ * A class used to encapsulate a Content-type header, separating out the "version" attribute
+ * (which defaults to "1.0" if missing).
+ */
+ public class ContentHeader {
+ private String type = "";
+ private Map<String, String> map = new HashMap<String, String>();
+ public ContentHeader() {
+ this("", "1.0");
+ }
+ public ContentHeader(String t, String v) {
+ type = t.trim();
+ map.put("version", v);
+ }
+ public String getType() {
+ return type;
+ }
+ public String getAttribute(String key) {
+ String s = map.get(key);
+ if (s == null)
+ s = "";
+ return s;
+ }
+ }
+
+ /**
+ * Get the ContentHeader from an HTTP request.
+ * @param req the request
+ * @return the header, encapsulated in a ContentHeader object
+ */
+ public ContentHeader getContentHeader(HttpServletRequest req) {
+ ContentHeader ch = new ContentHeader();
+ String s = req.getHeader("Content-Type");
+ if (s != null) {
+ String[] pp = s.split(";");
+ ch.type = pp[0].trim();
+ for (int i = 1; i < pp.length; i++) {
+ int ix = pp[i].indexOf('=');
+ if (ix > 0) {
+ String k = pp[i].substring(0, ix).trim();
+ String v = pp[i].substring(ix+1).trim();
+ ch.map.put(k, v);
+ } else {
+ ch.map.put(pp[i].trim(), "");
+ }
+ }
+ }
+ return ch;
+ }
    // Methods for the Policy Engine classes - ProvDataProvider interface
    /**
     * Look up the publisher of a feed for the Policy Engine.
     * @param feedId the feed id as a decimal string
     * @return the publisher, or null if the id is non-numeric or the feed does not exist
     */
    @Override
    public String getFeedOwner(String feedId) {
        try {
            int n = Integer.parseInt(feedId);
            Feed f = Feed.getFeedById(n);
            if (f != null)
                return f.getPublisher();
        } catch (NumberFormatException e) {
            // non-numeric id => null
        }
        return null;
    }
    /**
     * Look up the authorization classification of a feed for the Policy Engine.
     * @param feedId the feed id as a decimal string
     * @return the classification, or null if the id is non-numeric or the feed does not exist
     */
    @Override
    public String getFeedClassification(String feedId) {
        try {
            int n = Integer.parseInt(feedId);
            Feed f = Feed.getFeedById(n);
            if (f != null)
                return f.getAuthorization().getClassification();
        } catch (NumberFormatException e) {
            // non-numeric id => null
        }
        return null;
    }
    /**
     * Look up the subscriber of a subscription for the Policy Engine.
     * @param subId the subscription id as a decimal string
     * @return the subscriber, or null if the id is non-numeric or the subscription does not exist
     */
    @Override
    public String getSubscriptionOwner(String subId) {
        try {
            int n = Integer.parseInt(subId);
            Subscription s = Subscription.getSubscriptionById(n);
            if (s != null)
                return s.getSubscriber();
        } catch (NumberFormatException e) {
            // non-numeric id => null
        }
        return null;
    }
+
+ /*
+ * @Method - isUserMemberOfGroup - Rally:US708115
+ * @Params - group object and user to check if exists in given group
+ * @return - boolean value /true/false
+ */
+ private boolean isUserMemberOfGroup(Group group, String user) {
+
+ String groupdetails = group.getMembers().replace("]", "").replace("[", "");
+ String s[] = groupdetails.split("},");
+
+ for(int i=0; i < s.length; i++) {
+ JSONObject jsonObj = null;
+ try {
+ jsonObj = new JSONObject(s[i]+"}");
+ if(jsonObj.get("id").equals(user))
+ return true;
+ } catch (JSONException e) {
+ e.printStackTrace();
+ }
+ }
+ return false;
+
+ }
+
+ /*
+ * @Method - getGroupByFeedGroupId- Rally:US708115
+ * @Params - User to check in group and feedid which is assigned the group.
+ * @return - string value grupid/null
+ */
+ @Override
+ public String getGroupByFeedGroupId(String owner, String feedId) {
+ try {
+ int n = Integer.parseInt(feedId);
+ Feed f = Feed.getFeedById(n);
+ if (f != null) {
+ int groupid = f.getGroupid();
+ if(groupid > 0) {
+ Group group = Group.getGroupById(groupid);
+ if(isUserMemberOfGroup(group, owner)) {
+ return group.getAuthid();
+ }
+ }
+ }
+ } catch (NumberFormatException e) {
+ // ignore
+ }
+ return null;
+ }
+
+ /*
+ * @Method - getGroupBySubGroupId - Rally:US708115
+ * @Params - User to check in group and subid which is assigned the group.
+ * @return - string value grupid/null
+ */
+ @Override
+ public String getGroupBySubGroupId(String owner, String subId) {
+ try {
+ int n = Integer.parseInt(subId);
+ Subscription s = Subscription.getSubscriptionById(n);
+ if (s != null) {
+ int groupid = s.getGroupid();
+ if(groupid > 0) {
+ Group group = Group.getGroupById(groupid);
+ if(isUserMemberOfGroup(group, owner)) {
+ return group.getAuthid();
+ }
+ }
+ }
+ } catch (NumberFormatException e) {
+ // ignore
+ }
+ return null;
+ }
+
+ /*
+ * @Method - setIpAndFqdnForEelf - Rally:US664892
+ * @Params - method, prints method name in EELF log.
+ */
+ protected void setIpAndFqdnForEelf(String method) {
+ MDC.clear();
+ MDC.put(MDC_SERVICE_NAME, method);
+ try {
+ MDC.put(MDC_SERVER_FQDN, InetAddress.getLocalHost().getHostName());
+ MDC.put(MDC_SERVER_IP_ADDRESS, InetAddress.getLocalHost().getHostAddress());
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.provisioning;\r
-\r
-import java.io.IOException;\r
-import java.io.InvalidObjectException;\r
-import java.util.List;\r
-\r
-import javax.servlet.http.HttpServletRequest;\r
-import javax.servlet.http.HttpServletResponse;\r
-\r
-import org.json.JSONObject;\r
-import org.onap.dmaap.datarouter.authz.AuthorizationResponse;\r
-import org.onap.dmaap.datarouter.provisioning.beans.EventLogRecord;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Feed;\r
-import org.onap.dmaap.datarouter.provisioning.eelf.EelfMsgs;\r
-import org.onap.dmaap.datarouter.provisioning.utils.JSONUtilities;\r
-\r
-import com.att.eelf.configuration.EELFLogger;\r
-import com.att.eelf.configuration.EELFManager;\r
-\r
-/**\r
- * This servlet handles provisioning for the <drFeedsURL> which is the URL on the\r
- * provisioning server used to create new feeds. It supports POST to create new feeds,\r
- * and GET to support the Feeds Collection Query function.\r
- *\r
- * @author Robert Eby\r
- * @version $Id$\r
- */\r
-@SuppressWarnings("serial")\r
-public class DRFeedsServlet extends ProxyServlet {\r
- //Adding EELF Logger Rally:US664892 \r
- private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.provisioning.DRFeedsServlet");\r
- \r
- /**\r
- * DELETE on the <drFeedsURL> -- not supported.\r
- */\r
- @Override\r
- public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doDelete");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- String message = "DELETE not allowed for the drFeedsURL.";\r
- EventLogRecord elr = new EventLogRecord(req);\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
- }\r
- /**\r
- * GET on the <drFeedsURL> -- query the list of feeds already existing in the DB.\r
- * See the <i>Feeds Collection Queries</i> section in the <b>Provisioning API</b>\r
- * document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doGet");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- EventLogRecord elr = new EventLogRecord(req);\r
- String message = isAuthorizedForProvisioning(req);\r
- if (message != null) {\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- if (isProxyServer()) {\r
- super.doGet(req, resp);\r
- return;\r
- }\r
- String bhdr = req.getHeader(BEHALF_HEADER);\r
- if (bhdr == null) {\r
- message = "Missing "+BEHALF_HEADER+" header.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- String path = req.getRequestURI(); // Note: I think this should be getPathInfo(), but that doesn't work (Jetty bug?)\r
- if (path != null && !path.equals("/")) {\r
- message = "Bad URL.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
- return;\r
- }\r
- // Check with the Authorizer\r
- AuthorizationResponse aresp = authz.decide(req);\r
- if (! aresp.isAuthorized()) {\r
- message = "Policy Engine disallows access.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
-\r
- String name = req.getParameter("name");\r
- String vers = req.getParameter("version");\r
- String publ = req.getParameter("publisher");\r
- String subs = req.getParameter("subscriber");\r
- if (name != null && vers != null) {\r
- // Display a specific feed\r
- Feed feed = Feed.getFeedByNameVersion(name, vers);\r
- if (feed == null || feed.isDeleted()) {\r
- message = "This feed does not exist in the database.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- } else {\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType(FEEDFULL_CONTENT_TYPE);\r
- resp.getOutputStream().print(feed.asJSONObject(true).toString());\r
- }\r
- } else {\r
- // Display a list of URLs\r
- List<String> list = null;\r
- if (name != null) {\r
- list = Feed.getFilteredFeedUrlList("name", name);\r
- } else if (publ != null) {\r
- list = Feed.getFilteredFeedUrlList("publ", publ);\r
- } else if (subs != null) {\r
- list = Feed.getFilteredFeedUrlList("subs", subs);\r
- } else {\r
- list = Feed.getFilteredFeedUrlList("all", null);\r
- }\r
- String t = JSONUtilities.createJSONArray(list);\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType(FEEDLIST_CONTENT_TYPE);\r
- resp.getOutputStream().print(t);\r
- }\r
- }\r
- /**\r
- * PUT on the <drFeedsURL> -- not supported.\r
- */\r
- @Override\r
- public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doPut");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- String message = "PUT not allowed for the drFeedsURL.";\r
- EventLogRecord elr = new EventLogRecord(req);\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
- }\r
- /**\r
- * POST on the <drFeedsURL> -- create a new feed.\r
- * See the <i>Creating a Feed</i> section in the <b>Provisioning API</b>\r
- * document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doPost");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));\r
- EventLogRecord elr = new EventLogRecord(req);\r
- String message = isAuthorizedForProvisioning(req);\r
- if (message != null) {\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- if (isProxyServer()) {\r
- super.doPost(req, resp);\r
- return;\r
- }\r
- String bhdr = req.getHeader(BEHALF_HEADER);\r
- if (bhdr == null) {\r
- message = "Missing "+BEHALF_HEADER+" header.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- String path = req.getRequestURI(); // Note: I think this should be getPathInfo(), but that doesn't work (Jetty bug?)\r
- if (path != null && !path.equals("/")) {\r
- message = "Bad URL.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
- return;\r
- }\r
- // check content type is FEED_CONTENT_TYPE, version 1.0\r
- ContentHeader ch = getContentHeader(req);\r
- String ver = ch.getAttribute("version");\r
- if (!ch.getType().equals(FEED_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {\r
- message = "Incorrect content-type";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);\r
- return;\r
- }\r
- // Check with the Authorizer\r
- AuthorizationResponse aresp = authz.decide(req);\r
- if (! aresp.isAuthorized()) {\r
- message = "Policy Engine disallows access.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- JSONObject jo = getJSONfromInput(req);\r
- if (jo == null) {\r
- message = "Badly formed JSON";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- if (intlogger.isDebugEnabled())\r
- intlogger.debug(jo.toString());\r
- if (++active_feeds > max_feeds) {\r
- active_feeds--;\r
- message = "Cannot create feed; the maximum number of feeds has been configured.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_CONFLICT);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_CONFLICT, message);\r
- return;\r
- }\r
- Feed feed = null;\r
- try {\r
- feed = new Feed(jo);\r
- } catch (InvalidObjectException e) {\r
- message = e.getMessage();\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- feed.setPublisher(bhdr); // set from X-ATT-DR-ON-BEHALF-OF header\r
-\r
- // Check if this feed already exists\r
- Feed feed2 = Feed.getFeedByNameVersion(feed.getName(), feed.getVersion());\r
- if (feed2 != null) {\r
- message = "This feed already exists in the database.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
-\r
- // Create FEED table entries\r
- if (doInsert(feed)) {\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_CREATED);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_CREATED);\r
- resp.setContentType(FEEDFULL_CONTENT_TYPE);\r
- resp.setHeader("Location", feed.getLinks().getSelf());\r
- resp.getOutputStream().print(feed.asLimitedJSONObject().toString());\r
- provisioningDataChanged();\r
- } else {\r
- // Something went wrong with the INSERT\r
- elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.provisioning;
+
+import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.util.List;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.json.JSONObject;
+import org.onap.dmaap.datarouter.authz.AuthorizationResponse;
+import org.onap.dmaap.datarouter.provisioning.beans.EventLogRecord;
+import org.onap.dmaap.datarouter.provisioning.beans.Feed;
+import org.onap.dmaap.datarouter.provisioning.eelf.EelfMsgs;
+import org.onap.dmaap.datarouter.provisioning.utils.JSONUtilities;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+/**
+ * This servlet handles provisioning for the <drFeedsURL> which is the URL on the
+ * provisioning server used to create new feeds. It supports POST to create new feeds,
+ * and GET to support the Feeds Collection Query function.
+ *
+ * @author Robert Eby
+ * @version $Id$
+ */
+@SuppressWarnings("serial")
+public class DRFeedsServlet extends ProxyServlet {
+ //Adding EELF Logger Rally:US664892
+ private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.provisioning.DRFeedsServlet");
+
+ /**
+ * DELETE on the <drFeedsURL> -- not supported.
+ */
+ @Override
+ public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doDelete");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ String message = "DELETE not allowed for the drFeedsURL.";
+ EventLogRecord elr = new EventLogRecord(req);
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);
+ }
+ /**
+ * GET on the <drFeedsURL> -- query the list of feeds already existing in the DB.
+ * See the <i>Feeds Collection Queries</i> section in the <b>Provisioning API</b>
+ * document for details on how this method should be invoked.
+ */
+ @Override
+ public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doGet");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ EventLogRecord elr = new EventLogRecord(req);
+ String message = isAuthorizedForProvisioning(req);
+ if (message != null) {
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+ if (isProxyServer()) {
+ super.doGet(req, resp);
+ return;
+ }
+ String bhdr = req.getHeader(BEHALF_HEADER);
+ if (bhdr == null) {
+ message = "Missing "+BEHALF_HEADER+" header.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ String path = req.getRequestURI(); // Note: I think this should be getPathInfo(), but that doesn't work (Jetty bug?)
+ if (path != null && !path.equals("/")) {
+ message = "Bad URL.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);
+ return;
+ }
+ // Check with the Authorizer
+ AuthorizationResponse aresp = authz.decide(req);
+ if (! aresp.isAuthorized()) {
+ message = "Policy Engine disallows access.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+
+ String name = req.getParameter("name");
+ String vers = req.getParameter("version");
+ String publ = req.getParameter("publisher");
+ String subs = req.getParameter("subscriber");
+ if (name != null && vers != null) {
+ // Display a specific feed
+ Feed feed = Feed.getFeedByNameVersion(name, vers);
+ if (feed == null || feed.isDeleted()) {
+ message = "This feed does not exist in the database.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ } else {
+ // send response
+ elr.setResult(HttpServletResponse.SC_OK);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentType(FEEDFULL_CONTENT_TYPE);
+ resp.getOutputStream().print(feed.asJSONObject(true).toString());
+ }
+ } else {
+ // Display a list of URLs
+ List<String> list = null;
+ if (name != null) {
+ list = Feed.getFilteredFeedUrlList("name", name);
+ } else if (publ != null) {
+ list = Feed.getFilteredFeedUrlList("publ", publ);
+ } else if (subs != null) {
+ list = Feed.getFilteredFeedUrlList("subs", subs);
+ } else {
+ list = Feed.getFilteredFeedUrlList("all", null);
+ }
+ String t = JSONUtilities.createJSONArray(list);
+ // send response
+ elr.setResult(HttpServletResponse.SC_OK);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentType(FEEDLIST_CONTENT_TYPE);
+ resp.getOutputStream().print(t);
+ }
+ }
+ /**
+ * PUT on the <drFeedsURL> -- not supported.
+ */
+ @Override
+ public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doPut");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ String message = "PUT not allowed for the drFeedsURL.";
+ EventLogRecord elr = new EventLogRecord(req);
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);
+ }
+ /**
+ * POST on the <drFeedsURL> -- create a new feed.
+ * See the <i>Creating a Feed</i> section in the <b>Provisioning API</b>
+ * document for details on how this method should be invoked.
+ */
+ @Override
+ public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doPost");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));
+ EventLogRecord elr = new EventLogRecord(req);
+ String message = isAuthorizedForProvisioning(req);
+ if (message != null) {
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+ if (isProxyServer()) {
+ super.doPost(req, resp);
+ return;
+ }
+ String bhdr = req.getHeader(BEHALF_HEADER);
+ if (bhdr == null) {
+ message = "Missing "+BEHALF_HEADER+" header.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ String path = req.getRequestURI(); // Note: I think this should be getPathInfo(), but that doesn't work (Jetty bug?)
+ if (path != null && !path.equals("/")) {
+ message = "Bad URL.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);
+ return;
+ }
+ // check content type is FEED_CONTENT_TYPE, version 1.0
+ ContentHeader ch = getContentHeader(req);
+ String ver = ch.getAttribute("version");
+ if (!ch.getType().equals(FEED_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {
+ message = "Incorrect content-type";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);
+ return;
+ }
+ // Check with the Authorizer
+ AuthorizationResponse aresp = authz.decide(req);
+ if (! aresp.isAuthorized()) {
+ message = "Policy Engine disallows access.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+ JSONObject jo = getJSONfromInput(req);
+ if (jo == null) {
+ message = "Badly formed JSON";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ if (intlogger.isDebugEnabled())
+ intlogger.debug(jo.toString());
+ if (++active_feeds > max_feeds) {
+ active_feeds--;
+ message = "Cannot create feed; the maximum number of feeds has been configured.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_CONFLICT);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_CONFLICT, message);
+ return;
+ }
+ Feed feed = null;
+ try {
+ feed = new Feed(jo);
+ } catch (InvalidObjectException e) {
+ message = e.getMessage();
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ feed.setPublisher(bhdr); // set from X-ATT-DR-ON-BEHALF-OF header
+
+ // Check if this feed already exists
+ Feed feed2 = Feed.getFeedByNameVersion(feed.getName(), feed.getVersion());
+ if (feed2 != null) {
+ message = "This feed already exists in the database.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+
+ // Create FEED table entries
+ if (doInsert(feed)) {
+ // send response
+ elr.setResult(HttpServletResponse.SC_CREATED);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_CREATED);
+ resp.setContentType(FEEDFULL_CONTENT_TYPE);
+ resp.setHeader("Location", feed.getLinks().getSelf());
+ resp.getOutputStream().print(feed.asLimitedJSONObject().toString());
+ provisioningDataChanged();
+ } else {
+ // Something went wrong with the INSERT
+ elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);
+ }
+ }
+}
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
*/\r
@SuppressWarnings("serial")\r
public class FeedLogServlet extends LogServlet {\r
- public FeedLogServlet() {\r
- super(true);\r
- }\r
+ public FeedLogServlet() {\r
+ super(true);\r
+ }\r
}\r
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.provisioning;\r
-\r
-import java.io.IOException;\r
-import java.io.InvalidObjectException;\r
-\r
-import javax.servlet.http.HttpServletRequest;\r
-import javax.servlet.http.HttpServletResponse;\r
-\r
-import org.json.JSONObject;\r
-import org.onap.dmaap.datarouter.authz.AuthorizationResponse;\r
-import org.onap.dmaap.datarouter.provisioning.beans.EventLogRecord;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Feed;\r
-import org.onap.dmaap.datarouter.provisioning.eelf.EelfMsgs;\r
-\r
-import com.att.eelf.configuration.EELFLogger;\r
-import com.att.eelf.configuration.EELFManager;\r
-\r
-/**\r
- * This servlet handles provisioning for the <feedURL> which is generated by the provisioning\r
- * server to handle a particular feed. It supports DELETE to mark the feed as deleted,\r
- * and GET to retrieve information about the feed, and PUT to modify the feed.\r
- *\r
- * @author Robert Eby\r
- * @version $Id$\r
- */\r
-@SuppressWarnings("serial")\r
-public class FeedServlet extends ProxyServlet {\r
-\r
- //Adding EELF Logger Rally:US664892 \r
- private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.provisioning.FeedServlet");\r
-\r
- /**\r
- * Delete the Feed at the address /feed/<feednumber>.\r
- * See the <i>Deleting a Feed</i> section in the <b>Provisioning API</b>\r
- * document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doDelete");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- EventLogRecord elr = new EventLogRecord(req);\r
- String message = isAuthorizedForProvisioning(req);\r
- if (message != null) {\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- if (isProxyServer()) {\r
- super.doDelete(req, resp);\r
- return;\r
- }\r
- String bhdr = req.getHeader(BEHALF_HEADER);\r
- if (bhdr == null) {\r
- message = "Missing "+BEHALF_HEADER+" header.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- int feedid = getIdFromPath(req);\r
- if (feedid < 0) {\r
- message = "Missing or bad feed number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- Feed feed = Feed.getFeedById(feedid);\r
- if (feed == null || feed.isDeleted()) {\r
- message = "Missing or bad feed number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
- return;\r
- }\r
- // Check with the Authorizer\r
- AuthorizationResponse aresp = authz.decide(req);\r
- if (! aresp.isAuthorized()) {\r
- message = "Policy Engine disallows access.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
-\r
- // Delete FEED table entry (set DELETED flag)\r
- feed.setDeleted(true);\r
- if (doUpdate(feed)) {\r
- active_feeds--;\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_NO_CONTENT);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_NO_CONTENT);\r
- provisioningDataChanged();\r
- } else {\r
- // Something went wrong with the UPDATE\r
- elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
- }\r
- }\r
- /**\r
- * Get information on the feed at the address /feed/<feednumber>.\r
- * See the <i>Retrieving Information about a Feed</i> section in the <b>Provisioning API</b>\r
- * document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doGet");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- EventLogRecord elr = new EventLogRecord(req);\r
- String message = isAuthorizedForProvisioning(req);\r
- if (message != null) {\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- if (isProxyServer()) {\r
- super.doGet(req, resp);\r
- return;\r
- }\r
- String bhdr = req.getHeader(BEHALF_HEADER);\r
- if (bhdr == null) {\r
- message = "Missing "+BEHALF_HEADER+" header.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- int feedid = getIdFromPath(req);\r
- if (feedid < 0) {\r
- message = "Missing or bad feed number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- Feed feed = Feed.getFeedById(feedid);\r
- if (feed == null || feed.isDeleted()) {\r
- message = "Missing or bad feed number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
- return;\r
- }\r
- // Check with the Authorizer\r
- AuthorizationResponse aresp = authz.decide(req);\r
- if (! aresp.isAuthorized()) {\r
- message = "Policy Engine disallows access.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
-\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType(FEEDFULL_CONTENT_TYPE);\r
- resp.getOutputStream().print(feed.asJSONObject(true).toString());\r
- }\r
- /**\r
- * PUT on the <feedURL> for a feed.\r
- * See the <i>Modifying a Feed</i> section in the <b>Provisioning API</b>\r
- * document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doPut");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- EventLogRecord elr = new EventLogRecord(req);\r
- String message = isAuthorizedForProvisioning(req);\r
- if (message != null) {\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- if (isProxyServer()) {\r
- super.doPut(req, resp);\r
- return;\r
- }\r
- String bhdr = req.getHeader(BEHALF_HEADER);\r
- if (bhdr == null) {\r
- message = "Missing "+BEHALF_HEADER+" header.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- int feedid = getIdFromPath(req);\r
- if (feedid < 0) {\r
- message = "Missing or bad feed number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- Feed oldFeed = Feed.getFeedById(feedid);\r
- if (oldFeed == null || oldFeed.isDeleted()) {\r
- message = "Missing or bad feed number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
- return;\r
- }\r
- // check content type is FEED_CONTENT_TYPE, version 1.0\r
- ContentHeader ch = getContentHeader(req);\r
- String ver = ch.getAttribute("version");\r
- if (!ch.getType().equals(FEED_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {\r
- message = "Incorrect content-type";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);\r
- return;\r
- }\r
- JSONObject jo = getJSONfromInput(req);\r
- if (jo == null) {\r
- message = "Badly formed JSON";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- if (intlogger.isDebugEnabled())\r
- intlogger.debug(jo.toString());\r
- Feed feed = null;\r
- try {\r
- feed = new Feed(jo);\r
- } catch (InvalidObjectException e) {\r
- message = e.getMessage();\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- feed.setFeedid(feedid);\r
- feed.setPublisher(bhdr); // set from X-ATT-DR-ON-BEHALF-OF header\r
-\r
- String subjectgroup = (req.getHeader("X-ATT-DR-ON-BEHALF-OF-GROUP")); //Adding for group feature:Rally US708115 \r
- if (!oldFeed.getPublisher().equals(feed.getPublisher()) && subjectgroup == null) {\r
- message = "This feed must be modified by the same publisher that created it.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- if (!oldFeed.getName().equals(feed.getName())) {\r
- message = "The name of the feed may not be updated.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- if (!oldFeed.getVersion().equals(feed.getVersion())) {\r
- message = "The version of the feed may not be updated.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- // Check with the Authorizer\r
- AuthorizationResponse aresp = authz.decide(req);\r
- if (! aresp.isAuthorized()) {\r
- message = "Policy Engine disallows access.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
-\r
- // Update FEEDS table entries\r
- if (doUpdate(feed)) {\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType(FEEDFULL_CONTENT_TYPE);\r
- resp.getOutputStream().print(feed.asLimitedJSONObject().toString());\r
-\r
- \r
- /**Change Owner ship of Feed //Adding for group feature:Rally US708115*/\r
- if (jo.has("changeowner") && subjectgroup != null) {\r
- Boolean changeowner = (Boolean) jo.get("changeowner");\r
- if (changeowner != null && changeowner.equals(true)) {\r
- feed.setPublisher(req.getHeader(BEHALF_HEADER));\r
- feed.changeOwnerShip();\r
- }\r
- }\r
- /***End of change ownership*/\r
-\r
- provisioningDataChanged();\r
- } else {\r
- // Something went wrong with the UPDATE\r
- elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
- }\r
- }\r
- /**\r
- * POST on the <feedURL> -- not supported.\r
- */\r
- @Override\r
- public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doPost");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));\r
- String message = "POST not allowed for the feedURL.";\r
- EventLogRecord elr = new EventLogRecord(req);\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.provisioning;
+
+import java.io.IOException;
+import java.io.InvalidObjectException;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.json.JSONObject;
+import org.onap.dmaap.datarouter.authz.AuthorizationResponse;
+import org.onap.dmaap.datarouter.provisioning.beans.EventLogRecord;
+import org.onap.dmaap.datarouter.provisioning.beans.Feed;
+import org.onap.dmaap.datarouter.provisioning.eelf.EelfMsgs;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+/**
+ * This servlet handles provisioning for the <feedURL> which is generated by the provisioning
+ * server to handle a particular feed. It supports DELETE to mark the feed as deleted,
+ * and GET to retrieve information about the feed, and PUT to modify the feed.
+ *
+ * @author Robert Eby
+ * @version $Id$
+ */
+@SuppressWarnings("serial")
+public class FeedServlet extends ProxyServlet {
+
+ //Adding EELF Logger Rally:US664892
+ private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.provisioning.FeedServlet");
+
+ /**
+ * Delete the Feed at the address /feed/<feednumber>.
+ * See the <i>Deleting a Feed</i> section in the <b>Provisioning API</b>
+ * document for details on how this method should be invoked.
+ */
+ @Override
+ public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doDelete");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ EventLogRecord elr = new EventLogRecord(req);
+ String message = isAuthorizedForProvisioning(req);
+ if (message != null) {
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+ if (isProxyServer()) {
+ super.doDelete(req, resp);
+ return;
+ }
+ String bhdr = req.getHeader(BEHALF_HEADER);
+ if (bhdr == null) {
+ message = "Missing "+BEHALF_HEADER+" header.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ int feedid = getIdFromPath(req);
+ if (feedid < 0) {
+ message = "Missing or bad feed number.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ Feed feed = Feed.getFeedById(feedid);
+ if (feed == null || feed.isDeleted()) {
+ message = "Missing or bad feed number.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);
+ return;
+ }
+ // Check with the Authorizer
+ AuthorizationResponse aresp = authz.decide(req);
+ if (! aresp.isAuthorized()) {
+ message = "Policy Engine disallows access.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+
+ // Delete FEED table entry (set DELETED flag)
+ feed.setDeleted(true);
+ if (doUpdate(feed)) {
+ active_feeds--;
+ // send response
+ elr.setResult(HttpServletResponse.SC_NO_CONTENT);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_NO_CONTENT);
+ provisioningDataChanged();
+ } else {
+ // Something went wrong with the UPDATE
+ elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);
+ }
+ }
+ /**
+ * Get information on the feed at the address /feed/&lt;feednumber&gt;.
+ * See the <i>Retrieving Information about a Feed</i> section in the <b>Provisioning API</b>
+ * document for details on how this method should be invoked.
+ */
+ @Override
+ public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ // Record caller identity for EELF audit logging before any validation.
+ setIpAndFqdnForEelf("doGet");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ EventLogRecord elr = new EventLogRecord(req);
+ // isAuthorizedForProvisioning() returns an error message, or null when the caller is allowed.
+ String message = isAuthorizedForProvisioning(req);
+ if (message != null) {
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+ // When this node is acting as a proxy, forward to the active provisioning server.
+ if (isProxyServer()) {
+ super.doGet(req, resp);
+ return;
+ }
+ // The "on behalf of" header identifying the requesting user is mandatory.
+ String bhdr = req.getHeader(BEHALF_HEADER);
+ if (bhdr == null) {
+ message = "Missing "+BEHALF_HEADER+" header.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ // The request path must end in a valid numeric feed id (negative means missing/bad).
+ int feedid = getIdFromPath(req);
+ if (feedid < 0) {
+ message = "Missing or bad feed number.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ // Logically deleted feeds are reported as 404, same as nonexistent ones.
+ Feed feed = Feed.getFeedById(feedid);
+ if (feed == null || feed.isDeleted()) {
+ message = "Missing or bad feed number.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);
+ return;
+ }
+ // Check with the Authorizer (external policy engine)
+ AuthorizationResponse aresp = authz.decide(req);
+ if (! aresp.isAuthorized()) {
+ message = "Policy Engine disallows access.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+
+ // send response: the full feed representation as JSON
+ elr.setResult(HttpServletResponse.SC_OK);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentType(FEEDFULL_CONTENT_TYPE);
+ resp.getOutputStream().print(feed.asJSONObject(true).toString());
+ }
+ /**
+ * PUT on the &lt;feedURL&gt; for a feed -- modify an existing feed.
+ * See the <i>Modifying a Feed</i> section in the <b>Provisioning API</b>
+ * document for details on how this method should be invoked.
+ */
+ @Override
+ public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ // Record caller identity for EELF audit logging before any validation.
+ setIpAndFqdnForEelf("doPut");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ EventLogRecord elr = new EventLogRecord(req);
+ // isAuthorizedForProvisioning() returns an error message, or null when the caller is allowed.
+ String message = isAuthorizedForProvisioning(req);
+ if (message != null) {
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+ // When this node is acting as a proxy, forward to the active provisioning server.
+ if (isProxyServer()) {
+ super.doPut(req, resp);
+ return;
+ }
+ // The "on behalf of" header identifying the requesting user is mandatory.
+ String bhdr = req.getHeader(BEHALF_HEADER);
+ if (bhdr == null) {
+ message = "Missing "+BEHALF_HEADER+" header.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ // The request path must end in a valid numeric feed id (negative means missing/bad).
+ int feedid = getIdFromPath(req);
+ if (feedid < 0) {
+ message = "Missing or bad feed number.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ // The feed being modified must exist and not be logically deleted.
+ Feed oldFeed = Feed.getFeedById(feedid);
+ if (oldFeed == null || oldFeed.isDeleted()) {
+ message = "Missing or bad feed number.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);
+ return;
+ }
+ // check content type is FEED_CONTENT_TYPE, version 1.0
+ // NOTE(review): ver is dereferenced without a null check; a request whose
+ // Content-type lacks a "version" attribute would NPE here -- confirm and guard.
+ ContentHeader ch = getContentHeader(req);
+ String ver = ch.getAttribute("version");
+ if (!ch.getType().equals(FEED_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {
+ message = "Incorrect content-type";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);
+ return;
+ }
+ // Parse the request body; null means it was not valid JSON.
+ JSONObject jo = getJSONfromInput(req);
+ if (jo == null) {
+ message = "Badly formed JSON";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ if (intlogger.isDebugEnabled())
+ intlogger.debug(jo.toString());
+ // Build the replacement Feed object; the Feed constructor validates the JSON.
+ Feed feed = null;
+ try {
+ feed = new Feed(jo);
+ } catch (InvalidObjectException e) {
+ message = e.getMessage();
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ // The id comes from the URL and the publisher from the header, never from the body.
+ feed.setFeedid(feedid);
+ feed.setPublisher(bhdr); // set from X-ATT-DR-ON-BEHALF-OF header
+
+ // A different publisher may only modify the feed when a group header is supplied.
+ String subjectgroup = (req.getHeader("X-ATT-DR-ON-BEHALF-OF-GROUP")); //Adding for group feature:Rally US708115
+ if (!oldFeed.getPublisher().equals(feed.getPublisher()) && subjectgroup == null) {
+ message = "This feed must be modified by the same publisher that created it.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ // Name and version are immutable once a feed is created.
+ if (!oldFeed.getName().equals(feed.getName())) {
+ message = "The name of the feed may not be updated.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ if (!oldFeed.getVersion().equals(feed.getVersion())) {
+ message = "The version of the feed may not be updated.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ // Check with the Authorizer (external policy engine)
+ AuthorizationResponse aresp = authz.decide(req);
+ if (! aresp.isAuthorized()) {
+ message = "Policy Engine disallows access.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+
+ // Update FEEDS table entries
+ if (doUpdate(feed)) {
+ // send response
+ elr.setResult(HttpServletResponse.SC_OK);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentType(FEEDFULL_CONTENT_TYPE);
+ resp.getOutputStream().print(feed.asLimitedJSONObject().toString());
+
+
+ // Change ownership of the feed. Adding for group feature: Rally US708115.
+ // NOTE(review): this runs after the response body has already been written,
+ // so an ownership change here cannot be reported to the client -- confirm intent.
+ if (jo.has("changeowner") && subjectgroup != null) {
+ Boolean changeowner = (Boolean) jo.get("changeowner");
+ if (changeowner != null && changeowner.equals(true)) {
+ feed.setPublisher(req.getHeader(BEHALF_HEADER));
+ feed.changeOwnerShip();
+ }
+ }
+ // End of change ownership.
+
+ provisioningDataChanged();
+ } else {
+ // Something went wrong with the UPDATE
+ elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);
+ }
+ }
+ /**
+ * POST on the &lt;feedURL&gt; -- not supported; always answers 405.
+ */
+ @Override
+ public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ // Record caller identity for EELF audit logging, then reject the method.
+ setIpAndFqdnForEelf("doPost");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));
+ String message = "POST not allowed for the feedURL.";
+ EventLogRecord elr = new EventLogRecord(req);
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.provisioning;\r
-\r
-import java.io.IOException;\r
-import java.io.InvalidObjectException;\r
-import java.util.Collection;\r
-\r
-import javax.servlet.http.HttpServletRequest;\r
-import javax.servlet.http.HttpServletResponse;\r
-\r
-import org.json.JSONObject;\r
-import org.onap.dmaap.datarouter.authz.AuthorizationResponse;\r
-import org.onap.dmaap.datarouter.provisioning.BaseServlet.ContentHeader;\r
-import org.onap.dmaap.datarouter.provisioning.beans.EventLogRecord;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Group;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Subscription;\r
-import org.onap.dmaap.datarouter.provisioning.utils.JSONUtilities;\r
-\r
-/**\r
- * This servlet handles provisioning for the <groups> which is generated by the provisioning\r
- * server to handle the creation and inspection of groups for FEEDS and SUBSCRIPTIONS.\r
- *\r
- * @author Vikram Singh\r
- * @version $Id$\r
- * @version $Id: Group.java,v 1.0 2016/07/19\r
- */\r
-@SuppressWarnings("serial")\r
-public class GroupServlet extends ProxyServlet {\r
- /**\r
- * DELETE on the <GRUPS> -- not supported.\r
- */\r
- @Override\r
- public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- String message = "DELETE not allowed for the GROUPS.";\r
- EventLogRecord elr = new EventLogRecord(req);\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
- }\r
- /**\r
- * GET on the the list of groups to a feed/sub.\r
- * See the <i>Groups Collection Query</i> section in the <b>Provisioning API</b>\r
- * document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- EventLogRecord elr = new EventLogRecord(req);\r
- String message = isAuthorizedForProvisioning(req);\r
- if (message != null) {\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- if (isProxyServer()) {\r
- super.doGet(req, resp);\r
- return;\r
- }\r
- String bhdr = req.getHeader(BEHALF_HEADER);\r
- if (bhdr == null) {\r
- message = "Missing "+BEHALF_HEADER+" header.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- \r
- // Check with the Authorizer\r
- /*AuthorizationResponse aresp = authz.decide(req);\r
- if (! aresp.isAuthorized()) {\r
- message = "Policy Engine disallows access.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }*/\r
- \r
- \r
- /*ContentHeader ch = getContentHeader(req);\r
- String ver = ch.getAttribute("version");\r
- if (!ch.getType().equals(GROUPLIST_CONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {\r
- intlogger.debug("Content-type is: "+req.getHeader("Content-Type"));\r
- message = "Incorrect content-type";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);\r
- return;\r
- }*/\r
- \r
- \r
- int groupid = getIdFromPath(req);\r
- if (groupid < 0) {\r
- message = "Missing or bad group number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- \r
- Group gup = Group.getGroupById(groupid);\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType(GROUPFULL_CONTENT_TYPE);\r
- resp.getOutputStream().print(gup.asJSONObject().toString());\r
-\r
- // Display a list of Groups\r
- /*Collection<Group> list = Group.getGroupById(groupid);\r
- String t = JSONUtilities.createJSONArray(list);\r
-\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType(GROUPLIST_CONTENT_TYPE);\r
- resp.getOutputStream().print(t);*/\r
- }\r
- /**\r
- * PUT on the <GROUPS> -- not supported.\r
- */\r
- @Override\r
- public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- EventLogRecord elr = new EventLogRecord(req);\r
- String message = isAuthorizedForProvisioning(req);\r
- if (message != null) {\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- if (isProxyServer()) {\r
- super.doPut(req, resp);\r
- return;\r
- }\r
- String bhdr = req.getHeader(BEHALF_HEADER);\r
- if (bhdr == null) {\r
- message = "Missing "+BEHALF_HEADER+" header.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- int groupid = getIdFromPath(req);\r
- if (groupid < 0) {\r
- message = "Missing or bad groupid.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- Group oldgup = Group.getGroupById(groupid);\r
- if (oldgup == null) {\r
- message = "Missing or bad group number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
- return;\r
- }\r
- // Check with the Authorizer\r
- /*AuthorizationResponse aresp = authz.decide(req);\r
- if (! aresp.isAuthorized()) {\r
- message = "Policy Engine disallows access.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }*/\r
- // check content type is SUB_CONTENT_TYPE, version 1.0\r
- ContentHeader ch = getContentHeader(req);\r
- String ver = ch.getAttribute("version");\r
- if (!ch.getType().equals(GROUP_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {\r
- message = "Incorrect content-type";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);\r
- return;\r
- }\r
- JSONObject jo = getJSONfromInput(req);\r
- if (jo == null) {\r
- message = "Badly formed JSON";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- if (intlogger.isDebugEnabled())\r
- intlogger.debug(jo.toString());\r
- Group gup = null;\r
- try {\r
- gup = new Group(jo);\r
- } catch (InvalidObjectException e) {\r
- message = e.getMessage();\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- gup.setGroupid(oldgup.getGroupid());\r
- \r
- \r
- Group gb2 = Group.getGroupMatching(gup, oldgup.getGroupid());\r
- if (gb2 != null) {\r
- eventlogger.warn("PROV0011 Creating a duplicate Group: "+gup.getName());\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Duplicate Group:"+gup.getName());\r
- return;\r
- }\r
- \r
- // Update Groups table entries\r
- if (doUpdate(gup)) {\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType(GROUPFULL_CONTENT_TYPE);\r
- resp.getOutputStream().print(gup.asJSONObject().toString());\r
- provisioningDataChanged();\r
- } else {\r
- // Something went wrong with the UPDATE\r
- elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
- }\r
- }\r
- /**\r
- * POST on the <groups> -- create a new GROUPS to a feed.\r
- * See the <i>Creating a GROUPS</i> section in the <b>Provisioning API</b>\r
- * document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- EventLogRecord elr = new EventLogRecord(req);\r
- String message = isAuthorizedForProvisioning(req);\r
- if (message != null) {\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- if (isProxyServer()) {\r
- super.doPost(req, resp);\r
- return;\r
- }\r
- String bhdr = req.getHeader(BEHALF_HEADER);\r
- if (bhdr == null) {\r
- message = "Missing "+BEHALF_HEADER+" header.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- /*int feedid = getIdFromPath(req);\r
- if (feedid < 0) {\r
- message = "Missing or bad feed number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- Feed feed = Feed.getFeedById(feedid);\r
- if (feed == null || feed.isDeleted()) {\r
- message = "Missing or bad feed number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
- return;\r
- }*/\r
- // Check with the Authorizer\r
- /*AuthorizationResponse aresp = authz.decide(req);\r
- if (! aresp.isAuthorized()) {\r
- message = "Policy Engine disallows access.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }*/\r
-\r
- // check content type is SUB_CONTENT_TYPE, version 1.0\r
- ContentHeader ch = getContentHeader(req);\r
- String ver = ch.getAttribute("version");\r
- if (!ch.getType().equals(GROUP_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {\r
- intlogger.debug("Content-type is: "+req.getHeader("Content-Type"));\r
- message = "Incorrect content-type";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);\r
- return;\r
- }\r
- JSONObject jo = getJSONfromInput(req);\r
- if (jo == null) {\r
- message = "Badly formed JSON";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- if (intlogger.isDebugEnabled())\r
- intlogger.debug(jo.toString());\r
- \r
- Group gup = null;\r
- try {\r
- gup = new Group(jo);\r
- } catch (InvalidObjectException e) {\r
- message = e.getMessage();\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- //gup.setFeedid(feedid);\r
- //sub.setSubscriber(bhdr); // set from X-ATT-DR-ON-BEHALF-OF header\r
-\r
- // Check if this group already exists; not an error (yet), just warn\r
- Group gb2 = Group.getGroupMatching(gup);\r
- if (gb2 != null) {\r
- eventlogger.warn("PROV0011 Creating a duplicate Group: "+gup.getName());\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Duplicate Group:"+gup.getName());\r
- return;\r
- }\r
- \r
- \r
- // Create GROUPS table entries\r
- if (doInsert(gup)) {\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_CREATED);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_CREATED);\r
- resp.setContentType(GROUPFULL_CONTENT_TYPE);\r
- resp.getOutputStream().print(gup.asJSONObject().toString());\r
- provisioningDataChanged();\r
- } else {\r
- // Something went wrong with the INSERT\r
- elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.provisioning;
+
+import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.util.Collection;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.json.JSONObject;
+import org.onap.dmaap.datarouter.authz.AuthorizationResponse;
+import org.onap.dmaap.datarouter.provisioning.BaseServlet.ContentHeader;
+import org.onap.dmaap.datarouter.provisioning.beans.EventLogRecord;
+import org.onap.dmaap.datarouter.provisioning.beans.Group;
+import org.onap.dmaap.datarouter.provisioning.beans.Subscription;
+import org.onap.dmaap.datarouter.provisioning.utils.JSONUtilities;
+
+/**
+ * This servlet handles provisioning for the <groups> which is generated by the provisioning
+ * server to handle the creation and inspection of groups for FEEDS and SUBSCRIPTIONS.
+ *
+ * @author Vikram Singh
+ * @version $Id$
+ * @version $Id: Group.java,v 1.0 2016/07/19
+ */
+@SuppressWarnings("serial")
+public class GroupServlet extends ProxyServlet {
+ /**
+ * DELETE on the &lt;GROUPS&gt; -- not supported; always answers 405.
+ */
+ @Override
+ public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ // Log the rejected request, then report 405 Method Not Allowed.
+ String message = "DELETE not allowed for the GROUPS.";
+ EventLogRecord elr = new EventLogRecord(req);
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);
+ }
+ /**
+ * GET on the list of groups to a feed/sub -- returns the single group named by the path id.
+ * See the <i>Groups Collection Query</i> section in the <b>Provisioning API</b>
+ * document for details on how this method should be invoked.
+ */
+ @Override
+ public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ EventLogRecord elr = new EventLogRecord(req);
+ // isAuthorizedForProvisioning() returns an error message, or null when the caller is allowed.
+ String message = isAuthorizedForProvisioning(req);
+ if (message != null) {
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+ // When this node is acting as a proxy, forward to the active provisioning server.
+ if (isProxyServer()) {
+ super.doGet(req, resp);
+ return;
+ }
+ // The "on behalf of" header identifying the requesting user is mandatory.
+ String bhdr = req.getHeader(BEHALF_HEADER);
+ if (bhdr == null) {
+ message = "Missing "+BEHALF_HEADER+" header.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+
+ // Check with the Authorizer -- intentionally disabled for groups (kept for reference).
+ /*AuthorizationResponse aresp = authz.decide(req);
+ if (! aresp.isAuthorized()) {
+ message = "Policy Engine disallows access.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }*/
+
+
+ // Content-type check intentionally disabled for GET (kept for reference).
+ /*ContentHeader ch = getContentHeader(req);
+ String ver = ch.getAttribute("version");
+ if (!ch.getType().equals(GROUPLIST_CONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {
+ intlogger.debug("Content-type is: "+req.getHeader("Content-Type"));
+ message = "Incorrect content-type";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);
+ return;
+ }*/
+
+
+ // The request path must end in a valid numeric group id (negative means missing/bad).
+ int groupid = getIdFromPath(req);
+ if (groupid < 0) {
+ message = "Missing or bad group number.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+
+ // NOTE(review): no null check here -- Group.getGroupById() can return null
+ // (doPut checks for exactly that), in which case gup.asJSONObject() below would NPE.
+ Group gup = Group.getGroupById(groupid);
+ // send response
+ elr.setResult(HttpServletResponse.SC_OK);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentType(GROUPFULL_CONTENT_TYPE);
+ resp.getOutputStream().print(gup.asJSONObject().toString());
+
+ // Display a list of Groups -- alternative list response (kept for reference).
+ /*Collection<Group> list = Group.getGroupById(groupid);
+ String t = JSONUtilities.createJSONArray(list);
+
+ // send response
+ elr.setResult(HttpServletResponse.SC_OK);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentType(GROUPLIST_CONTENT_TYPE);
+ resp.getOutputStream().print(t);*/
+ }
+ /**
+ * PUT on the &lt;GROUPS&gt; -- update an existing group.
+ * (The original comment said "not supported", but the body below implements the update.)
+ */
+ @Override
+ public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ EventLogRecord elr = new EventLogRecord(req);
+ // isAuthorizedForProvisioning() returns an error message, or null when the caller is allowed.
+ String message = isAuthorizedForProvisioning(req);
+ if (message != null) {
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+ // When this node is acting as a proxy, forward to the active provisioning server.
+ if (isProxyServer()) {
+ super.doPut(req, resp);
+ return;
+ }
+ // The "on behalf of" header identifying the requesting user is mandatory.
+ String bhdr = req.getHeader(BEHALF_HEADER);
+ if (bhdr == null) {
+ message = "Missing "+BEHALF_HEADER+" header.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ // The request path must end in a valid numeric group id (negative means missing/bad).
+ int groupid = getIdFromPath(req);
+ if (groupid < 0) {
+ message = "Missing or bad groupid.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ // The group being modified must already exist.
+ Group oldgup = Group.getGroupById(groupid);
+ if (oldgup == null) {
+ message = "Missing or bad group number.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);
+ return;
+ }
+ // Check with the Authorizer -- intentionally disabled for groups (kept for reference).
+ /*AuthorizationResponse aresp = authz.decide(req);
+ if (! aresp.isAuthorized()) {
+ message = "Policy Engine disallows access.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }*/
+ // check content type is the group base content type, version 1.0 or 2.0
+ // NOTE(review): ver is dereferenced without a null check; a request whose
+ // Content-type lacks a "version" attribute would NPE here -- confirm and guard.
+ ContentHeader ch = getContentHeader(req);
+ String ver = ch.getAttribute("version");
+ if (!ch.getType().equals(GROUP_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {
+ message = "Incorrect content-type";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);
+ return;
+ }
+ // Parse the request body; null means it was not valid JSON.
+ JSONObject jo = getJSONfromInput(req);
+ if (jo == null) {
+ message = "Badly formed JSON";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ if (intlogger.isDebugEnabled())
+ intlogger.debug(jo.toString());
+ // Build the replacement Group object; the Group constructor validates the JSON.
+ Group gup = null;
+ try {
+ gup = new Group(jo);
+ } catch (InvalidObjectException e) {
+ message = e.getMessage();
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ // The group id comes from the URL, never from the body.
+ gup.setGroupid(oldgup.getGroupid());
+
+
+ // Reject the update if another group (excluding this one) already matches.
+ Group gb2 = Group.getGroupMatching(gup, oldgup.getGroupid());
+ if (gb2 != null) {
+ eventlogger.warn("PROV0011 Creating a duplicate Group: "+gup.getName());
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Duplicate Group:"+gup.getName());
+ return;
+ }
+
+ // Update Groups table entries
+ if (doUpdate(gup)) {
+ // send response: the full updated group as JSON
+ elr.setResult(HttpServletResponse.SC_OK);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentType(GROUPFULL_CONTENT_TYPE);
+ resp.getOutputStream().print(gup.asJSONObject().toString());
+ provisioningDataChanged();
+ } else {
+ // Something went wrong with the UPDATE
+ elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);
+ }
+ }
+ /**
+ * POST on the <groups> -- create a new GROUPS to a feed.
+ * See the <i>Creating a GROUPS</i> section in the <b>Provisioning API</b>
+ * document for details on how this method should be invoked.
+ */
+ @Override
+ public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ EventLogRecord elr = new EventLogRecord(req);
+ String message = isAuthorizedForProvisioning(req);
+ if (message != null) {
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+ if (isProxyServer()) {
+ super.doPost(req, resp);
+ return;
+ }
+ String bhdr = req.getHeader(BEHALF_HEADER);
+ if (bhdr == null) {
+ message = "Missing "+BEHALF_HEADER+" header.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ /*int feedid = getIdFromPath(req);
+ if (feedid < 0) {
+ message = "Missing or bad feed number.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ Feed feed = Feed.getFeedById(feedid);
+ if (feed == null || feed.isDeleted()) {
+ message = "Missing or bad feed number.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);
+ return;
+ }*/
+ // Check with the Authorizer
+ /*AuthorizationResponse aresp = authz.decide(req);
+ if (! aresp.isAuthorized()) {
+ message = "Policy Engine disallows access.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }*/
+
+ // check content type is SUB_CONTENT_TYPE, version 1.0
+ ContentHeader ch = getContentHeader(req);
+ String ver = ch.getAttribute("version");
+ if (!ch.getType().equals(GROUP_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {
+ intlogger.debug("Content-type is: "+req.getHeader("Content-Type"));
+ message = "Incorrect content-type";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);
+ return;
+ }
+ JSONObject jo = getJSONfromInput(req);
+ if (jo == null) {
+ message = "Badly formed JSON";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ if (intlogger.isDebugEnabled())
+ intlogger.debug(jo.toString());
+
+ Group gup = null;
+ try {
+ gup = new Group(jo);
+ } catch (InvalidObjectException e) {
+ message = e.getMessage();
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ //gup.setFeedid(feedid);
+ //sub.setSubscriber(bhdr); // set from X-ATT-DR-ON-BEHALF-OF header
+
+ // Check if this group already exists; not an error (yet), just warn
+ Group gb2 = Group.getGroupMatching(gup);
+ if (gb2 != null) {
+ eventlogger.warn("PROV0011 Creating a duplicate Group: "+gup.getName());
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Duplicate Group:"+gup.getName());
+ return;
+ }
+
+
+ // Create GROUPS table entries
+ if (doInsert(gup)) {
+ // send response
+ elr.setResult(HttpServletResponse.SC_CREATED);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_CREATED);
+ resp.setContentType(GROUPFULL_CONTENT_TYPE);
+ resp.getOutputStream().print(gup.asJSONObject().toString());
+ provisioningDataChanged();
+ } else {
+ // Something went wrong with the INSERT
+ elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);
+ }
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.provisioning;\r
-\r
-import java.io.ByteArrayOutputStream;\r
-import java.io.File;\r
-import java.io.IOException;\r
-import java.io.InputStream;\r
-import java.nio.file.FileStore;\r
-import java.nio.file.FileSystem;\r
-import java.nio.file.Files;\r
-import java.nio.file.Path;\r
-import java.nio.file.Paths;\r
-import java.nio.file.StandardCopyOption;\r
-import java.util.Properties;\r
-\r
-import javax.servlet.http.HttpServletRequest;\r
-import javax.servlet.http.HttpServletResponse;\r
-\r
-import org.json.JSONArray;\r
-import org.onap.dmaap.datarouter.provisioning.beans.EventLogRecord;\r
-import org.onap.dmaap.datarouter.provisioning.beans.LogRecord;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Parameters;\r
-import org.onap.dmaap.datarouter.provisioning.eelf.EelfMsgs;\r
-import org.onap.dmaap.datarouter.provisioning.utils.DB;\r
-import org.onap.dmaap.datarouter.provisioning.utils.LogfileLoader;\r
-import org.onap.dmaap.datarouter.provisioning.utils.RLEBitSet;\r
-\r
-import com.att.eelf.configuration.EELFLogger;\r
-import com.att.eelf.configuration.EELFManager;\r
-\r
-/**\r
- * <p>\r
- * This servlet handles requests to URLs under /internal on the provisioning server.\r
- * These include:\r
- * </p>\r
- * <div class="contentContainer">\r
- * <table class="packageSummary" border="0" cellpadding="3" cellspacing="0">\r
- * <caption><span>URL Path Summary</span><span class="tabEnd"> </span></caption>\r
- * <tr>\r
- * <th class="colFirst" width="15%">URL Path</th>\r
- * <th class="colOne">Method</th>\r
- * <th class="colLast">Purpose</th>\r
- * </tr>\r
- * <tr class="altColor">\r
- * <td class="colFirst">/internal/prov</td>\r
- * <td class="colOne">GET</td>\r
- * <td class="colLast">used to GET a full JSON copy of the provisioning data.</td>\r
- * </tr>\r
- * <tr class="rowColor">\r
- * <td class="colFirst">/internal/fetchProv</td>\r
- * <td class="colOne">GET</td>\r
- * <td class="colLast">used to signal to a standby POD that the provisioning data should be fetched from the active POD.</td>\r
- * </tr>\r
- * <tr class="altColor">\r
- * <td class="colFirst" rowspan="2">/internal/logs</td>\r
- * <td class="colOne">GET</td>\r
- * <td class="colLast">used to GET an index of log files and individual logs for this provisioning server.</td>\r
- * </tr>\r
- * <tr class="altColor">\r
- * <td class="colOne">POST</td>\r
- * <td class="colLast">used to POST log files from the individual nodes to this provisioning server.</td>\r
- * </tr>\r
- * <tr class="rowColor">\r
- * <td class="colFirst" rowspan="4">/internal/api</td>\r
- * <td class="colOne">GET</td>\r
- * <td class="colLast">used to GET an individual parameter value. The parameter name is specified by the path after /api/.</td>\r
- * </tr>\r
- * <tr class="rowColor">\r
- * <td class="colOne">PUT</td>\r
- * <td class="colLast">used to set an individual parameter value. The parameter name is specified by the path after /api/.</td>\r
- * </tr>\r
- * <tr class="rowColor">\r
- * <td class="colOne">DELETE</td>\r
- * <td class="colLast">used to remove an individual parameter value. The parameter name is specified by the path after /api/.</td>\r
- * </tr>\r
- * <tr class="rowColor">\r
- * <td class="colOne">POST</td>\r
- * <td class="colLast">used to create a new individual parameter value. The parameter name is specified by the path after /api/.</td>\r
- * </tr>\r
- * <tr class="altColor">\r
- * <td class="colFirst">/internal/halt</td>\r
- * <td class="colOne">GET</td>\r
- * <td class="colLast">used to halt the server (must be accessed from 127.0.0.1).</td>\r
- * </tr>\r
- * <tr class="rowColor">\r
- * <td class="colFirst" rowspan="2">/internal/drlogs</td>\r
- * <td class="colOne">GET</td>\r
- * <td class="colLast">used to get a list of DR log entries available for retrieval.\r
- * Note: these are the actual data router log entries sent to the provisioning server\r
- * by the nodes, not the provisioning server's internal logs (access via /internal/logs above).\r
- * The range is returned as a list of record sequence numbers.</td>\r
- * </tr>\r
- * <tr class="rowColor">\r
- * <td class="colOne">POST</td>\r
- * <td class="colLast">used to retrieve specific log entries.\r
- * The sequence numbers of the records to fetch are POST-ed; the records matching the sequence numbers are returned.</td>\r
- * </tr>\r
- * <tr class="altColor">\r
- * <td class="colFirst">/internal/route/*</td>\r
- * <td class="colOne">*</td>\r
- * <td class="colLast">URLs under this path are handled via the {@link org.onap.dmaap.datarouter.provisioning.RouteServlet}</td>\r
- * </tr>\r
- * </table>\r
- * </div>\r
- * <p>\r
- * Authorization to use these URLs is a little different than for other URLs on the provisioning server.\r
- * For the most part, the IP address that the request comes from should be either:\r
- * </p>\r
- * <ol>\r
- * <li>an IP address of a provisioning server, or</li>\r
- * <li>the IP address of a node (to allow access to /internal/prov), or</li>\r
- * <li>an IP address from the "<i>special subnet</i>" which is configured with\r
- * the PROV_SPECIAL_SUBNET parameter.\r
- * </ol>\r
- * <p>\r
- * In addition, requests to /internal/halt can ONLY come from localhost (127.0.0.1) on the HTTP port.\r
- * </p>\r
- * <p>\r
- * All DELETE/GET/PUT/POST requests made to /internal/api on this servlet on the standby server are\r
- * proxied to the active server (using the {@link ProxyServlet}) if it is up and reachable.\r
- * </p>\r
- *\r
- * @author Robert Eby\r
- * @version $Id: InternalServlet.java,v 1.23 2014/03/24 18:47:10 eby Exp $\r
- */\r
-@SuppressWarnings("serial")\r
-public class InternalServlet extends ProxyServlet {\r
- private static Integer logseq = new Integer(0); // another piece of info to make log spool file names unique\r
- //Adding EELF Logger Rally:US664892 \r
- private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.provisioning.InternalServlet");\r
-\r
- /**\r
- * Delete a parameter at the address /internal/api/<parameter>.\r
- * See the <b>Internal API</b> document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doDelete");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- EventLogRecord elr = new EventLogRecord(req);\r
- if (!isAuthorizedForInternal(req)) {\r
- elr.setMessage("Unauthorized.");\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");\r
- return;\r
- }\r
-\r
- String path = req.getPathInfo();\r
- if (path.startsWith("/api/")) {\r
- if (isProxyOK(req) && isProxyServer()) {\r
- super.doDelete(req, resp);\r
- return;\r
- }\r
- String key = path.substring(5);\r
- if (key.length() > 0) {\r
- Parameters param = Parameters.getParameter(key);\r
- if (param != null) {\r
- if (doDelete(param)) {\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- provisioningDataChanged();\r
- provisioningParametersChanged();\r
- } else {\r
- // Something went wrong with the DELETE\r
- elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
- }\r
- return;\r
- }\r
- }\r
- }\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");\r
- }\r
- /**\r
- * Get some information (such as a parameter) underneath the /internal/ namespace.\r
- * See the <b>Internal API</b> document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doGet");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- String path = req.getPathInfo();\r
- if (path.equals("/halt") && !req.isSecure()) {\r
- // request to halt the server - can ONLY come from localhost\r
- String remote = req.getRemoteAddr();\r
- if (remote.equals("127.0.0.1")) {\r
- intlogger.info("PROV0009 Request to HALT received.");\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- Main.shutdown();\r
- } else {\r
- intlogger.info("PROV0010 Disallowed request to HALT received from "+remote);\r
- resp.setStatus(HttpServletResponse.SC_FORBIDDEN);\r
- }\r
- return;\r
- }\r
-\r
- EventLogRecord elr = new EventLogRecord(req);\r
- if (!isAuthorizedForInternal(req)) {\r
- elr.setMessage("Unauthorized.");\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");\r
- return;\r
- }\r
- if (path.equals("/fetchProv") && !req.isSecure()) {\r
- // if request came from active_pod or standby_pod and it is not us, reload prov data\r
- SynchronizerTask s = SynchronizerTask.getSynchronizer();\r
- s.doFetch();\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- return;\r
- }\r
- if (path.equals("/prov")) {\r
- if (isProxyOK(req) && isProxyServer()) {\r
- if (super.doGetWithFallback(req, resp))\r
- return;\r
- // fall back to returning the local data if the remote is unreachable\r
- intlogger.info("Active server unavailable; falling back to local copy.");\r
- }\r
- Poker p = Poker.getPoker();\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType(PROVFULL_CONTENT_TYPE2);\r
- resp.getOutputStream().print(p.getProvisioningString());\r
- return;\r
- }\r
- if (path.equals("/logs") || path.equals("/logs/")) {\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType("application/json");\r
- resp.getOutputStream().print(generateLogfileList().toString());\r
- return;\r
- }\r
- if (path.startsWith("/logs/")) {\r
- Properties p = (new DB()).getProperties();\r
- String logdir = p.getProperty("org.onap.dmaap.datarouter.provserver.accesslog.dir");\r
- String logfile = path.substring(6);\r
- if (logdir != null && logfile != null && logfile.indexOf('/') < 0) {\r
- File log = new File(logdir + "/" + logfile);\r
- if (log.exists() && log.isFile()) {\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType("text/plain");\r
- Path logpath = Paths.get(log.getAbsolutePath());\r
- Files.copy(logpath, resp.getOutputStream());\r
- return;\r
- }\r
- }\r
- resp.sendError(HttpServletResponse.SC_NO_CONTENT, "No file.");\r
- return;\r
- }\r
- if (path.startsWith("/api/")) {\r
- if (isProxyOK(req) && isProxyServer()) {\r
- super.doGet(req, resp);\r
- return;\r
- }\r
- String key = path.substring(5);\r
- if (key.length() > 0) {\r
- Parameters param = Parameters.getParameter(key);\r
- if (param != null) {\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType("text/plain");\r
- resp.getOutputStream().print(param.getValue() + "\n");\r
- return;\r
- }\r
- }\r
- }\r
- if (path.equals("/drlogs") || path.equals("/drlogs/")) {\r
- // Special POD <=> POD API to determine what log file records are loaded here\r
- LogfileLoader lfl = LogfileLoader.getLoader();\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType("text/plain");\r
- resp.getOutputStream().print(lfl.getBitSet().toString());\r
- return;\r
- }\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");\r
- }\r
- /**\r
- * Modify a parameter at the address /internal/api/<parameter>.\r
- * See the <b>Internal API</b> document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doPut");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- EventLogRecord elr = new EventLogRecord(req);\r
- if (!isAuthorizedForInternal(req)) {\r
- elr.setMessage("Unauthorized.");\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");\r
- return;\r
- }\r
- String path = req.getPathInfo();\r
- if (path.startsWith("/api/")) {\r
- if (isProxyOK(req) && isProxyServer()) {\r
- super.doPut(req, resp);\r
- return;\r
- }\r
- String key = path.substring(5);\r
- if (key.length() > 0) {\r
- Parameters param = Parameters.getParameter(key);\r
- if (param != null) {\r
- String t = catValues(req.getParameterValues("val"));\r
- param.setValue(t);\r
- if (doUpdate(param)) {\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- provisioningDataChanged();\r
- provisioningParametersChanged();\r
- } else {\r
- // Something went wrong with the UPDATE\r
- elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
- }\r
- return;\r
- }\r
- }\r
- }\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");\r
- }\r
- /**\r
- * Create some new information (such as a parameter or log entries) underneath the /internal/ namespace.\r
- * See the <b>Internal API</b> document for details on how this method should be invoked.\r
- */\r
- @SuppressWarnings("resource")\r
- @Override\r
- public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doPost");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));\r
- EventLogRecord elr = new EventLogRecord(req);\r
- if (!isAuthorizedForInternal(req)) {\r
- elr.setMessage("Unauthorized.");\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");\r
- return;\r
- }\r
-\r
- String path = req.getPathInfo();\r
- if (path.startsWith("/api/")) {\r
- if (isProxyOK(req) && isProxyServer()) {\r
- super.doPost(req, resp);\r
- return;\r
- }\r
- String key = path.substring(5);\r
- if (key.length() > 0) {\r
- Parameters param = Parameters.getParameter(key);\r
- if (param == null) {\r
- String t = catValues(req.getParameterValues("val"));\r
- param = new Parameters(key, t);\r
- if (doInsert(param)) {\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- provisioningDataChanged();\r
- provisioningParametersChanged();\r
- } else {\r
- // Something went wrong with the INSERT\r
- elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
- }\r
- return;\r
- }\r
- }\r
- }\r
-\r
- if (path.equals("/logs") || path.equals("/logs/")) {\r
- String ctype = req.getHeader("Content-Type");\r
- if (ctype == null || !ctype.equals("text/plain")) {\r
- elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
- elr.setMessage("Bad media type: "+ctype);\r
- resp.setStatus(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
- eventlogger.info(elr);\r
- return;\r
- }\r
- String spooldir = (new DB()).getProperties().getProperty("org.onap.dmaap.datarouter.provserver.spooldir");\r
- String spoolname = String.format("%d-%d-", System.currentTimeMillis(), Thread.currentThread().getId());\r
- synchronized (logseq) {\r
- // perhaps unnecessary, but it helps make the name unique\r
- spoolname += logseq.toString();\r
- logseq++;\r
- }\r
- String encoding = req.getHeader("Content-Encoding");\r
- if (encoding != null) {\r
- if (encoding.trim().equals("gzip")) {\r
- spoolname += ".gz";\r
- } else {\r
- elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
- resp.setStatus(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
- eventlogger.info(elr);\r
- return;\r
- }\r
- }\r
- // Determine space available -- available space must be at least 5%\r
- FileSystem fs = (Paths.get(spooldir)).getFileSystem();\r
- long total = 0;\r
- long avail = 0;\r
- for (FileStore store: fs.getFileStores()) {\r
- total += store.getTotalSpace();\r
- avail += store.getUsableSpace();\r
- }\r
- try { fs.close(); } catch (Exception e) { }\r
- if (((avail * 100) / total) < 5) {\r
- elr.setResult(HttpServletResponse.SC_SERVICE_UNAVAILABLE);\r
- resp.setStatus(HttpServletResponse.SC_SERVICE_UNAVAILABLE);\r
- eventlogger.info(elr);\r
- return;\r
- }\r
- Path tmppath = Paths.get(spooldir, spoolname);\r
- Path donepath = Paths.get(spooldir, "IN."+spoolname);\r
- Files.copy(req.getInputStream(), Paths.get(spooldir, spoolname), StandardCopyOption.REPLACE_EXISTING);\r
- Files.move(tmppath, donepath, StandardCopyOption.REPLACE_EXISTING);\r
- elr.setResult(HttpServletResponse.SC_CREATED);\r
- resp.setStatus(HttpServletResponse.SC_CREATED);\r
- eventlogger.info(elr);\r
- LogfileLoader.getLoader(); // This starts the logfile loader "task"\r
- return;\r
- }\r
-\r
- if (path.equals("/drlogs") || path.equals("/drlogs/")) {\r
- // Receive post request and generate log entries\r
- String ctype = req.getHeader("Content-Type");\r
- if (ctype == null || !ctype.equals("text/plain")) {\r
- elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
- elr.setMessage("Bad media type: "+ctype);\r
- resp.setStatus(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
- eventlogger.info(elr);\r
- return;\r
- }\r
- InputStream is = req.getInputStream();\r
- ByteArrayOutputStream bos = new ByteArrayOutputStream();\r
- int ch = 0;\r
- while ((ch = is.read()) >= 0)\r
- bos.write(ch);\r
- RLEBitSet bs = new RLEBitSet(bos.toString()); // The set of records to retrieve\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType("text/plain");\r
- LogRecord.printLogRecords(resp.getOutputStream(), bs);\r
- eventlogger.info(elr);\r
- return;\r
- }\r
-\r
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");\r
- eventlogger.info(elr);\r
- }\r
-\r
- private String catValues(String[] v) {\r
- StringBuilder sb = new StringBuilder();\r
- if (v != null) {\r
- String pfx = "";\r
- for (String s : v) {\r
- sb.append(pfx);\r
- sb.append(s);\r
- pfx = "|";\r
- }\r
- }\r
- return sb.toString();\r
- }\r
- private JSONArray generateLogfileList() {\r
- JSONArray ja = new JSONArray();\r
- Properties p = (new DB()).getProperties();\r
- String s = p.getProperty("org.onap.dmaap.datarouter.provserver.accesslog.dir");\r
- if (s != null) {\r
- String[] dirs = s.split(",");\r
- for (String dir : dirs) {\r
- File f = new File(dir);\r
- String[] list = f.list();\r
- if (list != null) {\r
- for (String s2 : list) {\r
- if (!s2.startsWith("."))\r
- ja.put(s2);\r
- }\r
- }\r
- }\r
- }\r
- return ja;\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.provisioning;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.FileStore;
+import java.nio.file.FileSystem;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
+import java.util.Properties;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.json.JSONArray;
+import org.onap.dmaap.datarouter.provisioning.beans.EventLogRecord;
+import org.onap.dmaap.datarouter.provisioning.beans.LogRecord;
+import org.onap.dmaap.datarouter.provisioning.beans.Parameters;
+import org.onap.dmaap.datarouter.provisioning.eelf.EelfMsgs;
+import org.onap.dmaap.datarouter.provisioning.utils.DB;
+import org.onap.dmaap.datarouter.provisioning.utils.LogfileLoader;
+import org.onap.dmaap.datarouter.provisioning.utils.RLEBitSet;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+/**
+ * <p>
+ * This servlet handles requests to URLs under /internal on the provisioning server.
+ * These include:
+ * </p>
+ * <div class="contentContainer">
+ * <table class="packageSummary" border="0" cellpadding="3" cellspacing="0">
+ * <caption><span>URL Path Summary</span><span class="tabEnd"> </span></caption>
+ * <tr>
+ * <th class="colFirst" width="15%">URL Path</th>
+ * <th class="colOne">Method</th>
+ * <th class="colLast">Purpose</th>
+ * </tr>
+ * <tr class="altColor">
+ * <td class="colFirst">/internal/prov</td>
+ * <td class="colOne">GET</td>
+ * <td class="colLast">used to GET a full JSON copy of the provisioning data.</td>
+ * </tr>
+ * <tr class="rowColor">
+ * <td class="colFirst">/internal/fetchProv</td>
+ * <td class="colOne">GET</td>
+ * <td class="colLast">used to signal to a standby POD that the provisioning data should be fetched from the active POD.</td>
+ * </tr>
+ * <tr class="altColor">
+ * <td class="colFirst" rowspan="2">/internal/logs</td>
+ * <td class="colOne">GET</td>
+ * <td class="colLast">used to GET an index of log files and individual logs for this provisioning server.</td>
+ * </tr>
+ * <tr class="altColor">
+ * <td class="colOne">POST</td>
+ * <td class="colLast">used to POST log files from the individual nodes to this provisioning server.</td>
+ * </tr>
+ * <tr class="rowColor">
+ * <td class="colFirst" rowspan="4">/internal/api</td>
+ * <td class="colOne">GET</td>
+ * <td class="colLast">used to GET an individual parameter value. The parameter name is specified by the path after /api/.</td>
+ * </tr>
+ * <tr class="rowColor">
+ * <td class="colOne">PUT</td>
+ * <td class="colLast">used to set an individual parameter value. The parameter name is specified by the path after /api/.</td>
+ * </tr>
+ * <tr class="rowColor">
+ * <td class="colOne">DELETE</td>
+ * <td class="colLast">used to remove an individual parameter value. The parameter name is specified by the path after /api/.</td>
+ * </tr>
+ * <tr class="rowColor">
+ * <td class="colOne">POST</td>
+ * <td class="colLast">used to create a new individual parameter value. The parameter name is specified by the path after /api/.</td>
+ * </tr>
+ * <tr class="altColor">
+ * <td class="colFirst">/internal/halt</td>
+ * <td class="colOne">GET</td>
+ * <td class="colLast">used to halt the server (must be accessed from 127.0.0.1).</td>
+ * </tr>
+ * <tr class="rowColor">
+ * <td class="colFirst" rowspan="2">/internal/drlogs</td>
+ * <td class="colOne">GET</td>
+ * <td class="colLast">used to get a list of DR log entries available for retrieval.
+ * Note: these are the actual data router log entries sent to the provisioning server
+ * by the nodes, not the provisioning server's internal logs (access via /internal/logs above).
+ * The range is returned as a list of record sequence numbers.</td>
+ * </tr>
+ * <tr class="rowColor">
+ * <td class="colOne">POST</td>
+ * <td class="colLast">used to retrieve specific log entries.
+ * The sequence numbers of the records to fetch are POST-ed; the records matching the sequence numbers are returned.</td>
+ * </tr>
+ * <tr class="altColor">
+ * <td class="colFirst">/internal/route/*</td>
+ * <td class="colOne">*</td>
+ * <td class="colLast">URLs under this path are handled via the {@link org.onap.dmaap.datarouter.provisioning.RouteServlet}</td>
+ * </tr>
+ * </table>
+ * </div>
+ * <p>
+ * Authorization to use these URLs is a little different than for other URLs on the provisioning server.
+ * For the most part, the IP address that the request comes from should be either:
+ * </p>
+ * <ol>
+ * <li>an IP address of a provisioning server, or</li>
+ * <li>the IP address of a node (to allow access to /internal/prov), or</li>
+ * <li>an IP address from the "<i>special subnet</i>" which is configured with
+ * the PROV_SPECIAL_SUBNET parameter.
+ * </ol>
+ * <p>
+ * In addition, requests to /internal/halt can ONLY come from localhost (127.0.0.1) on the HTTP port.
+ * </p>
+ * <p>
+ * All DELETE/GET/PUT/POST requests made to /internal/api on this servlet on the standby server are
+ * proxied to the active server (using the {@link ProxyServlet}) if it is up and reachable.
+ * </p>
+ *
+ * @author Robert Eby
+ * @version $Id: InternalServlet.java,v 1.23 2014/03/24 18:47:10 eby Exp $
+ */
+@SuppressWarnings("serial")
+public class InternalServlet extends ProxyServlet {
+ // NOTE(review): logseq is used both as a counter and as a monitor lock (see doPost).
+ // "logseq++" autoboxes and rebinds the field to a NEW Integer object, so two threads
+ // can end up synchronizing on different objects; a dedicated final lock object or an
+ // AtomicInteger would be safer.  Also "new Integer(0)" is a deprecated constructor
+ // (prefer Integer.valueOf) -- TODO confirm and fix in a follow-up change.
+ private static Integer logseq = new Integer(0); // another piece of info to make log spool file names unique
+ //Adding EELF Logger Rally:US664892
+ private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.provisioning.InternalServlet");
+
+ /**
+ * Delete a parameter at the address /internal/api/<parameter>.
+ * See the <b>Internal API</b> document for details on how this method should be invoked.
+ */
+ @Override
+ public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doDelete");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ EventLogRecord elr = new EventLogRecord(req);
+ // Only callers authorized for the /internal namespace may proceed.
+ if (!isAuthorizedForInternal(req)) {
+ elr.setMessage("Unauthorized.");
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");
+ return;
+ }
+
+ String path = req.getPathInfo();
+ // NOTE(review): getPathInfo() can return null (no extra path info), which would NPE
+ // on startsWith() below -- TODO confirm the servlet mapping guarantees a non-null path.
+ if (path.startsWith("/api/")) {
+ // If this instance is a standby proxy, forward the request to the active server.
+ if (isProxyOK(req) && isProxyServer()) {
+ super.doDelete(req, resp);
+ return;
+ }
+ String key = path.substring(5); // parameter name follows "/api/"
+ if (key.length() > 0) {
+ Parameters param = Parameters.getParameter(key);
+ if (param != null) {
+ if (doDelete(param)) {
+ elr.setResult(HttpServletResponse.SC_OK);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_OK);
+ // Notify peers/pollers that provisioning data (and parameters) changed.
+ provisioningDataChanged();
+ provisioningParametersChanged();
+ } else {
+ // Something went wrong with the DELETE
+ elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);
+ }
+ return;
+ }
+ }
+ }
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");
+ }
+ /**
+ * Get some information (such as a parameter) underneath the /internal/ namespace.
+ * See the <b>Internal API</b> document for details on how this method should be invoked.
+ */
+ @Override
+ public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doGet");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ String path = req.getPathInfo();
+ // NOTE(review): path may be null here (see doDelete) -- same NPE risk on equals().
+ // /halt is only honored on the insecure (plain HTTP) port.
+ if (path.equals("/halt") && !req.isSecure()) {
+ // request to halt the server - can ONLY come from localhost
+ // NOTE(review): only the IPv4 loopback literal is accepted; a request arriving via
+ // the IPv6 loopback ("::1" / "0:0:0:0:0:0:0:1") would be rejected -- confirm intended.
+ String remote = req.getRemoteAddr();
+ if (remote.equals("127.0.0.1")) {
+ intlogger.info("PROV0009 Request to HALT received.");
+ resp.setStatus(HttpServletResponse.SC_OK);
+ Main.shutdown();
+ } else {
+ intlogger.info("PROV0010 Disallowed request to HALT received from "+remote);
+ resp.setStatus(HttpServletResponse.SC_FORBIDDEN);
+ }
+ return;
+ }
+
+ EventLogRecord elr = new EventLogRecord(req);
+ if (!isAuthorizedForInternal(req)) {
+ elr.setMessage("Unauthorized.");
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");
+ return;
+ }
+ if (path.equals("/fetchProv") && !req.isSecure()) {
+ // if request came from active_pod or standby_pod and it is not us, reload prov data
+ SynchronizerTask s = SynchronizerTask.getSynchronizer();
+ s.doFetch();
+ resp.setStatus(HttpServletResponse.SC_OK);
+ return;
+ }
+ if (path.equals("/prov")) {
+ if (isProxyOK(req) && isProxyServer()) {
+ if (super.doGetWithFallback(req, resp))
+ return;
+ // fall back to returning the local data if the remote is unreachable
+ intlogger.info("Active server unavailable; falling back to local copy.");
+ }
+ // Serve the full provisioning snapshot from the local Poker.
+ Poker p = Poker.getPoker();
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentType(PROVFULL_CONTENT_TYPE2);
+ resp.getOutputStream().print(p.getProvisioningString());
+ return;
+ }
+ if (path.equals("/logs") || path.equals("/logs/")) {
+ // List the available access-log files as a JSON array of names.
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentType("application/json");
+ resp.getOutputStream().print(generateLogfileList().toString());
+ return;
+ }
+ if (path.startsWith("/logs/")) {
+ // Stream a single access-log file.  The indexOf('/') check prevents path
+ // traversal out of the configured log directory.
+ Properties p = (new DB()).getProperties();
+ String logdir = p.getProperty("org.onap.dmaap.datarouter.provserver.accesslog.dir");
+ String logfile = path.substring(6);
+ if (logdir != null && logfile != null && logfile.indexOf('/') < 0) {
+ File log = new File(logdir + "/" + logfile);
+ if (log.exists() && log.isFile()) {
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentType("text/plain");
+ Path logpath = Paths.get(log.getAbsolutePath());
+ Files.copy(logpath, resp.getOutputStream());
+ return;
+ }
+ }
+ resp.sendError(HttpServletResponse.SC_NO_CONTENT, "No file.");
+ return;
+ }
+ if (path.startsWith("/api/")) {
+ if (isProxyOK(req) && isProxyServer()) {
+ super.doGet(req, resp);
+ return;
+ }
+ String key = path.substring(5); // parameter name follows "/api/"
+ if (key.length() > 0) {
+ Parameters param = Parameters.getParameter(key);
+ if (param != null) {
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentType("text/plain");
+ resp.getOutputStream().print(param.getValue() + "\n");
+ return;
+ }
+ }
+ }
+ if (path.equals("/drlogs") || path.equals("/drlogs/")) {
+ // Special POD <=> POD API to determine what log file records are loaded here
+ LogfileLoader lfl = LogfileLoader.getLoader();
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentType("text/plain");
+ resp.getOutputStream().print(lfl.getBitSet().toString());
+ return;
+ }
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");
+ }
+ /**
+ * Modify a parameter at the address /internal/api/<parameter>.
+ * See the <b>Internal API</b> document for details on how this method should be invoked.
+ */
+ @Override
+ public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doPut");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ EventLogRecord elr = new EventLogRecord(req);
+ if (!isAuthorizedForInternal(req)) {
+ elr.setMessage("Unauthorized.");
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");
+ return;
+ }
+ String path = req.getPathInfo();
+ // NOTE(review): same potential null path as doDelete/doGet -- TODO confirm.
+ if (path.startsWith("/api/")) {
+ if (isProxyOK(req) && isProxyServer()) {
+ super.doPut(req, resp);
+ return;
+ }
+ String key = path.substring(5);
+ if (key.length() > 0) {
+ // PUT only updates an EXISTING parameter (contrast doPost, which creates one).
+ Parameters param = Parameters.getParameter(key);
+ if (param != null) {
+ String t = catValues(req.getParameterValues("val"));
+ param.setValue(t);
+ if (doUpdate(param)) {
+ elr.setResult(HttpServletResponse.SC_OK);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_OK);
+ provisioningDataChanged();
+ provisioningParametersChanged();
+ } else {
+ // Something went wrong with the UPDATE
+ elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);
+ }
+ return;
+ }
+ }
+ }
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");
+ }
+ /**
+ * Create some new information (such as a parameter or log entries) underneath the /internal/ namespace.
+ * See the <b>Internal API</b> document for details on how this method should be invoked.
+ */
+ @SuppressWarnings("resource")
+ @Override
+ public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doPost");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));
+ EventLogRecord elr = new EventLogRecord(req);
+ if (!isAuthorizedForInternal(req)) {
+ elr.setMessage("Unauthorized.");
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");
+ return;
+ }
+
+ String path = req.getPathInfo();
+ if (path.startsWith("/api/")) {
+ if (isProxyOK(req) && isProxyServer()) {
+ super.doPost(req, resp);
+ return;
+ }
+ String key = path.substring(5);
+ if (key.length() > 0) {
+ // POST only creates a NEW parameter (contrast doPut, which updates one).
+ Parameters param = Parameters.getParameter(key);
+ if (param == null) {
+ String t = catValues(req.getParameterValues("val"));
+ param = new Parameters(key, t);
+ if (doInsert(param)) {
+ elr.setResult(HttpServletResponse.SC_OK);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_OK);
+ provisioningDataChanged();
+ provisioningParametersChanged();
+ } else {
+ // Something went wrong with the INSERT
+ elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);
+ }
+ return;
+ }
+ }
+ }
+
+ if (path.equals("/logs") || path.equals("/logs/")) {
+ // Accept a text/plain (optionally gzip-encoded) log upload and spool it for
+ // asynchronous ingestion by the LogfileLoader.
+ String ctype = req.getHeader("Content-Type");
+ if (ctype == null || !ctype.equals("text/plain")) {
+ elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
+ elr.setMessage("Bad media type: "+ctype);
+ resp.setStatus(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
+ eventlogger.info(elr);
+ return;
+ }
+ String spooldir = (new DB()).getProperties().getProperty("org.onap.dmaap.datarouter.provserver.spooldir");
+ // Spool name is built from time + thread id + sequence number for uniqueness.
+ String spoolname = String.format("%d-%d-", System.currentTimeMillis(), Thread.currentThread().getId());
+ synchronized (logseq) {
+ // perhaps unnecessary, but it helps make the name unique
+ // NOTE(review): logseq++ replaces the very object being synchronized on
+ // (autoboxing creates a new Integer), so this lock is unreliable under
+ // contention -- see the field comment at the top of the class.
+ spoolname += logseq.toString();
+ logseq++;
+ }
+ String encoding = req.getHeader("Content-Encoding");
+ if (encoding != null) {
+ if (encoding.trim().equals("gzip")) {
+ spoolname += ".gz";
+ } else {
+ elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
+ resp.setStatus(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
+ eventlogger.info(elr);
+ return;
+ }
+ }
+ // Determine space available -- available space must be at least 5%
+ // NOTE(review): if total ends up 0 (no file stores reported) the division below
+ // throws ArithmeticException -- TODO confirm this cannot happen in practice.
+ FileSystem fs = (Paths.get(spooldir)).getFileSystem();
+ long total = 0;
+ long avail = 0;
+ for (FileStore store: fs.getFileStores()) {
+ total += store.getTotalSpace();
+ avail += store.getUsableSpace();
+ }
+ // close() on the default FileSystem throws UnsupportedOperationException;
+ // the exception is deliberately swallowed here.
+ try { fs.close(); } catch (Exception e) { }
+ if (((avail * 100) / total) < 5) {
+ elr.setResult(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
+ resp.setStatus(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
+ eventlogger.info(elr);
+ return;
+ }
+ // Write to a temp name first, then atomically rename to "IN.<name>" so the
+ // loader never sees a partially-written file.
+ Path tmppath = Paths.get(spooldir, spoolname);
+ Path donepath = Paths.get(spooldir, "IN."+spoolname);
+ Files.copy(req.getInputStream(), Paths.get(spooldir, spoolname), StandardCopyOption.REPLACE_EXISTING);
+ Files.move(tmppath, donepath, StandardCopyOption.REPLACE_EXISTING);
+ elr.setResult(HttpServletResponse.SC_CREATED);
+ resp.setStatus(HttpServletResponse.SC_CREATED);
+ eventlogger.info(elr);
+ LogfileLoader.getLoader(); // This starts the logfile loader "task"
+ return;
+ }
+
+ if (path.equals("/drlogs") || path.equals("/drlogs/")) {
+ // Receive post request and generate log entries
+ String ctype = req.getHeader("Content-Type");
+ if (ctype == null || !ctype.equals("text/plain")) {
+ elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
+ elr.setMessage("Bad media type: "+ctype);
+ resp.setStatus(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
+ eventlogger.info(elr);
+ return;
+ }
+ // Read the whole body (an RLE bit-set spec) byte-by-byte into memory.
+ InputStream is = req.getInputStream();
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ int ch = 0;
+ while ((ch = is.read()) >= 0)
+ bos.write(ch);
+ RLEBitSet bs = new RLEBitSet(bos.toString()); // The set of records to retrieve
+ elr.setResult(HttpServletResponse.SC_OK);
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentType("text/plain");
+ LogRecord.printLogRecords(resp.getOutputStream(), bs);
+ eventlogger.info(elr);
+ return;
+ }
+
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");
+ eventlogger.info(elr);
+ }
+
+ /**
+ * Concatenate the given values into a single string separated by "|".
+ * Returns the empty string when v is null or empty.
+ */
+ private String catValues(String[] v) {
+ StringBuilder sb = new StringBuilder();
+ if (v != null) {
+ String pfx = "";
+ for (String s : v) {
+ sb.append(pfx);
+ sb.append(s);
+ pfx = "|";
+ }
+ }
+ return sb.toString();
+ }
+ /**
+ * Build a JSON array of the non-hidden file names found in the directories
+ * listed (comma-separated) in the provserver accesslog.dir property.
+ */
+ private JSONArray generateLogfileList() {
+ JSONArray ja = new JSONArray();
+ Properties p = (new DB()).getProperties();
+ String s = p.getProperty("org.onap.dmaap.datarouter.provserver.accesslog.dir");
+ if (s != null) {
+ String[] dirs = s.split(",");
+ for (String dir : dirs) {
+ File f = new File(dir);
+ String[] list = f.list();
+ if (list != null) {
+ for (String s2 : list) {
+ if (!s2.startsWith("."))
+ ja.put(s2);
+ }
+ }
+ }
+ }
+ return ja;
+ }
+}
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
*/\r
@SuppressWarnings("serial")\r
public class LogServlet extends BaseServlet {\r
- //Adding EELF Logger Rally:US664892 \r
+ //Adding EELF Logger Rally:US664892\r
private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.provisioning.LogServlet");\r
\r
- private static final long TWENTYFOUR_HOURS = (24 * 60 * 60 * 1000L);\r
- private static final String fmt1 = "yyyy-MM-dd'T'HH:mm:ss'Z'";\r
- private static final String fmt2 = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";\r
+ private static final long TWENTYFOUR_HOURS = (24 * 60 * 60 * 1000L);\r
+ private static final String fmt1 = "yyyy-MM-dd'T'HH:mm:ss'Z'";\r
+ private static final String fmt2 = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";\r
\r
- private boolean isfeedlog;\r
+ private boolean isfeedlog;\r
\r
- public abstract class RowHandler {\r
- private final ServletOutputStream out;\r
- private final String[] fields;\r
- public boolean firstrow;\r
+ public abstract class RowHandler {\r
+ private final ServletOutputStream out;\r
+ private final String[] fields;\r
+ public boolean firstrow;\r
\r
- public RowHandler(ServletOutputStream out, String fieldparam, boolean b) {\r
- this.out = out;\r
- this.firstrow = b;\r
- this.fields = (fieldparam != null) ? fieldparam.split(":") : null;\r
- }\r
- public void handleRow(ResultSet rs) {\r
- try {\r
- LOGJSONable js = buildJSONable(rs);\r
- LOGJSONObject jo = js.asJSONObject();\r
- if (fields != null) {\r
- // filter out unwanted fields\r
- LOGJSONObject j2 = new LOGJSONObject();\r
- for (String key : fields) {\r
- Object v = jo.opt(key);\r
- if (v != null)\r
- j2.put(key, v);\r
- }\r
- jo = j2;\r
- }\r
- String t = firstrow ? "\n" : ",\n";\r
- t += jo.toString();\r
- out.print(t);\r
- firstrow = false;\r
- } catch (Exception e) {\r
- // ignore\r
- }\r
- }\r
- public abstract LOGJSONable buildJSONable(ResultSet rs) throws SQLException;\r
- }\r
- public class PublishRecordRowHandler extends RowHandler {\r
- public PublishRecordRowHandler(ServletOutputStream out, String fields, boolean b) {\r
- super(out, fields, b);\r
- }\r
- @Override\r
- public LOGJSONable buildJSONable(ResultSet rs) throws SQLException {\r
- return new PublishRecord(rs);\r
- }\r
- }\r
- public class DeliveryRecordRowHandler extends RowHandler {\r
- public DeliveryRecordRowHandler(ServletOutputStream out, String fields, boolean b) {\r
- super(out, fields, b);\r
- }\r
- @Override\r
- public LOGJSONable buildJSONable(ResultSet rs) throws SQLException {\r
- return new DeliveryRecord(rs);\r
- }\r
- }\r
- public class ExpiryRecordRowHandler extends RowHandler {\r
- public ExpiryRecordRowHandler(ServletOutputStream out, String fields, boolean b) {\r
- super(out, fields, b);\r
- }\r
- @Override\r
- public LOGJSONable buildJSONable(ResultSet rs) throws SQLException {\r
- return new ExpiryRecord(rs);\r
- }\r
- }\r
+ public RowHandler(ServletOutputStream out, String fieldparam, boolean b) {\r
+ this.out = out;\r
+ this.firstrow = b;\r
+ this.fields = (fieldparam != null) ? fieldparam.split(":") : null;\r
+ }\r
+ public void handleRow(ResultSet rs) {\r
+ try {\r
+ LOGJSONable js = buildJSONable(rs);\r
+ LOGJSONObject jo = js.asJSONObject();\r
+ if (fields != null) {\r
+ // filter out unwanted fields\r
+ LOGJSONObject j2 = new LOGJSONObject();\r
+ for (String key : fields) {\r
+ Object v = jo.opt(key);\r
+ if (v != null)\r
+ j2.put(key, v);\r
+ }\r
+ jo = j2;\r
+ }\r
+ String t = firstrow ? "\n" : ",\n";\r
+ t += jo.toString();\r
+ out.print(t);\r
+ firstrow = false;\r
+ } catch (Exception e) {\r
+ // ignore\r
+ }\r
+ }\r
+ public abstract LOGJSONable buildJSONable(ResultSet rs) throws SQLException;\r
+ }\r
+ public class PublishRecordRowHandler extends RowHandler {\r
+ public PublishRecordRowHandler(ServletOutputStream out, String fields, boolean b) {\r
+ super(out, fields, b);\r
+ }\r
+ @Override\r
+ public LOGJSONable buildJSONable(ResultSet rs) throws SQLException {\r
+ return new PublishRecord(rs);\r
+ }\r
+ }\r
+ public class DeliveryRecordRowHandler extends RowHandler {\r
+ public DeliveryRecordRowHandler(ServletOutputStream out, String fields, boolean b) {\r
+ super(out, fields, b);\r
+ }\r
+ @Override\r
+ public LOGJSONable buildJSONable(ResultSet rs) throws SQLException {\r
+ return new DeliveryRecord(rs);\r
+ }\r
+ }\r
+ public class ExpiryRecordRowHandler extends RowHandler {\r
+ public ExpiryRecordRowHandler(ServletOutputStream out, String fields, boolean b) {\r
+ super(out, fields, b);\r
+ }\r
+ @Override\r
+ public LOGJSONable buildJSONable(ResultSet rs) throws SQLException {\r
+ return new ExpiryRecord(rs);\r
+ }\r
+ }\r
\r
- /**\r
- * This class must be created from either a {@link FeedLogServlet} or a {@link SubLogServlet}.\r
- * @param isFeedLog boolean to handle those places where a feedlog request is different from\r
- * a sublog request\r
- */\r
- protected LogServlet(boolean isFeedLog) {\r
- this.isfeedlog = isFeedLog;\r
- }\r
+ /**\r
+ * This class must be created from either a {@link FeedLogServlet} or a {@link SubLogServlet}.\r
+ * @param isFeedLog boolean to handle those places where a feedlog request is different from\r
+ * a sublog request\r
+ */\r
+ protected LogServlet(boolean isFeedLog) {\r
+ this.isfeedlog = isFeedLog;\r
+ }\r
\r
- /**\r
- * DELETE a logging URL -- not supported.\r
- */\r
- @Override\r
- public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doDelete");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- String message = "DELETE not allowed for the logURL.";\r
- EventLogRecord elr = new EventLogRecord(req);\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
- }\r
- /**\r
- * GET a logging URL -- retrieve logging data for a feed or subscription.\r
- * See the <b>Logging API</b> document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doGet");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- int id = getIdFromPath(req);\r
- if (id < 0) {\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Missing or bad feed/subscription number.");\r
- return;\r
- }\r
- Map<String, String> map = buildMapFromRequest(req);\r
- if (map.get("err") != null) {\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments: "+map.get("err"));\r
- return;\r
- }\r
- // check Accept: header??\r
+ /**\r
+ * DELETE a logging URL -- not supported.\r
+ */\r
+ @Override\r
+ public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ setIpAndFqdnForEelf("doDelete");\r
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
+ String message = "DELETE not allowed for the logURL.";\r
+ EventLogRecord elr = new EventLogRecord(req);\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
+ }\r
+ /**\r
+ * GET a logging URL -- retrieve logging data for a feed or subscription.\r
+ * See the <b>Logging API</b> document for details on how this method should be invoked.\r
+ */\r
+ @Override\r
+ public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ setIpAndFqdnForEelf("doGet");\r
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
+ int id = getIdFromPath(req);\r
+ if (id < 0) {\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Missing or bad feed/subscription number.");\r
+ return;\r
+ }\r
+ Map<String, String> map = buildMapFromRequest(req);\r
+ if (map.get("err") != null) {\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments: "+map.get("err"));\r
+ return;\r
+ }\r
+ // check Accept: header??\r
\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType(LOGLIST_CONTENT_TYPE);\r
- @SuppressWarnings("resource")\r
- ServletOutputStream out = resp.getOutputStream();\r
- final String fields = req.getParameter("fields");\r
+ resp.setStatus(HttpServletResponse.SC_OK);\r
+ resp.setContentType(LOGLIST_CONTENT_TYPE);\r
+ @SuppressWarnings("resource")\r
+ ServletOutputStream out = resp.getOutputStream();\r
+ final String fields = req.getParameter("fields");\r
\r
- out.print("[");\r
- if (isfeedlog) {\r
- // Handle /feedlog/feedid request\r
- boolean firstrow = true;\r
+ out.print("[");\r
+ if (isfeedlog) {\r
+ // Handle /feedlog/feedid request\r
+ boolean firstrow = true;\r
\r
- // 1. Collect publish records for this feed\r
- RowHandler rh = new PublishRecordRowHandler(out, fields, firstrow);\r
- getPublishRecordsForFeed(id, rh, map);\r
- firstrow = rh.firstrow;\r
+ // 1. Collect publish records for this feed\r
+ RowHandler rh = new PublishRecordRowHandler(out, fields, firstrow);\r
+ getPublishRecordsForFeed(id, rh, map);\r
+ firstrow = rh.firstrow;\r
\r
- // 2. Collect delivery records for subscriptions to this feed\r
- rh = new DeliveryRecordRowHandler(out, fields, firstrow);\r
- getDeliveryRecordsForFeed(id, rh, map);\r
- firstrow = rh.firstrow;\r
+ // 2. Collect delivery records for subscriptions to this feed\r
+ rh = new DeliveryRecordRowHandler(out, fields, firstrow);\r
+ getDeliveryRecordsForFeed(id, rh, map);\r
+ firstrow = rh.firstrow;\r
\r
- // 3. Collect expiry records for subscriptions to this feed\r
- rh = new ExpiryRecordRowHandler(out, fields, firstrow);\r
- getExpiryRecordsForFeed(id, rh, map);\r
- } else {\r
- // Handle /sublog/subid request\r
- Subscription sub = Subscription.getSubscriptionById(id);\r
- if (sub != null) {\r
- // 1. Collect publish records for the feed this subscription feeds\r
- RowHandler rh = new PublishRecordRowHandler(out, fields, true);\r
- getPublishRecordsForFeed(sub.getFeedid(), rh, map);\r
+ // 3. Collect expiry records for subscriptions to this feed\r
+ rh = new ExpiryRecordRowHandler(out, fields, firstrow);\r
+ getExpiryRecordsForFeed(id, rh, map);\r
+ } else {\r
+ // Handle /sublog/subid request\r
+ Subscription sub = Subscription.getSubscriptionById(id);\r
+ if (sub != null) {\r
+ // 1. Collect publish records for the feed this subscription feeds\r
+ RowHandler rh = new PublishRecordRowHandler(out, fields, true);\r
+ getPublishRecordsForFeed(sub.getFeedid(), rh, map);\r
\r
- // 2. Collect delivery records for this subscription\r
- rh = new DeliveryRecordRowHandler(out, fields, rh.firstrow);\r
- getDeliveryRecordsForSubscription(id, rh, map);\r
+ // 2. Collect delivery records for this subscription\r
+ rh = new DeliveryRecordRowHandler(out, fields, rh.firstrow);\r
+ getDeliveryRecordsForSubscription(id, rh, map);\r
\r
- // 3. Collect expiry records for this subscription\r
- rh = new ExpiryRecordRowHandler(out, fields, rh.firstrow);\r
- getExpiryRecordsForSubscription(id, rh, map);\r
- }\r
- }\r
- out.print("\n]");\r
- }\r
- /**\r
- * PUT a logging URL -- not supported.\r
- */\r
- @Override\r
- public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doPut");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- String message = "PUT not allowed for the logURL.";\r
- EventLogRecord elr = new EventLogRecord(req);\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
- }\r
- /**\r
- * POST a logging URL -- not supported.\r
- */\r
- @Override\r
- public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doPost");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));\r
- String message = "POST not allowed for the logURL.";\r
- EventLogRecord elr = new EventLogRecord(req);\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
- }\r
+ // 3. Collect expiry records for this subscription\r
+ rh = new ExpiryRecordRowHandler(out, fields, rh.firstrow);\r
+ getExpiryRecordsForSubscription(id, rh, map);\r
+ }\r
+ }\r
+ out.print("\n]");\r
+ }\r
+ /**\r
+ * PUT a logging URL -- not supported.\r
+ */\r
+ @Override\r
+ public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ setIpAndFqdnForEelf("doPut");\r
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
+ String message = "PUT not allowed for the logURL.";\r
+ EventLogRecord elr = new EventLogRecord(req);\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
+ }\r
+ /**\r
+ * POST a logging URL -- not supported.\r
+ */\r
+ @Override\r
+ public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ setIpAndFqdnForEelf("doPost");\r
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));\r
+ String message = "POST not allowed for the logURL.";\r
+ EventLogRecord elr = new EventLogRecord(req);\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
+ }\r
\r
- private Map<String, String> buildMapFromRequest(HttpServletRequest req) {\r
- Map<String, String> map = new HashMap<String, String>();\r
- String s = req.getParameter("type");\r
- if (s != null) {\r
- if (s.equals("pub") || s.equals("del") || s.equals("exp")) {\r
- map.put("type", s);\r
- } else {\r
- map.put("err", "bad type");\r
- return map;\r
- }\r
- } else\r
- map.put("type", "all");\r
- map.put("publishSQL", "");\r
- map.put("statusSQL", "");\r
- map.put("resultSQL", "");\r
- map.put("reasonSQL", "");\r
+ private Map<String, String> buildMapFromRequest(HttpServletRequest req) {\r
+ Map<String, String> map = new HashMap<String, String>();\r
+ String s = req.getParameter("type");\r
+ if (s != null) {\r
+ if (s.equals("pub") || s.equals("del") || s.equals("exp")) {\r
+ map.put("type", s);\r
+ } else {\r
+ map.put("err", "bad type");\r
+ return map;\r
+ }\r
+ } else\r
+ map.put("type", "all");\r
+ map.put("publishSQL", "");\r
+ map.put("statusSQL", "");\r
+ map.put("resultSQL", "");\r
+ map.put("reasonSQL", "");\r
\r
- s = req.getParameter("publishId");\r
- if (s != null) {\r
- if (s.indexOf("'") >= 0) {\r
- map.put("err", "bad publishId");\r
- return map;\r
- }\r
- map.put("publishSQL", " AND PUBLISH_ID = '"+s+"'");\r
- }\r
+ s = req.getParameter("publishId");\r
+ if (s != null) {\r
+ if (s.indexOf("'") >= 0) {\r
+ map.put("err", "bad publishId");\r
+ return map;\r
+ }\r
+ map.put("publishSQL", " AND PUBLISH_ID = '"+s+"'");\r
+ }\r
\r
- s = req.getParameter("statusCode");\r
- if (s != null) {\r
- String sql = null;\r
- if (s.equals("success")) {\r
- sql = " AND STATUS >= 200 AND STATUS < 300";\r
- } else if (s.equals("redirect")) {\r
- sql = " AND STATUS >= 300 AND STATUS < 400";\r
- } else if (s.equals("failure")) {\r
- sql = " AND STATUS >= 400";\r
- } else {\r
- try {\r
- Integer n = Integer.parseInt(s);\r
- if ((n >= 100 && n < 600) || (n == -1))\r
- sql = " AND STATUS = " + n;\r
- } catch (NumberFormatException e) {\r
- }\r
- }\r
- if (sql == null) {\r
- map.put("err", "bad statusCode");\r
- return map;\r
- }\r
- map.put("statusSQL", sql);\r
- map.put("resultSQL", sql.replaceAll("STATUS", "RESULT"));\r
- }\r
+ s = req.getParameter("statusCode");\r
+ if (s != null) {\r
+ String sql = null;\r
+ if (s.equals("success")) {\r
+ sql = " AND STATUS >= 200 AND STATUS < 300";\r
+ } else if (s.equals("redirect")) {\r
+ sql = " AND STATUS >= 300 AND STATUS < 400";\r
+ } else if (s.equals("failure")) {\r
+ sql = " AND STATUS >= 400";\r
+ } else {\r
+ try {\r
+ Integer n = Integer.parseInt(s);\r
+ if ((n >= 100 && n < 600) || (n == -1))\r
+ sql = " AND STATUS = " + n;\r
+ } catch (NumberFormatException e) {\r
+ }\r
+ }\r
+ if (sql == null) {\r
+ map.put("err", "bad statusCode");\r
+ return map;\r
+ }\r
+ map.put("statusSQL", sql);\r
+ map.put("resultSQL", sql.replaceAll("STATUS", "RESULT"));\r
+ }\r
\r
- s = req.getParameter("expiryReason");\r
- if (s != null) {\r
- map.put("type", "exp");\r
- if (s.equals("notRetryable")) {\r
- map.put("reasonSQL", " AND REASON = 'notRetryable'");\r
- } else if (s.equals("retriesExhausted")) {\r
- map.put("reasonSQL", " AND REASON = 'retriesExhausted'");\r
- } else if (s.equals("diskFull")) {\r
- map.put("reasonSQL", " AND REASON = 'diskFull'");\r
- } else if (s.equals("other")) {\r
- map.put("reasonSQL", " AND REASON = 'other'");\r
- } else {\r
- map.put("err", "bad expiryReason");\r
- return map;\r
- }\r
- }\r
+ s = req.getParameter("expiryReason");\r
+ if (s != null) {\r
+ map.put("type", "exp");\r
+ if (s.equals("notRetryable")) {\r
+ map.put("reasonSQL", " AND REASON = 'notRetryable'");\r
+ } else if (s.equals("retriesExhausted")) {\r
+ map.put("reasonSQL", " AND REASON = 'retriesExhausted'");\r
+ } else if (s.equals("diskFull")) {\r
+ map.put("reasonSQL", " AND REASON = 'diskFull'");\r
+ } else if (s.equals("other")) {\r
+ map.put("reasonSQL", " AND REASON = 'other'");\r
+ } else {\r
+ map.put("err", "bad expiryReason");\r
+ return map;\r
+ }\r
+ }\r
\r
- long stime = getTimeFromParam(req.getParameter("start"));\r
- if (stime < 0) {\r
- map.put("err", "bad start");\r
- return map;\r
- }\r
- long etime = getTimeFromParam(req.getParameter("end"));\r
- if (etime < 0) {\r
- map.put("err", "bad end");\r
- return map;\r
- }\r
- if (stime == 0 && etime == 0) {\r
- etime = System.currentTimeMillis();\r
- stime = etime - TWENTYFOUR_HOURS;\r
- } else if (stime == 0) {\r
- stime = etime - TWENTYFOUR_HOURS;\r
- } else if (etime == 0) {\r
- etime = stime + TWENTYFOUR_HOURS;\r
- }\r
- map.put("timeSQL", String.format(" AND EVENT_TIME >= %d AND EVENT_TIME <= %d", stime, etime));\r
- return map;\r
- }\r
- private long getTimeFromParam(final String s) {\r
- if (s == null)\r
- return 0;\r
- try {\r
- // First, look for an RFC 3339 date\r
- String fmt = (s.indexOf('.') > 0) ? fmt2 : fmt1;\r
- SimpleDateFormat sdf = new SimpleDateFormat(fmt);\r
- Date d = sdf.parse(s);\r
- return d.getTime();\r
- } catch (ParseException e) {\r
- }\r
- try {\r
- // Also allow a long (in ms); useful for testing\r
- long n = Long.parseLong(s);\r
- return n;\r
- } catch (NumberFormatException e) {\r
- }\r
- intlogger.info("Error parsing time="+s);\r
- return -1;\r
- }\r
+ long stime = getTimeFromParam(req.getParameter("start"));\r
+ if (stime < 0) {\r
+ map.put("err", "bad start");\r
+ return map;\r
+ }\r
+ long etime = getTimeFromParam(req.getParameter("end"));\r
+ if (etime < 0) {\r
+ map.put("err", "bad end");\r
+ return map;\r
+ }\r
+ if (stime == 0 && etime == 0) {\r
+ etime = System.currentTimeMillis();\r
+ stime = etime - TWENTYFOUR_HOURS;\r
+ } else if (stime == 0) {\r
+ stime = etime - TWENTYFOUR_HOURS;\r
+ } else if (etime == 0) {\r
+ etime = stime + TWENTYFOUR_HOURS;\r
+ }\r
+ map.put("timeSQL", String.format(" AND EVENT_TIME >= %d AND EVENT_TIME <= %d", stime, etime));\r
+ return map;\r
+ }\r
+ private long getTimeFromParam(final String s) {\r
+ if (s == null)\r
+ return 0;\r
+ try {\r
+ // First, look for an RFC 3339 date\r
+ String fmt = (s.indexOf('.') > 0) ? fmt2 : fmt1;\r
+ SimpleDateFormat sdf = new SimpleDateFormat(fmt);\r
+ Date d = sdf.parse(s);\r
+ return d.getTime();\r
+ } catch (ParseException e) {\r
+ }\r
+ try {\r
+ // Also allow a long (in ms); useful for testing\r
+ long n = Long.parseLong(s);\r
+ return n;\r
+ } catch (NumberFormatException e) {\r
+ }\r
+ intlogger.info("Error parsing time="+s);\r
+ return -1;\r
+ }\r
\r
- private void getPublishRecordsForFeed(int feedid, RowHandler rh, Map<String, String> map) {\r
- String type = map.get("type");\r
- if (type.equals("all") || type.equals("pub")) {\r
- String sql = "select * from LOG_RECORDS where FEEDID = "+feedid\r
- + " AND TYPE = 'pub'"\r
- + map.get("timeSQL") + map.get("publishSQL") + map.get("statusSQL");\r
- getRecordsForSQL(sql, rh);\r
- }\r
- }\r
- private void getDeliveryRecordsForFeed(int feedid, RowHandler rh, Map<String, String> map) {\r
- String type = map.get("type");\r
- if (type.equals("all") || type.equals("del")) {\r
- String sql = "select * from LOG_RECORDS where FEEDID = "+feedid\r
- + " AND TYPE = 'del'"\r
- + map.get("timeSQL") + map.get("publishSQL") + map.get("resultSQL");\r
- getRecordsForSQL(sql, rh);\r
- }\r
- }\r
- private void getDeliveryRecordsForSubscription(int subid, RowHandler rh, Map<String, String> map) {\r
- String type = map.get("type");\r
- if (type.equals("all") || type.equals("del")) {\r
- String sql = "select * from LOG_RECORDS where DELIVERY_SUBID = "+subid\r
- + " AND TYPE = 'del'"\r
- + map.get("timeSQL") + map.get("publishSQL") + map.get("resultSQL");\r
- getRecordsForSQL(sql, rh);\r
- }\r
- }\r
- private void getExpiryRecordsForFeed(int feedid, RowHandler rh, Map<String, String> map) {\r
- String type = map.get("type");\r
- if (type.equals("all") || type.equals("exp")) {\r
- String st = map.get("statusSQL");\r
- if (st == null || st.length() == 0) {\r
- String sql = "select * from LOG_RECORDS where FEEDID = "+feedid\r
- + " AND TYPE = 'exp'"\r
- + map.get("timeSQL") + map.get("publishSQL") + map.get("reasonSQL");\r
- getRecordsForSQL(sql, rh);\r
- }\r
- }\r
- }\r
- private void getExpiryRecordsForSubscription(int subid, RowHandler rh, Map<String, String> map) {\r
- String type = map.get("type");\r
- if (type.equals("all") || type.equals("exp")) {\r
- String st = map.get("statusSQL");\r
- if (st == null || st.length() == 0) {\r
- String sql = "select * from LOG_RECORDS where DELIVERY_SUBID = "+subid\r
- + " AND TYPE = 'exp'"\r
- + map.get("timeSQL") + map.get("publishSQL") + map.get("reasonSQL");\r
- getRecordsForSQL(sql, rh);\r
- }\r
- }\r
- }\r
- private void getRecordsForSQL(String sql, RowHandler rh) {\r
- intlogger.debug(sql);\r
- long start = System.currentTimeMillis();\r
- DB db = new DB();\r
- Connection conn = null;\r
- try {\r
- conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery(sql);\r
- while (rs.next()) {\r
- rh.handleRow(rs);\r
- }\r
- rs.close();\r
- stmt.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- } finally {\r
- if (conn != null)\r
- db.release(conn);\r
- }\r
- intlogger.debug("Time: " + (System.currentTimeMillis()-start) + " ms");\r
- }\r
+ private void getPublishRecordsForFeed(int feedid, RowHandler rh, Map<String, String> map) {\r
+ String type = map.get("type");\r
+ if (type.equals("all") || type.equals("pub")) {\r
+ String sql = "select * from LOG_RECORDS where FEEDID = "+feedid\r
+ + " AND TYPE = 'pub'"\r
+ + map.get("timeSQL") + map.get("publishSQL") + map.get("statusSQL");\r
+ getRecordsForSQL(sql, rh);\r
+ }\r
+ }\r
+ private void getDeliveryRecordsForFeed(int feedid, RowHandler rh, Map<String, String> map) {\r
+ String type = map.get("type");\r
+ if (type.equals("all") || type.equals("del")) {\r
+ String sql = "select * from LOG_RECORDS where FEEDID = "+feedid\r
+ + " AND TYPE = 'del'"\r
+ + map.get("timeSQL") + map.get("publishSQL") + map.get("resultSQL");\r
+ getRecordsForSQL(sql, rh);\r
+ }\r
+ }\r
+ private void getDeliveryRecordsForSubscription(int subid, RowHandler rh, Map<String, String> map) {\r
+ String type = map.get("type");\r
+ if (type.equals("all") || type.equals("del")) {\r
+ String sql = "select * from LOG_RECORDS where DELIVERY_SUBID = "+subid\r
+ + " AND TYPE = 'del'"\r
+ + map.get("timeSQL") + map.get("publishSQL") + map.get("resultSQL");\r
+ getRecordsForSQL(sql, rh);\r
+ }\r
+ }\r
+ private void getExpiryRecordsForFeed(int feedid, RowHandler rh, Map<String, String> map) {\r
+ String type = map.get("type");\r
+ if (type.equals("all") || type.equals("exp")) {\r
+ String st = map.get("statusSQL");\r
+ if (st == null || st.length() == 0) {\r
+ String sql = "select * from LOG_RECORDS where FEEDID = "+feedid\r
+ + " AND TYPE = 'exp'"\r
+ + map.get("timeSQL") + map.get("publishSQL") + map.get("reasonSQL");\r
+ getRecordsForSQL(sql, rh);\r
+ }\r
+ }\r
+ }\r
+ private void getExpiryRecordsForSubscription(int subid, RowHandler rh, Map<String, String> map) {\r
+ String type = map.get("type");\r
+ if (type.equals("all") || type.equals("exp")) {\r
+ String st = map.get("statusSQL");\r
+ if (st == null || st.length() == 0) {\r
+ String sql = "select * from LOG_RECORDS where DELIVERY_SUBID = "+subid\r
+ + " AND TYPE = 'exp'"\r
+ + map.get("timeSQL") + map.get("publishSQL") + map.get("reasonSQL");\r
+ getRecordsForSQL(sql, rh);\r
+ }\r
+ }\r
+ }\r
+ private void getRecordsForSQL(String sql, RowHandler rh) {\r
+ intlogger.debug(sql);\r
+ long start = System.currentTimeMillis();\r
+ DB db = new DB();\r
+ Connection conn = null;\r
+ try {\r
+ conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery(sql);\r
+ while (rs.next()) {\r
+ rh.handleRow(rs);\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ } finally {\r
+ if (conn != null)\r
+ db.release(conn);\r
+ }\r
+ intlogger.debug("Time: " + (System.currentTimeMillis()-start) + " ms");\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* </p>\r
* <ul>\r
* <li>One background Thread runs the {@link LogfileLoader} in order to process incoming logfiles.\r
- * This Thread is created as a side effect of the first successful POST to the /internal/logs/ servlet.</li>\r
+ * This Thread is created as a side effect of the first successful POST to the /internal/logs/ servlet.</li>\r
* <li>One background Thread runs the {@link SynchronizerTask} which is used to periodically\r
- * synchronize the database between active and standby servers.</li>\r
+ * synchronize the database between active and standby servers.</li>\r
* <li>One background Thread runs the {@link Poker} which is used to notify the nodes whenever\r
- * provisioning data changes.</li>\r
+ * provisioning data changes.</li>\r
* <li>One task is run once a day to run {@link PurgeLogDirTask} which purges older logs from the\r
- * /opt/app/datartr/logs directory.</li>\r
+ * /opt/app/datartr/logs directory.</li>\r
* </ul>\r
* <p>\r
* The provisioning server is stopped by issuing a GET to the URL http://127.0.0.1/internal/halt\r
* @version $Id: Main.java,v 1.12 2014/03/12 19:45:41 eby Exp $\r
*/\r
public class Main {\r
- /** The truststore to use if none is specified */\r
- public static final String DEFAULT_TRUSTSTORE = "/opt/java/jdk/jdk180/jre/lib/security/cacerts";\r
- public static final String KEYSTORE_TYPE_PROPERTY = "org.onap.dmaap.datarouter.provserver.keystore.type";\r
- public static final String KEYSTORE_PATH_PROPERTY = "org.onap.dmaap.datarouter.provserver.keystore.path";\r
- public static final String KEYSTORE_PASSWORD_PROPERTY = "org.onap.dmaap.datarouter.provserver.keystore.password";\r
- public static final String TRUSTSTORE_PATH_PROPERTY = "org.onap.dmaap.datarouter.provserver.truststore.path";\r
- public static final String TRUSTSTORE_PASSWORD_PROPERTY = "org.onap.dmaap.datarouter.provserver.truststore.password";\r
-\r
- /** The one and only {@link Server} instance in this JVM */\r
- private static Server server;\r
-\r
- /**\r
- * Starts the Data Router Provisioning server.\r
- * @param args not used\r
- * @throws Exception if Jetty has a problem starting\r
- */\r
- public static void main(String[] args) throws Exception {\r
- Security.setProperty("networkaddress.cache.ttl", "4");\r
- Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
-\r
- // Check DB is accessible and contains the expected tables\r
- if (! checkDatabase(logger))\r
- System.exit(1);\r
-\r
- logger.info("PROV0000 **** AT&T Data Router Provisioning Server starting....");\r
-\r
- // Get properties\r
- Properties p = (new DB()).getProperties();\r
- int http_port = Integer.parseInt(p.getProperty("org.onap.dmaap.datarouter.provserver.http.port", "8080"));\r
- int https_port = Integer.parseInt(p.getProperty("org.onap.dmaap.datarouter.provserver.https.port", "8443"));\r
-\r
- // HTTP connector\r
- SelectChannelConnector http = new SelectChannelConnector();\r
- http.setPort(http_port);\r
- http.setMaxIdleTime(300000);\r
- http.setRequestHeaderSize(2048);\r
- http.setAcceptors(2);\r
- http.setConfidentialPort(https_port);\r
- http.setLowResourcesConnections(20000);\r
-\r
- // HTTPS connector\r
- SslSelectChannelConnector https = new SslSelectChannelConnector();\r
- https.setPort(https_port);\r
- https.setMaxIdleTime(30000);\r
- https.setRequestHeaderSize(8192);\r
- https.setAcceptors(2);\r
-\r
- // SSL stuff\r
- SslContextFactory cf = https.getSslContextFactory();\r
- \r
- /**Skip SSLv3 Fixes*/\r
- cf.addExcludeProtocols("SSLv3");\r
- logger.info("Excluded protocols prov-"+cf.getExcludeProtocols());\r
- /**End of SSLv3 Fixes*/\r
-\r
- cf.setKeyStoreType(p.getProperty(KEYSTORE_TYPE_PROPERTY, "jks"));\r
- cf.setKeyStorePath(p.getProperty(KEYSTORE_PATH_PROPERTY));\r
- cf.setKeyStorePassword(p.getProperty(KEYSTORE_PASSWORD_PROPERTY));\r
- cf.setKeyManagerPassword(p.getProperty("org.onap.dmaap.datarouter.provserver.keymanager.password"));\r
- String ts = p.getProperty(TRUSTSTORE_PATH_PROPERTY);\r
- if (ts != null && ts.length() > 0) {\r
- System.out.println("@@ TS -> "+ts);\r
- cf.setTrustStore(ts);\r
- cf.setTrustStorePassword(p.getProperty(TRUSTSTORE_PASSWORD_PROPERTY));\r
- } else {\r
- cf.setTrustStore(DEFAULT_TRUSTSTORE);\r
- cf.setTrustStorePassword("changeit");\r
- }\r
- cf.setTrustStore("/opt/app/datartr/self_signed/cacerts.jks");\r
- cf.setTrustStorePassword("changeit");\r
- cf.setWantClientAuth(true);\r
-\r
- // Servlet and Filter configuration\r
- ServletContextHandler ctxt = new ServletContextHandler(0);\r
- ctxt.setContextPath("/");\r
- ctxt.addServlet(new ServletHolder(new FeedServlet()), "/feed/*");\r
- ctxt.addServlet(new ServletHolder(new FeedLogServlet()), "/feedlog/*");\r
- ctxt.addServlet(new ServletHolder(new PublishServlet()), "/publish/*");\r
- ctxt.addServlet(new ServletHolder(new SubscribeServlet()), "/subscribe/*");\r
- ctxt.addServlet(new ServletHolder(new StatisticsServlet()), "/statistics/*");\r
- ctxt.addServlet(new ServletHolder(new SubLogServlet()), "/sublog/*");\r
- ctxt.addServlet(new ServletHolder(new GroupServlet()), "/group/*"); //Provision groups - Rally US708115 -1610 \r
- ctxt.addServlet(new ServletHolder(new SubscriptionServlet()), "/subs/*");\r
- ctxt.addServlet(new ServletHolder(new InternalServlet()), "/internal/*");\r
- ctxt.addServlet(new ServletHolder(new RouteServlet()), "/internal/route/*");\r
- ctxt.addServlet(new ServletHolder(new DRFeedsServlet()), "/");\r
- ctxt.addFilter (new FilterHolder (new ThrottleFilter()), "/publish/*", FilterMapping.REQUEST);\r
-\r
- ContextHandlerCollection contexts = new ContextHandlerCollection();\r
- contexts.addHandler(ctxt);\r
-\r
- // Request log configuration\r
- NCSARequestLog nrl = new NCSARequestLog();\r
- nrl.setFilename(p.getProperty("org.onap.dmaap.datarouter.provserver.accesslog.dir") + "/request.log.yyyy_mm_dd");\r
- nrl.setFilenameDateFormat("yyyyMMdd");\r
- nrl.setRetainDays(90);\r
- nrl.setAppend(true);\r
- nrl.setExtended(false);\r
- nrl.setLogCookies(false);\r
- nrl.setLogTimeZone("GMT");\r
-\r
- RequestLogHandler reqlog = new RequestLogHandler();\r
- reqlog.setRequestLog(nrl);\r
-\r
- // Server's Handler collection\r
- HandlerCollection hc = new HandlerCollection();\r
- hc.setHandlers(new Handler[] { contexts, new DefaultHandler() });\r
- hc.addHandler(reqlog);\r
-\r
- // Server's thread pool\r
- QueuedThreadPool pool = new QueuedThreadPool();\r
- pool.setMinThreads(10);\r
- pool.setMaxThreads(200);\r
- pool.setDetailedDump(false);\r
-\r
- // Daemon to clean up the log directory on a daily basis\r
- Timer rolex = new Timer();\r
- rolex.scheduleAtFixedRate(new PurgeLogDirTask(), 0, 86400000L); // run once per day\r
-\r
- // Start LogfileLoader\r
- LogfileLoader.getLoader();\r
-\r
- // The server itself\r
- server = new Server();\r
- server.setThreadPool(pool);\r
- server.setConnectors(new Connector[] { http, https });\r
- server.setHandler(hc);\r
- server.setStopAtShutdown(true);\r
- server.setSendServerVersion(true);\r
- server.setSendDateHeader(true);\r
- server.setGracefulShutdown(5000); // allow 5 seconds for servlets to wrap up\r
- server.setDumpAfterStart(false);\r
- server.setDumpBeforeStop(false);\r
-\r
- server.start();\r
- server.join();\r
- logger.info("PROV0001 **** AT&T Data Router Provisioning Server halted.");\r
- }\r
-\r
- private static boolean checkDatabase(Logger logger) {\r
- DB db = new DB();\r
- return db.runRetroFits();\r
- }\r
-\r
- /**\r
- * Stop the Jetty server.\r
- */\r
- public static void shutdown() {\r
- new Thread() {\r
- @Override\r
- public void run() {\r
- try {\r
- server.stop();\r
- Thread.sleep(5000L);\r
- System.exit(0);\r
- } catch (Exception e) {\r
- // ignore\r
- }\r
- }\r
- }.start();\r
- }\r
+ /**\r
+ * The truststore to use if none is specified\r
+ */\r
+ public static final String DEFAULT_TRUSTSTORE = "/opt/java/jdk/jdk180/jre/lib/security/cacerts";\r
+ public static final String KEYSTORE_TYPE_PROPERTY = "org.onap.dmaap.datarouter.provserver.keystore.type";\r
+ public static final String KEYSTORE_PATH_PROPERTY = "org.onap.dmaap.datarouter.provserver.keystore.path";\r
+ public static final String KEYSTORE_PASSWORD_PROPERTY = "org.onap.dmaap.datarouter.provserver.keystore.password";\r
+ public static final String TRUSTSTORE_PATH_PROPERTY = "org.onap.dmaap.datarouter.provserver.truststore.path";\r
+ public static final String TRUSTSTORE_PASSWORD_PROPERTY = "org.onap.dmaap.datarouter.provserver.truststore.password";\r
+\r
+ /**\r
+ * The one and only {@link Server} instance in this JVM\r
+ */\r
+ private static Server server;\r
+\r
+ /**\r
+ * Starts the Data Router Provisioning server.\r
+ *\r
+ * @param args not used\r
+ * @throws Exception if Jetty has a problem starting\r
+ */\r
+ public static void main(String[] args) throws Exception {\r
+ Security.setProperty("networkaddress.cache.ttl", "4");\r
+ Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
+\r
+ // Check DB is accessible and contains the expected tables\r
+ if (!checkDatabase(logger))\r
+ System.exit(1);\r
+\r
+ logger.info("PROV0000 **** AT&T Data Router Provisioning Server starting....");\r
+\r
+ // Get properties\r
+ Properties p = (new DB()).getProperties();\r
+ int http_port = Integer.parseInt(p.getProperty("org.onap.dmaap.datarouter.provserver.http.port", "8080"));\r
+ int https_port = Integer.parseInt(p.getProperty("org.onap.dmaap.datarouter.provserver.https.port", "8443"));\r
+\r
+ // HTTP connector\r
+ SelectChannelConnector http = new SelectChannelConnector();\r
+ http.setPort(http_port);\r
+ http.setMaxIdleTime(300000);\r
+ http.setRequestHeaderSize(2048);\r
+ http.setAcceptors(2);\r
+ http.setConfidentialPort(https_port);\r
+ http.setLowResourcesConnections(20000);\r
+\r
+ // HTTPS connector\r
+ SslSelectChannelConnector https = new SslSelectChannelConnector();\r
+ https.setPort(https_port);\r
+ https.setMaxIdleTime(30000);\r
+ https.setRequestHeaderSize(8192);\r
+ https.setAcceptors(2);\r
+\r
+ // SSL stuff\r
+ SslContextFactory cf = https.getSslContextFactory();\r
+\r
+ /**Skip SSLv3 Fixes*/\r
+ cf.addExcludeProtocols("SSLv3");\r
+ logger.info("Excluded protocols prov-" + cf.getExcludeProtocols());\r
+ /**End of SSLv3 Fixes*/\r
+\r
+ cf.setKeyStoreType(p.getProperty(KEYSTORE_TYPE_PROPERTY, "jks"));\r
+ cf.setKeyStorePath(p.getProperty(KEYSTORE_PATH_PROPERTY));\r
+ cf.setKeyStorePassword(p.getProperty(KEYSTORE_PASSWORD_PROPERTY));\r
+ cf.setKeyManagerPassword(p.getProperty("org.onap.dmaap.datarouter.provserver.keymanager.password"));\r
+ String ts = p.getProperty(TRUSTSTORE_PATH_PROPERTY);\r
+ if (ts != null && ts.length() > 0) {\r
+ System.out.println("@@ TS -> " + ts);\r
+ cf.setTrustStore(ts);\r
+ cf.setTrustStorePassword(p.getProperty(TRUSTSTORE_PASSWORD_PROPERTY));\r
+ } else {\r
+ cf.setTrustStore(DEFAULT_TRUSTSTORE);\r
+ cf.setTrustStorePassword("changeit");\r
+ }\r
+ cf.setTrustStore("/opt/app/datartr/self_signed/cacerts.jks");\r
+ cf.setTrustStorePassword("changeit");\r
+ cf.setWantClientAuth(true);\r
+\r
+ // Servlet and Filter configuration\r
+ ServletContextHandler ctxt = new ServletContextHandler(0);\r
+ ctxt.setContextPath("/");\r
+ ctxt.addServlet(new ServletHolder(new FeedServlet()), "/feed/*");\r
+ ctxt.addServlet(new ServletHolder(new FeedLogServlet()), "/feedlog/*");\r
+ ctxt.addServlet(new ServletHolder(new PublishServlet()), "/publish/*");\r
+ ctxt.addServlet(new ServletHolder(new SubscribeServlet()), "/subscribe/*");\r
+ ctxt.addServlet(new ServletHolder(new StatisticsServlet()), "/statistics/*");\r
+ ctxt.addServlet(new ServletHolder(new SubLogServlet()), "/sublog/*");\r
+ ctxt.addServlet(new ServletHolder(new GroupServlet()), "/group/*"); //Provision groups - Rally US708115 -1610\r
+ ctxt.addServlet(new ServletHolder(new SubscriptionServlet()), "/subs/*");\r
+ ctxt.addServlet(new ServletHolder(new InternalServlet()), "/internal/*");\r
+ ctxt.addServlet(new ServletHolder(new RouteServlet()), "/internal/route/*");\r
+ ctxt.addServlet(new ServletHolder(new DRFeedsServlet()), "/");\r
+ ctxt.addFilter(new FilterHolder(new ThrottleFilter()), "/publish/*", FilterMapping.REQUEST);\r
+\r
+ ContextHandlerCollection contexts = new ContextHandlerCollection();\r
+ contexts.addHandler(ctxt);\r
+\r
+ // Request log configuration\r
+ NCSARequestLog nrl = new NCSARequestLog();\r
+ nrl.setFilename(p.getProperty("org.onap.dmaap.datarouter.provserver.accesslog.dir") + "/request.log.yyyy_mm_dd");\r
+ nrl.setFilenameDateFormat("yyyyMMdd");\r
+ nrl.setRetainDays(90);\r
+ nrl.setAppend(true);\r
+ nrl.setExtended(false);\r
+ nrl.setLogCookies(false);\r
+ nrl.setLogTimeZone("GMT");\r
+\r
+ RequestLogHandler reqlog = new RequestLogHandler();\r
+ reqlog.setRequestLog(nrl);\r
+\r
+ // Server's Handler collection\r
+ HandlerCollection hc = new HandlerCollection();\r
+ hc.setHandlers(new Handler[]{contexts, new DefaultHandler()});\r
+ hc.addHandler(reqlog);\r
+\r
+ // Server's thread pool\r
+ QueuedThreadPool pool = new QueuedThreadPool();\r
+ pool.setMinThreads(10);\r
+ pool.setMaxThreads(200);\r
+ pool.setDetailedDump(false);\r
+\r
+ // Daemon to clean up the log directory on a daily basis\r
+ Timer rolex = new Timer();\r
+ rolex.scheduleAtFixedRate(new PurgeLogDirTask(), 0, 86400000L); // run once per day\r
+\r
+ // Start LogfileLoader\r
+ LogfileLoader.getLoader();\r
+\r
+ // The server itself\r
+ server = new Server();\r
+ server.setThreadPool(pool);\r
+ server.setConnectors(new Connector[]{http, https});\r
+ server.setHandler(hc);\r
+ server.setStopAtShutdown(true);\r
+ server.setSendServerVersion(true);\r
+ server.setSendDateHeader(true);\r
+ server.setGracefulShutdown(5000); // allow 5 seconds for servlets to wrap up\r
+ server.setDumpAfterStart(false);\r
+ server.setDumpBeforeStop(false);\r
+\r
+ server.start();\r
+ server.join();\r
+ logger.info("PROV0001 **** AT&T Data Router Provisioning Server halted.");\r
+ }\r
+\r
+ private static boolean checkDatabase(Logger logger) {\r
+ DB db = new DB();\r
+ return db.runRetroFits();\r
+ }\r
+\r
+ /**\r
+ * Stop the Jetty server.\r
+ */\r
+ public static void shutdown() {\r
+ new Thread() {\r
+ @Override\r
+ public void run() {\r
+ try {\r
+ server.stop();\r
+ Thread.sleep(5000L);\r
+ System.exit(0);\r
+ } catch (Exception e) {\r
+ // ignore\r
+ }\r
+ }\r
+ }.start();\r
+ }\r
}\r
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-package org.onap.dmaap.datarouter.provisioning;\r
-\r
-import java.io.FileInputStream;\r
-import java.io.IOException;\r
-import java.io.InputStream;\r
-import java.net.HttpURLConnection;\r
-import java.net.InetAddress;\r
-import java.net.MalformedURLException;\r
-import java.net.URL;\r
-import java.net.UnknownHostException;\r
-import java.util.Arrays;\r
-import java.util.HashSet;\r
-import java.util.Map;\r
-import java.util.Properties;\r
-import java.util.Set;\r
-import java.util.Timer;\r
-import java.util.TimerTask;\r
-import java.util.TreeSet;\r
-\r
-import javax.servlet.ServletException;\r
-\r
-import org.apache.log4j.Logger;\r
-import org.json.JSONException;\r
-import org.json.JSONObject;\r
-import org.json.JSONTokener;\r
-import org.onap.dmaap.datarouter.provisioning.beans.EgressRoute;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Feed;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Group;\r
-import org.onap.dmaap.datarouter.provisioning.beans.IngressRoute;\r
-import org.onap.dmaap.datarouter.provisioning.beans.NetworkRoute;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Parameters;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Subscription;\r
-import org.onap.dmaap.datarouter.provisioning.utils.*;\r
-\r
-/**\r
- * This class handles the two timers (described in R1 Design Notes), and takes care of issuing\r
- * the GET to each node of the URL to "poke".\r
- *\r
- * @author Robert Eby\r
- * @version $Id: Poker.java,v 1.11 2014/01/08 16:13:47 eby Exp $\r
- */\r
-public class Poker extends TimerTask {\r
- /** Template used to generate the URL to issue the GET against */\r
- public static final String POKE_URL_TEMPLATE = "http://%s/internal/fetchProv";\r
- \r
- \r
- \r
-\r
- /** This is a singleton -- there is only one Poker object in the server */\r
- private static Poker p;\r
-\r
- /**\r
- * Get the singleton Poker object.\r
- * @return the Poker\r
- */\r
- public static synchronized Poker getPoker() {\r
- if (p == null)\r
- p = new Poker();\r
- return p;\r
- }\r
-\r
- private long timer1;\r
- private long timer2;\r
- private Timer rolex;\r
- private String this_pod; // DNS name of this machine\r
- private Logger logger;\r
- private String provstring;\r
-\r
- private Poker() {\r
- timer1 = timer2 = 0;\r
- rolex = new Timer();\r
- logger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
- try {\r
- this_pod = InetAddress.getLocalHost().getHostName();\r
- } catch (UnknownHostException e) {\r
- this_pod = "*UNKNOWN*"; // not a major problem\r
- }\r
- provstring = buildProvisioningString();\r
-\r
- rolex.scheduleAtFixedRate(this, 0L, 1000L); // Run once a second to check the timers\r
- }\r
-\r
- /**\r
- * This method sets the two timers described in the design notes.\r
- * @param t1 the first timer controls how long to wait after a provisioning request before poking each node\r
- * This timer can be reset if it has not "gone off".\r
- * @param t2 the second timer set the outer bound on how long to wait. It cannot be reset.\r
- */\r
- public void setTimers(long t1, long t2) {\r
- synchronized (this_pod) {\r
- if (timer1 == 0 || t1 > timer1)\r
- timer1 = t1;\r
- if (timer2 == 0)\r
- timer2 = t2;\r
- }\r
- if (logger.isDebugEnabled())\r
- logger.debug("Poker timers set to " + timer1 + " and " + timer2);\r
- \r
- \r
- }\r
-\r
- /**\r
- * Return the last provisioning string built.\r
- * @return the last provisioning string built.\r
- */\r
- public String getProvisioningString() {\r
- return provstring;\r
- }\r
-\r
- /**\r
- * The method to run at the predefined interval (once per second). This method checks\r
- * to see if either of the two timers has expired, and if so, will rebuild the provisioning\r
- * string, and poke all the nodes and other PODs. The timers are then reset to 0.\r
- */\r
- @Override\r
- public void run() {\r
- try {\r
- if (timer1 > 0) {\r
- long now = System.currentTimeMillis();\r
- boolean fire = false;\r
- synchronized (this_pod) {\r
- if (now > timer1 || now > timer2) {\r
- timer1 = timer2 = 0;\r
- fire = true;\r
- }\r
- }\r
- if (fire) {\r
- // Rebuild the prov string\r
- provstring = buildProvisioningString();\r
-\r
- // Only the active POD should poke nodes, etc.\r
- boolean active = SynchronizerTask.getSynchronizer().isActive();\r
- if (active) {\r
- // Poke all the DR nodes\r
- for (String n : BaseServlet.getNodes()) {\r
- pokeNode(n);\r
- }\r
- // Poke the pod that is not us\r
- for (String n : BaseServlet.getPods()) {\r
- if (n.length() > 0 && !n.equals(this_pod))\r
- pokeNode(n);\r
- }\r
- }\r
- }\r
- }\r
- } catch (Exception e) {\r
- logger.warn("PROV0020: Caught exception in Poker: "+e);\r
- e.printStackTrace();\r
- }\r
- }\r
- private void pokeNode(final String nodename) {\r
- logger.debug("PROV0012 Poking node " + nodename + " ...");\r
- Runnable r = new Runnable() {\r
- @Override\r
- public void run() {\r
- \r
- try {\r
- String u = String.format(POKE_URL_TEMPLATE, nodename+":"+DB.HTTP_PORT);\r
- URL url = new URL(u);\r
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();\r
- conn.setConnectTimeout(60000); //Fixes for Itrack DATARTR-3, poke timeout\r
- conn.connect();\r
- conn.getContentLength(); // Force the GET through\r
- conn.disconnect();\r
- } catch (MalformedURLException e) {\r
- logger.warn("PROV0013 MalformedURLException Error poking node "+nodename+": " + e.getMessage());\r
- } catch (IOException e) {\r
- logger.warn("PROV0013 IOException Error poking node "+nodename+": " + e.getMessage());\r
- }\r
- }\r
- };\r
-// Thread t = new Thread(r);\r
-// t.start();\r
- r.run();\r
- }\r
- @SuppressWarnings("unused")\r
- private String buildProvisioningString() {\r
- StringBuilder sb = new StringBuilder("{\n");\r
-\r
- // Append Feeds to the string\r
- String pfx = "\n";\r
- sb.append("\"feeds\": [");\r
- for (Feed f : Feed.getAllFeeds()) {\r
- sb.append(pfx);\r
- sb.append(f.asJSONObject().toString());\r
- pfx = ",\n";\r
- }\r
- sb.append("\n],\n");\r
- \r
- //Append groups to the string - Rally:US708115 - 1610 \r
- pfx = "\n"; \r
- sb.append("\"groups\": ["); \r
- for (Group s : Group.getAllgroups()) { \r
- sb.append(pfx); \r
- sb.append(s.asJSONObject().toString()); \r
- pfx = ",\n"; \r
- } \r
- sb.append("\n],\n"); \r
- \r
-\r
- // Append Subscriptions to the string\r
- pfx = "\n";\r
- sb.append("\"subscriptions\": [");\r
- for (Subscription s : Subscription.getAllSubscriptions()) {\r
- sb.append(pfx);\r
- if(s!=null)\r
- sb.append(s.asJSONObject().toString());\r
- pfx = ",\n";\r
- }\r
- sb.append("\n],\n");\r
-\r
- // Append Parameters to the string\r
- pfx = "\n";\r
- sb.append("\"parameters\": {");\r
- Map<String,String> props = Parameters.getParameters();\r
- Set<String> ivals = new HashSet<String>();\r
- String intv = props.get("_INT_VALUES");\r
- if (intv != null)\r
- ivals.addAll(Arrays.asList(intv.split("\\|")));\r
- for (String key : new TreeSet<String>(props.keySet())) {\r
- String v = props.get(key);\r
- sb.append(pfx);\r
- sb.append(" \"").append(key).append("\": ");\r
- if (ivals.contains(key)) {\r
- // integer value\r
- sb.append(v);\r
- } else if (key.endsWith("S")) {\r
- // Split and append array of strings\r
- String[] pp = v.split("\\|");\r
- String p2 = "";\r
- sb.append("[");\r
- for (String t : pp) {\r
- sb.append(p2).append("\"").append(quote(t)).append("\"");\r
- p2 = ",";\r
- }\r
- sb.append("]");\r
- } else {\r
- sb.append("\"").append(quote(v)).append("\"");\r
- }\r
- pfx = ",\n";\r
- }\r
- sb.append("\n},\n");\r
-\r
- // Append Routes to the string\r
- pfx = "\n";\r
- sb.append("\"ingress\": [");\r
- for (IngressRoute in : IngressRoute.getAllIngressRoutes()) {\r
- sb.append(pfx);\r
- sb.append(in.asJSONObject().toString());\r
- pfx = ",\n";\r
- }\r
- sb.append("\n],\n");\r
-\r
- pfx = "\n";\r
- sb.append("\"egress\": {");\r
- for (EgressRoute eg : EgressRoute.getAllEgressRoutes()) {\r
- sb.append(pfx);\r
- String t = eg.asJSONObject().toString();\r
- t = t.substring(1, t.length()-1);\r
- sb.append(t);\r
- pfx = ",\n";\r
- }\r
- sb.append("\n},\n");\r
-\r
- pfx = "\n";\r
- sb.append("\"routing\": [");\r
- for (NetworkRoute ne : NetworkRoute.getAllNetworkRoutes()) {\r
- sb.append(pfx);\r
- sb.append(ne.asJSONObject().toString());\r
- pfx = ",\n";\r
- }\r
- sb.append("\n]");\r
- sb.append("\n}");\r
-\r
- // Convert to string and verify it is valid JSON\r
- String provstring = sb.toString();\r
- try {\r
- new JSONObject(new JSONTokener(provstring));\r
- } catch (JSONException e) {\r
- logger.warn("PROV0016: Possible invalid prov string: "+e);\r
- }\r
- return provstring;\r
- }\r
- private String quote(String s) {\r
- StringBuilder sb = new StringBuilder();\r
- for (char ch : s.toCharArray()) {\r
- if (ch == '\\' || ch == '"') {\r
- sb.append('\\');\r
- }\r
- sb.append(ch);\r
- }\r
- return sb.toString();\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+package org.onap.dmaap.datarouter.provisioning;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.InetAddress;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.TreeSet;
+
+import javax.servlet.ServletException;
+
+import org.apache.log4j.Logger;
+import org.json.JSONException;
+import org.json.JSONObject;
+import org.json.JSONTokener;
+import org.onap.dmaap.datarouter.provisioning.beans.EgressRoute;
+import org.onap.dmaap.datarouter.provisioning.beans.Feed;
+import org.onap.dmaap.datarouter.provisioning.beans.Group;
+import org.onap.dmaap.datarouter.provisioning.beans.IngressRoute;
+import org.onap.dmaap.datarouter.provisioning.beans.NetworkRoute;
+import org.onap.dmaap.datarouter.provisioning.beans.Parameters;
+import org.onap.dmaap.datarouter.provisioning.beans.Subscription;
+import org.onap.dmaap.datarouter.provisioning.utils.*;
+
+/**
+ * This class handles the two timers (described in R1 Design Notes), and takes care of issuing
+ * the GET to each node of the URL to "poke".
+ *
+ * @author Robert Eby
+ * @version $Id: Poker.java,v 1.11 2014/01/08 16:13:47 eby Exp $
+ */
+public class Poker extends TimerTask {
+ /** Template used to generate the URL to issue the GET against */
+ public static final String POKE_URL_TEMPLATE = "http://%s/internal/fetchProv";
+
+
+
+
+ /** This is a singleton -- there is only one Poker object in the server */
+ private static Poker p;
+
+ /**
+ * Get the singleton Poker object.
+ * @return the Poker
+ */
+ public static synchronized Poker getPoker() {
+ if (p == null)
+ p = new Poker();
+ return p;
+ }
+
+ private long timer1;
+ private long timer2;
+ private Timer rolex;
+ private String this_pod; // DNS name of this machine
+ private Logger logger;
+ private String provstring;
+
+ private Poker() {
+ timer1 = timer2 = 0;
+ rolex = new Timer();
+ logger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");
+ try {
+ this_pod = InetAddress.getLocalHost().getHostName();
+ } catch (UnknownHostException e) {
+ this_pod = "*UNKNOWN*"; // not a major problem
+ }
+ provstring = buildProvisioningString();
+
+ rolex.scheduleAtFixedRate(this, 0L, 1000L); // Run once a second to check the timers
+ }
+
+ /**
+ * This method sets the two timers described in the design notes.
+ * @param t1 the first timer controls how long to wait after a provisioning request before poking each node
+ * This timer can be reset if it has not "gone off".
+ * @param t2 the second timer set the outer bound on how long to wait. It cannot be reset.
+ */
+ public void setTimers(long t1, long t2) {
+ synchronized (this_pod) {
+ if (timer1 == 0 || t1 > timer1)
+ timer1 = t1;
+ if (timer2 == 0)
+ timer2 = t2;
+ }
+ if (logger.isDebugEnabled())
+ logger.debug("Poker timers set to " + timer1 + " and " + timer2);
+
+
+ }
+
+ /**
+ * Return the last provisioning string built.
+ * @return the last provisioning string built.
+ */
+ public String getProvisioningString() {
+ return provstring;
+ }
+
+ /**
+ * The method to run at the predefined interval (once per second). This method checks
+ * to see if either of the two timers has expired, and if so, will rebuild the provisioning
+ * string, and poke all the nodes and other PODs. The timers are then reset to 0.
+ */
+ @Override
+ public void run() {
+ try {
+ if (timer1 > 0) {
+ long now = System.currentTimeMillis();
+ boolean fire = false;
+ synchronized (this_pod) {
+ if (now > timer1 || now > timer2) {
+ timer1 = timer2 = 0;
+ fire = true;
+ }
+ }
+ if (fire) {
+ // Rebuild the prov string
+ provstring = buildProvisioningString();
+
+ // Only the active POD should poke nodes, etc.
+ boolean active = SynchronizerTask.getSynchronizer().isActive();
+ if (active) {
+ // Poke all the DR nodes
+ for (String n : BaseServlet.getNodes()) {
+ pokeNode(n);
+ }
+ // Poke the pod that is not us
+ for (String n : BaseServlet.getPods()) {
+ if (n.length() > 0 && !n.equals(this_pod))
+ pokeNode(n);
+ }
+ }
+ }
+ }
+ } catch (Exception e) {
+ logger.warn("PROV0020: Caught exception in Poker: "+e);
+ e.printStackTrace();
+ }
+ }
+ private void pokeNode(final String nodename) {
+ logger.debug("PROV0012 Poking node " + nodename + " ...");
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+
+ try {
+ String u = String.format(POKE_URL_TEMPLATE, nodename+":"+DB.HTTP_PORT);
+ URL url = new URL(u);
+ HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+ conn.setConnectTimeout(60000); //Fixes for Itrack DATARTR-3, poke timeout
+ conn.connect();
+ conn.getContentLength(); // Force the GET through
+ conn.disconnect();
+ } catch (MalformedURLException e) {
+ logger.warn("PROV0013 MalformedURLException Error poking node "+nodename+": " + e.getMessage());
+ } catch (IOException e) {
+ logger.warn("PROV0013 IOException Error poking node "+nodename+": " + e.getMessage());
+ }
+ }
+ };
+// Thread t = new Thread(r);
+// t.start();
+ r.run();
+ }
+ @SuppressWarnings("unused")
+ private String buildProvisioningString() {
+ StringBuilder sb = new StringBuilder("{\n");
+
+ // Append Feeds to the string
+ String pfx = "\n";
+ sb.append("\"feeds\": [");
+ for (Feed f : Feed.getAllFeeds()) {
+ sb.append(pfx);
+ sb.append(f.asJSONObject().toString());
+ pfx = ",\n";
+ }
+ sb.append("\n],\n");
+
+ //Append groups to the string - Rally:US708115 - 1610
+ pfx = "\n";
+ sb.append("\"groups\": [");
+ for (Group s : Group.getAllgroups()) {
+ sb.append(pfx);
+ sb.append(s.asJSONObject().toString());
+ pfx = ",\n";
+ }
+ sb.append("\n],\n");
+
+
+ // Append Subscriptions to the string
+ pfx = "\n";
+ sb.append("\"subscriptions\": [");
+ for (Subscription s : Subscription.getAllSubscriptions()) {
+ sb.append(pfx);
+ if(s!=null)
+ sb.append(s.asJSONObject().toString());
+ pfx = ",\n";
+ }
+ sb.append("\n],\n");
+
+ // Append Parameters to the string
+ pfx = "\n";
+ sb.append("\"parameters\": {");
+ Map<String,String> props = Parameters.getParameters();
+ Set<String> ivals = new HashSet<String>();
+ String intv = props.get("_INT_VALUES");
+ if (intv != null)
+ ivals.addAll(Arrays.asList(intv.split("\\|")));
+ for (String key : new TreeSet<String>(props.keySet())) {
+ String v = props.get(key);
+ sb.append(pfx);
+ sb.append(" \"").append(key).append("\": ");
+ if (ivals.contains(key)) {
+ // integer value
+ sb.append(v);
+ } else if (key.endsWith("S")) {
+ // Split and append array of strings
+ String[] pp = v.split("\\|");
+ String p2 = "";
+ sb.append("[");
+ for (String t : pp) {
+ sb.append(p2).append("\"").append(quote(t)).append("\"");
+ p2 = ",";
+ }
+ sb.append("]");
+ } else {
+ sb.append("\"").append(quote(v)).append("\"");
+ }
+ pfx = ",\n";
+ }
+ sb.append("\n},\n");
+
+ // Append Routes to the string
+ pfx = "\n";
+ sb.append("\"ingress\": [");
+ for (IngressRoute in : IngressRoute.getAllIngressRoutes()) {
+ sb.append(pfx);
+ sb.append(in.asJSONObject().toString());
+ pfx = ",\n";
+ }
+ sb.append("\n],\n");
+
+ pfx = "\n";
+ sb.append("\"egress\": {");
+ for (EgressRoute eg : EgressRoute.getAllEgressRoutes()) {
+ sb.append(pfx);
+ String t = eg.asJSONObject().toString();
+ t = t.substring(1, t.length()-1);
+ sb.append(t);
+ pfx = ",\n";
+ }
+ sb.append("\n},\n");
+
+ pfx = "\n";
+ sb.append("\"routing\": [");
+ for (NetworkRoute ne : NetworkRoute.getAllNetworkRoutes()) {
+ sb.append(pfx);
+ sb.append(ne.asJSONObject().toString());
+ pfx = ",\n";
+ }
+ sb.append("\n]");
+ sb.append("\n}");
+
+ // Convert to string and verify it is valid JSON
+ String provstring = sb.toString();
+ try {
+ new JSONObject(new JSONTokener(provstring));
+ } catch (JSONException e) {
+ logger.warn("PROV0016: Possible invalid prov string: "+e);
+ }
+ return provstring;
+ }
+ private String quote(String s) {
+ StringBuilder sb = new StringBuilder();
+ for (char ch : s.toCharArray()) {
+ if (ch == '\\' || ch == '"') {
+ sb.append('\\');
+ }
+ sb.append(ch);
+ }
+ return sb.toString();
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.provisioning;\r
-\r
-import java.io.File;\r
-import java.io.FileInputStream;\r
-import java.io.FileNotFoundException;\r
-import java.io.IOException;\r
-import java.io.InputStream;\r
-import java.net.URI;\r
-import java.security.KeyStore;\r
-import java.security.KeyStoreException;\r
-import java.util.Collections;\r
-import java.util.List;\r
-import java.util.Properties;\r
-\r
-import javax.servlet.ServletConfig;\r
-import javax.servlet.ServletException;\r
-import javax.servlet.http.HttpServletRequest;\r
-import javax.servlet.http.HttpServletResponse;\r
-\r
-import org.apache.commons.io.IOUtils;\r
-import org.apache.http.Header;\r
-import org.apache.http.HttpEntity;\r
-import org.apache.http.HttpResponse;\r
-import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;\r
-import org.apache.http.client.methods.HttpGet;\r
-import org.apache.http.client.methods.HttpRequestBase;\r
-import org.apache.http.conn.scheme.Scheme;\r
-import org.apache.http.conn.ssl.SSLSocketFactory;\r
-import org.apache.http.entity.BasicHttpEntity;\r
-import org.apache.http.impl.client.AbstractHttpClient;\r
-import org.apache.http.impl.client.DefaultHttpClient;\r
-import org.onap.dmaap.datarouter.provisioning.utils.DB;\r
-import org.onap.dmaap.datarouter.provisioning.utils.URLUtilities;\r
-\r
-/**\r
- * This class is the base class for those servlets that need to proxy their requests from the\r
- * standby to active server. Its methods perform the proxy function to the active server. If the\r
- * active server is not reachable, a 503 (SC_SERVICE_UNAVAILABLE) is returned. Only\r
- * DELETE/GET/PUT/POST are supported.\r
- *\r
- * @author Robert Eby\r
- * @version $Id: ProxyServlet.java,v 1.3 2014/03/24 18:47:10 eby Exp $\r
- */\r
-@SuppressWarnings("serial")\r
-public class ProxyServlet extends BaseServlet {\r
- private boolean inited = false;\r
- private Scheme sch;\r
-\r
- /**\r
- * Initialize this servlet, by setting up SSL.\r
- */\r
- @SuppressWarnings("deprecation")\r
- @Override\r
- public void init(ServletConfig config) throws ServletException {\r
- super.init(config);\r
- try {\r
- // Set up keystore\r
- Properties props = (new DB()).getProperties();\r
- String type = props.getProperty(Main.KEYSTORE_TYPE_PROPERTY, "jks");\r
- String store = props.getProperty(Main.KEYSTORE_PATH_PROPERTY);\r
- String pass = props.getProperty(Main.KEYSTORE_PASSWORD_PROPERTY);\r
- KeyStore keyStore = readStore(store, pass, type);\r
-\r
- store = props.getProperty(Main.TRUSTSTORE_PATH_PROPERTY);\r
- pass = props.getProperty(Main.TRUSTSTORE_PASSWORD_PROPERTY);\r
- if (store == null || store.length() == 0) {\r
- store = Main.DEFAULT_TRUSTSTORE;\r
- pass = "changeit";\r
- }\r
- KeyStore trustStore = readStore(store, pass, KeyStore.getDefaultType());\r
-\r
- // We are connecting with the node name, but the certificate will have the CNAME\r
- // So we need to accept a non-matching certificate name\r
- SSLSocketFactory socketFactory = new SSLSocketFactory(keyStore, "changeit", trustStore);\r
- socketFactory.setHostnameVerifier(SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);\r
- sch = new Scheme("https", 443, socketFactory);\r
- inited = true;\r
- } catch (Exception e) {\r
- e.printStackTrace();\r
- }\r
- intlogger.info("ProxyServlet: inited = "+inited);\r
- }\r
- private KeyStore readStore(String store, String pass, String type) throws KeyStoreException, FileNotFoundException {\r
- KeyStore ks = KeyStore.getInstance(type);\r
- FileInputStream instream = new FileInputStream(new File(store));\r
- try {\r
- ks.load(instream, pass.toCharArray());\r
- } catch (Exception x) {\r
- System.err.println("READING TRUSTSTORE: "+x);\r
- } finally {\r
- try { instream.close(); } catch (Exception ignore) {}\r
- }\r
- return ks;\r
- }\r
- /**\r
- * Return <i>true</i> if the requester has NOT set the <i>noproxy</i> CGI variable.\r
- * If they have, this indicates they want to forcibly turn the proxy off.\r
- * @param req the HTTP request\r
- * @return true or false\r
- */\r
- protected boolean isProxyOK(final HttpServletRequest req) {\r
- String t = req.getQueryString();\r
- if (t != null) {\r
- t = t.replaceAll("&amp;", "&");\r
- for (String s : t.split("&")) {\r
- if (s.equals("noproxy") || s.startsWith("noproxy="))\r
- return false;\r
- }\r
- }\r
- return true;\r
- }\r
- /**\r
- * Is this the standby server? If it is, the proxy functions can be used.\r
- * If not, the proxy functions should not be called, and will send a response of 500\r
- * (Internal Server Error).\r
- * @return true if this server is the standby (and hence a proxy server).\r
- */\r
- public boolean isProxyServer() {\r
- SynchronizerTask st = SynchronizerTask.getSynchronizer();\r
- return st.getState() == SynchronizerTask.STANDBY;\r
- }\r
- /**\r
- * Issue a proxy DELETE to the active provisioning server.\r
- */\r
- @Override\r
- public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- doProxy(req, resp, "DELETE");\r
- }\r
- /**\r
- * Issue a proxy GET to the active provisioning server.\r
- */\r
- @Override\r
- public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- doProxy(req, resp, "GET");\r
- }\r
- /**\r
- * Issue a proxy PUT to the active provisioning server.\r
- */\r
- @Override\r
- public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- doProxy(req, resp, "PUT");\r
- }\r
- /**\r
- * Issue a proxy POST to the active provisioning server.\r
- */\r
- @Override\r
- public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- doProxy(req, resp, "POST");\r
- }\r
- /**\r
- * Issue a proxy GET to the active provisioning server. Unlike doGet() above,\r
- * this method will allow the caller to fall back to other code if the remote server is unreachable.\r
- * @return true if the proxy succeeded\r
- */\r
- public boolean doGetWithFallback(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- boolean rv = false;\r
- if (inited) {\r
- String url = buildUrl(req);\r
- intlogger.info("ProxyServlet: proxying with fallback GET "+url);\r
- AbstractHttpClient httpclient = new DefaultHttpClient();\r
- HttpRequestBase proxy = new HttpGet(url);\r
- try {\r
- httpclient.getConnectionManager().getSchemeRegistry().register(sch);\r
-\r
- // Copy request headers and request body\r
- copyRequestHeaders(req, proxy);\r
-\r
- // Execute the request\r
- HttpResponse pxy_response = httpclient.execute(proxy);\r
-\r
- // Get response headers and body\r
- int code = pxy_response.getStatusLine().getStatusCode();\r
- resp.setStatus(code);\r
- copyResponseHeaders(pxy_response, resp);\r
-\r
- HttpEntity entity = pxy_response.getEntity();\r
- if (entity != null) {\r
- InputStream in = entity.getContent();\r
- IOUtils.copy(in, resp.getOutputStream());\r
- in.close();\r
- }\r
- rv = true;\r
- } catch (IOException e) {\r
- System.err.println("ProxyServlet: "+e);\r
- e.printStackTrace();\r
- } finally {\r
- proxy.releaseConnection();\r
- httpclient.getConnectionManager().shutdown();\r
- }\r
- } else {\r
- intlogger.warn("ProxyServlet: proxy disabled");\r
- }\r
- return rv;\r
- }\r
- private void doProxy(HttpServletRequest req, HttpServletResponse resp, final String method) throws IOException {\r
- if (inited && isProxyServer()) {\r
- String url = buildUrl(req);\r
- intlogger.info("ProxyServlet: proxying "+method + " "+url);\r
- AbstractHttpClient httpclient = new DefaultHttpClient();\r
- ProxyHttpRequest proxy = new ProxyHttpRequest(method, url);\r
- try {\r
- httpclient.getConnectionManager().getSchemeRegistry().register(sch);\r
-\r
- // Copy request headers and request body\r
- copyRequestHeaders(req, proxy);\r
- if (method.equals("POST") || method.equals("PUT")){\r
- BasicHttpEntity body = new BasicHttpEntity();\r
- body.setContent(req.getInputStream());\r
- body.setContentLength(-1); // -1 = unknown\r
- proxy.setEntity(body);\r
- }\r
-\r
- // Execute the request\r
- HttpResponse pxy_response = httpclient.execute(proxy);\r
-\r
- // Get response headers and body\r
- int code = pxy_response.getStatusLine().getStatusCode();\r
- resp.setStatus(code);\r
- copyResponseHeaders(pxy_response, resp);\r
-\r
- HttpEntity entity = pxy_response.getEntity();\r
- if (entity != null) {\r
- InputStream in = entity.getContent();\r
- IOUtils.copy(in, resp.getOutputStream());\r
- in.close();\r
- }\r
- } catch (IOException e) {\r
- intlogger.warn("ProxyServlet: "+e);\r
- resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE);\r
- e.printStackTrace();\r
- } finally {\r
- proxy.releaseConnection();\r
- httpclient.getConnectionManager().shutdown();\r
- }\r
- } else {\r
- intlogger.warn("ProxyServlet: proxy disabled");\r
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
- }\r
- }\r
- private String buildUrl(HttpServletRequest req) {\r
- StringBuilder sb = new StringBuilder("https://");\r
- sb.append(URLUtilities.getPeerPodName());\r
- sb.append(req.getRequestURI());\r
- String q = req.getQueryString();\r
- if (q != null)\r
- sb.append("?").append(q);\r
- return sb.toString();\r
- }\r
- private void copyRequestHeaders(HttpServletRequest from, HttpRequestBase to) {\r
- @SuppressWarnings("unchecked")\r
- List<String> list = Collections.list(from.getHeaderNames());\r
- for (String name : list) {\r
- // Proxy code will add this one\r
- if (!name.equalsIgnoreCase("Content-Length"))\r
- to.addHeader(name, from.getHeader(name));\r
- }\r
- }\r
- private void copyResponseHeaders(HttpResponse from, HttpServletResponse to) {\r
- for (Header hdr : from.getAllHeaders()) {\r
- // Don't copy Date: our Jetty will add another Date header\r
- if (!hdr.getName().equals("Date"))\r
- to.addHeader(hdr.getName(), hdr.getValue());\r
- }\r
- }\r
-\r
- public class ProxyHttpRequest extends HttpEntityEnclosingRequestBase {\r
- private final String method;\r
-\r
- public ProxyHttpRequest(final String method, final String uri) {\r
- super();\r
- this.method = method;\r
- setURI(URI.create(uri));\r
- }\r
- @Override\r
- public String getMethod() {\r
- return method;\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.provisioning;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Properties;
+
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.http.Header;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpRequestBase;
+import org.apache.http.conn.scheme.Scheme;
+import org.apache.http.conn.ssl.SSLSocketFactory;
+import org.apache.http.entity.BasicHttpEntity;
+import org.apache.http.impl.client.AbstractHttpClient;
+import org.apache.http.impl.client.DefaultHttpClient;
+import org.onap.dmaap.datarouter.provisioning.utils.DB;
+import org.onap.dmaap.datarouter.provisioning.utils.URLUtilities;
+
+/**
+ * This class is the base class for those servlets that need to proxy their requests from the
+ * standby to active server. Its methods perform the proxy function to the active server. If the
+ * active server is not reachable, a 503 (SC_SERVICE_UNAVAILABLE) is returned. Only
+ * DELETE/GET/PUT/POST are supported.
+ *
+ * @author Robert Eby
+ * @version $Id: ProxyServlet.java,v 1.3 2014/03/24 18:47:10 eby Exp $
+ */
+@SuppressWarnings("serial")
+public class ProxyServlet extends BaseServlet {
+ private boolean inited = false;
+ private Scheme sch;
+
+ /**
+ * Initialize this servlet, by setting up SSL.
+ */
+ @SuppressWarnings("deprecation")
+ @Override
+ public void init(ServletConfig config) throws ServletException {
+ super.init(config);
+ try {
+ // Set up keystore
+ Properties props = (new DB()).getProperties();
+ String type = props.getProperty(Main.KEYSTORE_TYPE_PROPERTY, "jks");
+ String store = props.getProperty(Main.KEYSTORE_PATH_PROPERTY);
+ String pass = props.getProperty(Main.KEYSTORE_PASSWORD_PROPERTY);
+ KeyStore keyStore = readStore(store, pass, type);
+
+ store = props.getProperty(Main.TRUSTSTORE_PATH_PROPERTY);
+ pass = props.getProperty(Main.TRUSTSTORE_PASSWORD_PROPERTY);
+ if (store == null || store.length() == 0) {
+ store = Main.DEFAULT_TRUSTSTORE;
+ pass = "changeit";
+ }
+ KeyStore trustStore = readStore(store, pass, KeyStore.getDefaultType());
+
+ // We are connecting with the node name, but the certificate will have the CNAME
+ // So we need to accept a non-matching certificate name
+ SSLSocketFactory socketFactory = new SSLSocketFactory(keyStore, "changeit", trustStore);
+ socketFactory.setHostnameVerifier(SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
+ sch = new Scheme("https", 443, socketFactory);
+ inited = true;
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ intlogger.info("ProxyServlet: inited = "+inited);
+ }
+ private KeyStore readStore(String store, String pass, String type) throws KeyStoreException, FileNotFoundException {
+ KeyStore ks = KeyStore.getInstance(type);
+ FileInputStream instream = new FileInputStream(new File(store));
+ try {
+ ks.load(instream, pass.toCharArray());
+ } catch (Exception x) {
+ System.err.println("READING TRUSTSTORE: "+x);
+ } finally {
+ try { instream.close(); } catch (Exception ignore) {}
+ }
+ return ks;
+ }
+ /**
+ * Return <i>true</i> if the requester has NOT set the <i>noproxy</i> CGI variable.
+ * If they have, this indicates they want to forcibly turn the proxy off.
+ * @param req the HTTP request
+ * @return true or false
+ */
+ protected boolean isProxyOK(final HttpServletRequest req) {
+ String t = req.getQueryString();
+ if (t != null) {
+ t = t.replaceAll("&amp;", "&");
+ for (String s : t.split("&")) {
+ if (s.equals("noproxy") || s.startsWith("noproxy="))
+ return false;
+ }
+ }
+ return true;
+ }
+ /**
+ * Is this the standby server? If it is, the proxy functions can be used.
+ * If not, the proxy functions should not be called, and will send a response of 500
+ * (Internal Server Error).
+ * @return true if this server is the standby (and hence a proxy server).
+ */
+ public boolean isProxyServer() {
+ SynchronizerTask st = SynchronizerTask.getSynchronizer();
+ return st.getState() == SynchronizerTask.STANDBY;
+ }
+ /**
+ * Issue a proxy DELETE to the active provisioning server.
+ */
+ @Override
+ public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ doProxy(req, resp, "DELETE");
+ }
+ /**
+ * Issue a proxy GET to the active provisioning server.
+ */
+ @Override
+ public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ doProxy(req, resp, "GET");
+ }
+ /**
+ * Issue a proxy PUT to the active provisioning server.
+ */
+ @Override
+ public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ doProxy(req, resp, "PUT");
+ }
+ /**
+ * Issue a proxy POST to the active provisioning server.
+ */
+ @Override
+ public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ doProxy(req, resp, "POST");
+ }
+ /**
+ * Issue a proxy GET to the active provisioning server. Unlike doGet() above,
+ * this method will allow the caller to fall back to other code if the remote server is unreachable.
+ * @return true if the proxy succeeded
+ */
+ public boolean doGetWithFallback(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ boolean rv = false;
+ if (inited) {
+ String url = buildUrl(req);
+ intlogger.info("ProxyServlet: proxying with fallback GET "+url);
+ AbstractHttpClient httpclient = new DefaultHttpClient();
+ HttpRequestBase proxy = new HttpGet(url);
+ try {
+ httpclient.getConnectionManager().getSchemeRegistry().register(sch);
+
+ // Copy request headers and request body
+ copyRequestHeaders(req, proxy);
+
+ // Execute the request
+ HttpResponse pxy_response = httpclient.execute(proxy);
+
+ // Get response headers and body
+ int code = pxy_response.getStatusLine().getStatusCode();
+ resp.setStatus(code);
+ copyResponseHeaders(pxy_response, resp);
+
+ HttpEntity entity = pxy_response.getEntity();
+ if (entity != null) {
+ InputStream in = entity.getContent();
+ IOUtils.copy(in, resp.getOutputStream());
+ in.close();
+ }
+ rv = true;
+ } catch (IOException e) {
+ System.err.println("ProxyServlet: "+e);
+ e.printStackTrace();
+ } finally {
+ proxy.releaseConnection();
+ httpclient.getConnectionManager().shutdown();
+ }
+ } else {
+ intlogger.warn("ProxyServlet: proxy disabled");
+ }
+ return rv;
+ }
+ private void doProxy(HttpServletRequest req, HttpServletResponse resp, final String method) throws IOException {
+ if (inited && isProxyServer()) {
+ String url = buildUrl(req);
+ intlogger.info("ProxyServlet: proxying "+method + " "+url);
+ AbstractHttpClient httpclient = new DefaultHttpClient();
+ ProxyHttpRequest proxy = new ProxyHttpRequest(method, url);
+ try {
+ httpclient.getConnectionManager().getSchemeRegistry().register(sch);
+
+ // Copy request headers and request body
+ copyRequestHeaders(req, proxy);
+ if (method.equals("POST") || method.equals("PUT")){
+ BasicHttpEntity body = new BasicHttpEntity();
+ body.setContent(req.getInputStream());
+ body.setContentLength(-1); // -1 = unknown
+ proxy.setEntity(body);
+ }
+
+ // Execute the request
+ HttpResponse pxy_response = httpclient.execute(proxy);
+
+ // Get response headers and body
+ int code = pxy_response.getStatusLine().getStatusCode();
+ resp.setStatus(code);
+ copyResponseHeaders(pxy_response, resp);
+
+ HttpEntity entity = pxy_response.getEntity();
+ if (entity != null) {
+ InputStream in = entity.getContent();
+ IOUtils.copy(in, resp.getOutputStream());
+ in.close();
+ }
+ } catch (IOException e) {
+ intlogger.warn("ProxyServlet: "+e);
+ resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
+ e.printStackTrace();
+ } finally {
+ proxy.releaseConnection();
+ httpclient.getConnectionManager().shutdown();
+ }
+ } else {
+ intlogger.warn("ProxyServlet: proxy disabled");
+ resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ }
+ }
+ private String buildUrl(HttpServletRequest req) {
+ StringBuilder sb = new StringBuilder("https://");
+ sb.append(URLUtilities.getPeerPodName());
+ sb.append(req.getRequestURI());
+ String q = req.getQueryString();
+ if (q != null)
+ sb.append("?").append(q);
+ return sb.toString();
+ }
+ private void copyRequestHeaders(HttpServletRequest from, HttpRequestBase to) {
+ @SuppressWarnings("unchecked")
+ List<String> list = Collections.list(from.getHeaderNames());
+ for (String name : list) {
+ // Proxy code will add this one
+ if (!name.equalsIgnoreCase("Content-Length"))
+ to.addHeader(name, from.getHeader(name));
+ }
+ }
+ private void copyResponseHeaders(HttpResponse from, HttpServletResponse to) {
+ for (Header hdr : from.getAllHeaders()) {
+ // Don't copy Date: our Jetty will add another Date header
+ if (!hdr.getName().equals("Date"))
+ to.addHeader(hdr.getName(), hdr.getValue());
+ }
+ }
+
+ public class ProxyHttpRequest extends HttpEntityEnclosingRequestBase {
+ private final String method;
+
+ public ProxyHttpRequest(final String method, final String uri) {
+ super();
+ this.method = method;
+ setURI(URI.create(uri));
+ }
+ @Override
+ public String getMethod() {
+ return method;
+ }
+ }
+}
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.provisioning;\r
-\r
-import java.io.IOException;\r
-import java.io.InputStream;\r
-import java.util.ArrayList;\r
-import java.util.Collection;\r
-import java.util.List;\r
-import java.util.Properties;\r
-\r
-import javax.servlet.ServletConfig;\r
-import javax.servlet.ServletException;\r
-import javax.servlet.http.HttpServletRequest;\r
-import javax.servlet.http.HttpServletResponse;\r
-\r
-import org.json.JSONArray;\r
-import org.json.JSONObject;\r
-import org.json.JSONTokener;\r
-import org.onap.dmaap.datarouter.provisioning.beans.EventLogRecord;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Feed;\r
-import org.onap.dmaap.datarouter.provisioning.beans.IngressRoute;\r
-import org.onap.dmaap.datarouter.provisioning.eelf.EelfMsgs;\r
-import org.onap.dmaap.datarouter.provisioning.utils.*;\r
-\r
-import com.att.eelf.configuration.EELFLogger;\r
-import com.att.eelf.configuration.EELFManager;\r
-\r
-/**\r
- * This servlet handles redirects for the <publishURL> on the provisioning server,\r
- * which is generated by the provisioning server to handle a particular subscriptions to a feed.\r
- * See the <b>File Publishing and Delivery API</b> document for details on how these methods\r
- * should be invoked.\r
- *\r
- * @author Robert Eby\r
- * @version $Id: PublishServlet.java,v 1.8 2014/03/12 19:45:41 eby Exp $\r
- */\r
-@SuppressWarnings("serial")\r
-public class PublishServlet extends BaseServlet {\r
- private int next_node;\r
- private String provstring;\r
- private List<IngressRoute> irt;\r
- //Adding EELF Logger Rally:US664892 \r
- private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.provisioning.PublishServlet");\r
- \r
-\r
- @Override\r
- public void init(ServletConfig config) throws ServletException {\r
- super.init(config);\r
- next_node = 0;\r
- provstring = "";\r
- irt = new ArrayList<IngressRoute>();\r
- \r
- }\r
- @Override\r
- public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doDelete");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- redirect(req, resp);\r
- }\r
- @Override\r
- public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doGet");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- redirect(req, resp);\r
- }\r
- @Override\r
- public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doPut");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- redirect(req, resp);\r
- }\r
- @Override\r
- public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doPost");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));\r
- redirect(req, resp);\r
- }\r
- private void redirect(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- String[] nodes = getNodes();\r
- if (nodes == null || nodes.length == 0) {\r
- resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE, "There are no nodes defined in the DR network.");\r
- } else {\r
- EventLogRecord elr = new EventLogRecord(req);\r
- int feedid = checkPath(req);\r
- if (feedid < 0) {\r
- String message = (feedid == -1)\r
- ? "Invalid request - Missing or bad feed number."\r
- : "Invalid request - Missing file ID.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
- eventlogger.info(elr);\r
-\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
- } else {\r
- // Generate new URL\r
- String nextnode = getRedirectNode(feedid, req);\r
- nextnode = nextnode+":"+DB.HTTPS_PORT;\r
- String newurl = "https://" + nextnode + "/publish" + req.getPathInfo();\r
- String qs = req.getQueryString();\r
- if (qs != null)\r
- newurl += "?" + qs;\r
-\r
- // Log redirect in event log\r
- String message = "Redirected to: "+newurl;\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_MOVED_PERMANENTLY);\r
- eventlogger.info(elr);\r
-\r
- resp.setStatus(HttpServletResponse.SC_MOVED_PERMANENTLY);\r
- resp.setHeader("Location", newurl);\r
- }\r
- }\r
- }\r
- private String getRedirectNode(int feedid, HttpServletRequest req) {\r
- // Check to see if the IRT needs to be updated\r
- Poker p = Poker.getPoker();\r
- String s = p.getProvisioningString();\r
- synchronized (provstring) {\r
- if (irt == null || (s.length() != provstring.length()) || !s.equals(provstring)) {\r
- // Provisioning string has changed -- update the IRT\r
- provstring = s;\r
- JSONObject jo = new JSONObject(new JSONTokener(provstring));\r
- JSONArray ja = jo.getJSONArray("ingress");\r
- List<IngressRoute> newlist = new ArrayList<IngressRoute>();\r
- for (int i = 0; i < ja.length(); i++) {\r
- IngressRoute iroute = new IngressRoute(ja.getJSONObject(i));\r
- newlist.add(iroute);\r
- }\r
- irt = newlist;\r
- }\r
- }\r
-\r
- // Look in IRT for next node\r
- for (IngressRoute route : irt) {\r
- if (route.matches(feedid, req)) {\r
- // pick a node at random from the list\r
- Collection<String> nodes = route.getNodes();\r
- String[] arr = nodes.toArray(new String[0]);\r
- long id = System.currentTimeMillis() % arr.length;\r
- String node = arr[(int) id];\r
- intlogger.info("Redirecting to "+node+" because of route "+route);\r
- return node;\r
- }\r
- }\r
-\r
- // No IRT rule matches, do round robin of all active nodes\r
- String[] nodes = getNodes();\r
- if (next_node >= nodes.length) // The list of nodes may have grown/shrunk\r
- next_node = 0;\r
- return nodes[next_node++];\r
- }\r
- private int checkPath(HttpServletRequest req) {\r
- String path = req.getPathInfo();\r
- if (path == null || path.length() < 2)\r
- return -1;\r
- path = path.substring(1);\r
- int ix = path.indexOf('/');\r
- if (ix < 0 || ix == path.length()-1)\r
- return -2;\r
- try {\r
- int feedid = Integer.parseInt(path.substring(0, ix));\r
- if (!Feed.isFeedValid(feedid))\r
- return -1;\r
- return feedid;\r
- } catch (NumberFormatException e) {\r
- return -1;\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.provisioning;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Properties;
+
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.json.JSONTokener;
+import org.onap.dmaap.datarouter.provisioning.beans.EventLogRecord;
+import org.onap.dmaap.datarouter.provisioning.beans.Feed;
+import org.onap.dmaap.datarouter.provisioning.beans.IngressRoute;
+import org.onap.dmaap.datarouter.provisioning.eelf.EelfMsgs;
+import org.onap.dmaap.datarouter.provisioning.utils.*;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+/**
+ * This servlet handles redirects for the <publishURL> on the provisioning server,
+ * which is generated by the provisioning server to handle a particular subscriptions to a feed.
+ * See the <b>File Publishing and Delivery API</b> document for details on how these methods
+ * should be invoked.
+ *
+ * @author Robert Eby
+ * @version $Id: PublishServlet.java,v 1.8 2014/03/12 19:45:41 eby Exp $
+ */
+@SuppressWarnings("serial")
+public class PublishServlet extends BaseServlet {
+ private int next_node;
+ private String provstring;
+ private List<IngressRoute> irt;
+ //Adding EELF Logger Rally:US664892
+ private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.provisioning.PublishServlet");
+
+
+ @Override
+ public void init(ServletConfig config) throws ServletException {
+ super.init(config);
+ next_node = 0;
+ provstring = "";
+ irt = new ArrayList<IngressRoute>();
+
+ }
+ @Override
+ public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doDelete");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ redirect(req, resp);
+ }
+ @Override
+ public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doGet");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ redirect(req, resp);
+ }
+ @Override
+ public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doPut");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_FEEDID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ redirect(req, resp);
+ }
+ @Override
+ public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doPost");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));
+ redirect(req, resp);
+ }
+ private void redirect(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ String[] nodes = getNodes();
+ if (nodes == null || nodes.length == 0) {
+ resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE, "There are no nodes defined in the DR network.");
+ } else {
+ EventLogRecord elr = new EventLogRecord(req);
+ int feedid = checkPath(req);
+ if (feedid < 0) {
+ String message = (feedid == -1)
+ ? "Invalid request - Missing or bad feed number."
+ : "Invalid request - Missing file ID.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);
+ eventlogger.info(elr);
+
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);
+ } else {
+ // Generate new URL
+ String nextnode = getRedirectNode(feedid, req);
+ nextnode = nextnode+":"+DB.HTTPS_PORT;
+ String newurl = "https://" + nextnode + "/publish" + req.getPathInfo();
+ String qs = req.getQueryString();
+ if (qs != null)
+ newurl += "?" + qs;
+
+ // Log redirect in event log
+ String message = "Redirected to: "+newurl;
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_MOVED_PERMANENTLY);
+ eventlogger.info(elr);
+
+ resp.setStatus(HttpServletResponse.SC_MOVED_PERMANENTLY);
+ resp.setHeader("Location", newurl);
+ }
+ }
+ }
+ private String getRedirectNode(int feedid, HttpServletRequest req) {
+ // Check to see if the IRT needs to be updated
+ Poker p = Poker.getPoker();
+ String s = p.getProvisioningString();
+ synchronized (provstring) {
+ if (irt == null || (s.length() != provstring.length()) || !s.equals(provstring)) {
+ // Provisioning string has changed -- update the IRT
+ provstring = s;
+ JSONObject jo = new JSONObject(new JSONTokener(provstring));
+ JSONArray ja = jo.getJSONArray("ingress");
+ List<IngressRoute> newlist = new ArrayList<IngressRoute>();
+ for (int i = 0; i < ja.length(); i++) {
+ IngressRoute iroute = new IngressRoute(ja.getJSONObject(i));
+ newlist.add(iroute);
+ }
+ irt = newlist;
+ }
+ }
+
+ // Look in IRT for next node
+ for (IngressRoute route : irt) {
+ if (route.matches(feedid, req)) {
+ // pick a node at random from the list
+ Collection<String> nodes = route.getNodes();
+ String[] arr = nodes.toArray(new String[0]);
+ long id = System.currentTimeMillis() % arr.length;
+ String node = arr[(int) id];
+ intlogger.info("Redirecting to "+node+" because of route "+route);
+ return node;
+ }
+ }
+
+ // No IRT rule matches, do round robin of all active nodes
+ String[] nodes = getNodes();
+ if (next_node >= nodes.length) // The list of nodes may have grown/shrunk
+ next_node = 0;
+ return nodes[next_node++];
+ }
+ private int checkPath(HttpServletRequest req) {
+ String path = req.getPathInfo();
+ if (path == null || path.length() < 2)
+ return -1;
+ path = path.substring(1);
+ int ix = path.indexOf('/');
+ if (ix < 0 || ix == path.length()-1)
+ return -2;
+ try {
+ int feedid = Integer.parseInt(path.substring(0, ix));
+ if (!Feed.isFeedValid(feedid))
+ return -1;
+ return feedid;
+ } catch (NumberFormatException e) {
+ return -1;
+ }
+ }
+}
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
*/\r
@SuppressWarnings("serial")\r
public class RouteServlet extends ProxyServlet {\r
- /**\r
- * DELETE route table entries by deleting part of the route table tree.\r
- */\r
- @Override\r
- public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- EventLogRecord elr = new EventLogRecord(req);\r
- if (!isAuthorizedForInternal(req)) {\r
- elr.setMessage("Unauthorized.");\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");\r
- return;\r
- }\r
- if (isProxyOK(req) && isProxyServer()) {\r
- super.doDelete(req, resp);\r
- return;\r
- }\r
+ /**\r
+ * DELETE route table entries by deleting part of the route table tree.\r
+ */\r
+ @Override\r
+ public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ EventLogRecord elr = new EventLogRecord(req);\r
+ if (!isAuthorizedForInternal(req)) {\r
+ elr.setMessage("Unauthorized.");\r
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");\r
+ return;\r
+ }\r
+ if (isProxyOK(req) && isProxyServer()) {\r
+ super.doDelete(req, resp);\r
+ return;\r
+ }\r
\r
- String path = req.getPathInfo();\r
- String[] parts = path.substring(1).split("/");\r
- Deleteable[] d = null;\r
- if (parts[0].equals("ingress")) {\r
- if (parts.length == 4) {\r
- // /internal/route/ingress/<feed>/<user>/<subnet>\r
- try {\r
- int feedid = Integer.parseInt(parts[1]);\r
- IngressRoute er = IngressRoute.getIngressRoute(feedid, parts[2], parts[3].replaceAll("!", "/"));\r
- if (er == null) {\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "The specified ingress route does not exist.");\r
- return;\r
- }\r
- d = new Deleteable[] { er };\r
- } catch (NumberFormatException e) {\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid feed ID in 'delete ingress' command.");\r
- return;\r
- }\r
- } else if (parts.length == 2) {\r
- // /internal/route/ingress/<seq>\r
- try {\r
- int seq = Integer.parseInt(parts[1]);\r
- Set<IngressRoute> set = IngressRoute.getIngressRoutesForSeq(seq);\r
- d = set.toArray(new Deleteable[0]);\r
- } catch (NumberFormatException e) {\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid sequence number in 'delete ingress' command.");\r
- return;\r
- }\r
- } else {\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid number of arguments in 'delete ingress' command.");\r
- return;\r
- }\r
- } else if (parts[0].equals("egress")) {\r
- if (parts.length == 2) {\r
- // /internal/route/egress/<sub>\r
- try {\r
- int subid = Integer.parseInt(parts[1]);\r
- EgressRoute er = EgressRoute.getEgressRoute(subid);\r
- if (er == null) {\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "The specified egress route does not exist.");\r
- return;\r
- }\r
- d = new Deleteable[] { er };\r
- } catch (NumberFormatException e) {\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid sub ID in 'delete egress' command.");\r
- return;\r
- }\r
- } else {\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid number of arguments in 'delete egress' command.");\r
- return;\r
- }\r
- } else if (parts[0].equals("network")) {\r
- if (parts.length == 3) {\r
- // /internal/route/network/<from>/<to>\r
- try {//\r
- NetworkRoute nr = new NetworkRoute(\r
- NodeClass.normalizeNodename(parts[1]),\r
- NodeClass.normalizeNodename(parts[2])\r
- );\r
- d = new Deleteable[] { nr };\r
- } catch (IllegalArgumentException e) {\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "The specified network route does not exist.");\r
- return;\r
- }\r
- } else {\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid number of arguments in 'delete network' command.");\r
- return;\r
- }\r
- }\r
- if (d == null) {\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");\r
- return;\r
- }\r
- boolean rv = true;\r
- for (Deleteable dd : d) {\r
- rv &= doDelete(dd);\r
- }\r
- if (rv) {\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- provisioningDataChanged();\r
- provisioningParametersChanged();\r
- } else {\r
- // Something went wrong with the DELETE\r
- elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
- }\r
- }\r
- /**\r
- * GET route table entries from the route table tree specified by the URL path.\r
- */\r
- @Override\r
- public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- EventLogRecord elr = new EventLogRecord(req);\r
- if (!isAuthorizedForInternal(req)) {\r
- elr.setMessage("Unauthorized.");\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");\r
- return;\r
- }\r
- if (isProxyOK(req) && isProxyServer()) {\r
- super.doGet(req, resp);\r
- return;\r
- }\r
+ String path = req.getPathInfo();\r
+ String[] parts = path.substring(1).split("/");\r
+ Deleteable[] d = null;\r
+ if (parts[0].equals("ingress")) {\r
+ if (parts.length == 4) {\r
+ // /internal/route/ingress/<feed>/<user>/<subnet>\r
+ try {\r
+ int feedid = Integer.parseInt(parts[1]);\r
+ IngressRoute er = IngressRoute.getIngressRoute(feedid, parts[2], parts[3].replaceAll("!", "/"));\r
+ if (er == null) {\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "The specified ingress route does not exist.");\r
+ return;\r
+ }\r
+ d = new Deleteable[] { er };\r
+ } catch (NumberFormatException e) {\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid feed ID in 'delete ingress' command.");\r
+ return;\r
+ }\r
+ } else if (parts.length == 2) {\r
+ // /internal/route/ingress/<seq>\r
+ try {\r
+ int seq = Integer.parseInt(parts[1]);\r
+ Set<IngressRoute> set = IngressRoute.getIngressRoutesForSeq(seq);\r
+ d = set.toArray(new Deleteable[0]);\r
+ } catch (NumberFormatException e) {\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid sequence number in 'delete ingress' command.");\r
+ return;\r
+ }\r
+ } else {\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid number of arguments in 'delete ingress' command.");\r
+ return;\r
+ }\r
+ } else if (parts[0].equals("egress")) {\r
+ if (parts.length == 2) {\r
+ // /internal/route/egress/<sub>\r
+ try {\r
+ int subid = Integer.parseInt(parts[1]);\r
+ EgressRoute er = EgressRoute.getEgressRoute(subid);\r
+ if (er == null) {\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "The specified egress route does not exist.");\r
+ return;\r
+ }\r
+ d = new Deleteable[] { er };\r
+ } catch (NumberFormatException e) {\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid sub ID in 'delete egress' command.");\r
+ return;\r
+ }\r
+ } else {\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid number of arguments in 'delete egress' command.");\r
+ return;\r
+ }\r
+ } else if (parts[0].equals("network")) {\r
+ if (parts.length == 3) {\r
+ // /internal/route/network/<from>/<to>\r
+ try {//\r
+ NetworkRoute nr = new NetworkRoute(\r
+ NodeClass.normalizeNodename(parts[1]),\r
+ NodeClass.normalizeNodename(parts[2])\r
+ );\r
+ d = new Deleteable[] { nr };\r
+ } catch (IllegalArgumentException e) {\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "The specified network route does not exist.");\r
+ return;\r
+ }\r
+ } else {\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid number of arguments in 'delete network' command.");\r
+ return;\r
+ }\r
+ }\r
+ if (d == null) {\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");\r
+ return;\r
+ }\r
+ boolean rv = true;\r
+ for (Deleteable dd : d) {\r
+ rv &= doDelete(dd);\r
+ }\r
+ if (rv) {\r
+ elr.setResult(HttpServletResponse.SC_OK);\r
+ eventlogger.info(elr);\r
+ resp.setStatus(HttpServletResponse.SC_OK);\r
+ provisioningDataChanged();\r
+ provisioningParametersChanged();\r
+ } else {\r
+ // Something went wrong with the DELETE\r
+ elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
+ }\r
+ }\r
+ /**\r
+ * GET route table entries from the route table tree specified by the URL path.\r
+ */\r
+ @Override\r
+ public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ EventLogRecord elr = new EventLogRecord(req);\r
+ if (!isAuthorizedForInternal(req)) {\r
+ elr.setMessage("Unauthorized.");\r
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");\r
+ return;\r
+ }\r
+ if (isProxyOK(req) && isProxyServer()) {\r
+ super.doGet(req, resp);\r
+ return;\r
+ }\r
\r
- String path = req.getPathInfo();\r
- if (!path.endsWith("/"))\r
- path += "/";\r
- if (!path.equals("/") && !path.equals("/ingress/") && !path.equals("/egress/") && !path.equals("/network/")) {\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");\r
- return;\r
- }\r
+ String path = req.getPathInfo();\r
+ if (!path.endsWith("/"))\r
+ path += "/";\r
+ if (!path.equals("/") && !path.equals("/ingress/") && !path.equals("/egress/") && !path.equals("/network/")) {\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");\r
+ return;\r
+ }\r
\r
- StringBuilder sb = new StringBuilder("{\n");\r
- String px2 = "";\r
- if (path.equals("/") || path.equals("/ingress/")) {\r
- String pfx = "\n";\r
- sb.append("\"ingress\": [");\r
- for (IngressRoute in : IngressRoute.getAllIngressRoutes()) {\r
- sb.append(pfx);\r
- sb.append(in.asJSONObject().toString());\r
- pfx = ",\n";\r
- }\r
- sb.append("\n]");\r
- px2 = ",\n";\r
- }\r
+ StringBuilder sb = new StringBuilder("{\n");\r
+ String px2 = "";\r
+ if (path.equals("/") || path.equals("/ingress/")) {\r
+ String pfx = "\n";\r
+ sb.append("\"ingress\": [");\r
+ for (IngressRoute in : IngressRoute.getAllIngressRoutes()) {\r
+ sb.append(pfx);\r
+ sb.append(in.asJSONObject().toString());\r
+ pfx = ",\n";\r
+ }\r
+ sb.append("\n]");\r
+ px2 = ",\n";\r
+ }\r
\r
- if (path.equals("/") || path.equals("/egress/")) {\r
- String pfx = "\n";\r
- sb.append(px2);\r
- sb.append("\"egress\": {");\r
- for (EgressRoute eg : EgressRoute.getAllEgressRoutes()) {\r
- JSONObject jx = eg.asJSONObject();\r
- for (String key : jx.keySet()) {\r
- sb.append(pfx);\r
- sb.append(" \"").append(key).append("\": ");\r
- sb.append("\"").append(jx.getString(key)).append("\"");\r
- pfx = ",\n";\r
- }\r
- }\r
- sb.append("\n}");\r
- px2 = ",\n";\r
- }\r
+ if (path.equals("/") || path.equals("/egress/")) {\r
+ String pfx = "\n";\r
+ sb.append(px2);\r
+ sb.append("\"egress\": {");\r
+ for (EgressRoute eg : EgressRoute.getAllEgressRoutes()) {\r
+ JSONObject jx = eg.asJSONObject();\r
+ for (String key : jx.keySet()) {\r
+ sb.append(pfx);\r
+ sb.append(" \"").append(key).append("\": ");\r
+ sb.append("\"").append(jx.getString(key)).append("\"");\r
+ pfx = ",\n";\r
+ }\r
+ }\r
+ sb.append("\n}");\r
+ px2 = ",\n";\r
+ }\r
\r
- if (path.equals("/") || path.equals("/network/")) {\r
- String pfx = "\n";\r
- sb.append(px2);\r
- sb.append("\"routing\": [");\r
- for (NetworkRoute ne : NetworkRoute.getAllNetworkRoutes()) {\r
- sb.append(pfx);\r
- sb.append(ne.asJSONObject().toString());\r
- pfx = ",\n";\r
- }\r
- sb.append("\n]");\r
- }\r
- sb.append("}\n");\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType("application/json");\r
- resp.getOutputStream().print(sb.toString());\r
- }\r
- /**\r
- * PUT on </internal/route/*> -- not supported.\r
- */\r
- @Override\r
- public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- EventLogRecord elr = new EventLogRecord(req);\r
- if (!isAuthorizedForInternal(req)) {\r
- elr.setMessage("Unauthorized.");\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");\r
- return;\r
- }\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");\r
- }\r
- /**\r
- * POST - modify existing route table entries in the route table tree specified by the URL path.\r
- */\r
- @Override\r
- public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- EventLogRecord elr = new EventLogRecord(req);\r
- if (!isAuthorizedForInternal(req)) {\r
- elr.setMessage("Unauthorized.");\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");\r
- return;\r
- }\r
- if (isProxyOK(req) && isProxyServer()) {\r
- super.doPost(req, resp);\r
- return;\r
- }\r
- String path = req.getPathInfo();\r
- Insertable[] ins = null;\r
- if (path.startsWith("/ingress/")) {\r
- // /internal/route/ingress/?feed=%s&user=%s&subnet=%s&nodepatt=%s\r
- try {\r
- // Although it probably doesn't make sense, you can install two identical routes in the IRT\r
- int feedid = Integer.parseInt(req.getParameter("feed"));\r
- String user = req.getParameter("user");\r
- if (user == null)\r
- user = "-";\r
- String subnet = req.getParameter("subnet");\r
- if (subnet == null)\r
- subnet = "-";\r
- String nodepatt = req.getParameter("nodepatt");\r
- String t = req.getParameter("seq");\r
- int seq = (t != null) ? Integer.parseInt(t) : (IngressRoute.getMaxSequence() + 100);\r
- ins = new Insertable[] { new IngressRoute(seq, feedid, user, subnet, NodeClass.lookupNodeNames(nodepatt)) };\r
- } catch (Exception e) {\r
- intlogger.info(e);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments in 'add ingress' command.");\r
- return;\r
- }\r
- } else if (path.startsWith("/egress/")) {\r
- // /internal/route/egress/?sub=%s&node=%s\r
- try {\r
- int subid = Integer.parseInt(req.getParameter("sub"));\r
- EgressRoute er = EgressRoute.getEgressRoute(subid);\r
- if (er != null) {\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "An egress route already exists for that subscriber.");\r
- return;\r
- }\r
- String node = NodeClass.normalizeNodename(req.getParameter("node"));\r
- ins = new Insertable[] { new EgressRoute(subid, node) };\r
- } catch (Exception e) {\r
- intlogger.info(e);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments in 'add egress' command.");\r
- return;\r
- }\r
- } else if (path.startsWith("/network/")) {\r
- // /internal/route/network/?from=%s&to=%s&via=%s\r
- try {\r
- String nfrom = req.getParameter("from");\r
- String nto = req.getParameter("to");\r
- String nvia = req.getParameter("via");\r
- if (nfrom == null || nto == null || nvia == null) {\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Missing arguments in 'add network' command.");\r
- return;\r
- }\r
- nfrom = NodeClass.normalizeNodename(nfrom);\r
- nto = NodeClass.normalizeNodename(nto);\r
- nvia = NodeClass.normalizeNodename(nvia);\r
- NetworkRoute nr = new NetworkRoute(nfrom, nto, nvia);\r
- for (NetworkRoute route : NetworkRoute.getAllNetworkRoutes()) {\r
- if (route.getFromnode() == nr.getFromnode() && route.getTonode() == nr.getTonode()) {\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Network route table already contains a route for "+nfrom+" and "+nto);\r
- return;\r
- }\r
- }\r
- ins = new Insertable[] { nr };\r
- } catch (IllegalArgumentException e) {\r
- intlogger.info(e);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments in 'add network' command.");\r
- return;\r
- }\r
- }\r
- if (ins == null) {\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");\r
- return;\r
- }\r
- boolean rv = true;\r
- for (Insertable dd : ins) {\r
- rv &= doInsert(dd);\r
- }\r
- if (rv) {\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- provisioningDataChanged();\r
- provisioningParametersChanged();\r
- } else {\r
- // Something went wrong with the INSERT\r
- elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
- }\r
- }\r
+ if (path.equals("/") || path.equals("/network/")) {\r
+ String pfx = "\n";\r
+ sb.append(px2);\r
+ sb.append("\"routing\": [");\r
+ for (NetworkRoute ne : NetworkRoute.getAllNetworkRoutes()) {\r
+ sb.append(pfx);\r
+ sb.append(ne.asJSONObject().toString());\r
+ pfx = ",\n";\r
+ }\r
+ sb.append("\n]");\r
+ }\r
+ sb.append("}\n");\r
+ resp.setStatus(HttpServletResponse.SC_OK);\r
+ resp.setContentType("application/json");\r
+ resp.getOutputStream().print(sb.toString());\r
+ }\r
+ /**\r
+ * PUT on </internal/route/*> -- not supported.\r
+ */\r
+ @Override\r
+ public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ EventLogRecord elr = new EventLogRecord(req);\r
+ if (!isAuthorizedForInternal(req)) {\r
+ elr.setMessage("Unauthorized.");\r
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");\r
+ return;\r
+ }\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");\r
+ }\r
+ /**\r
+ * POST - modify existing route table entries in the route table tree specified by the URL path.\r
+ */\r
+ @Override\r
+ public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ EventLogRecord elr = new EventLogRecord(req);\r
+ if (!isAuthorizedForInternal(req)) {\r
+ elr.setMessage("Unauthorized.");\r
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized.");\r
+ return;\r
+ }\r
+ if (isProxyOK(req) && isProxyServer()) {\r
+ super.doPost(req, resp);\r
+ return;\r
+ }\r
+ String path = req.getPathInfo();\r
+ Insertable[] ins = null;\r
+ if (path.startsWith("/ingress/")) {\r
+ // /internal/route/ingress/?feed=%s&user=%s&subnet=%s&nodepatt=%s\r
+ try {\r
+ // Although it probably doesn't make sense, you can install two identical routes in the IRT\r
+ int feedid = Integer.parseInt(req.getParameter("feed"));\r
+ String user = req.getParameter("user");\r
+ if (user == null)\r
+ user = "-";\r
+ String subnet = req.getParameter("subnet");\r
+ if (subnet == null)\r
+ subnet = "-";\r
+ String nodepatt = req.getParameter("nodepatt");\r
+ String t = req.getParameter("seq");\r
+ int seq = (t != null) ? Integer.parseInt(t) : (IngressRoute.getMaxSequence() + 100);\r
+ ins = new Insertable[] { new IngressRoute(seq, feedid, user, subnet, NodeClass.lookupNodeNames(nodepatt)) };\r
+ } catch (Exception e) {\r
+ intlogger.info(e);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments in 'add ingress' command.");\r
+ return;\r
+ }\r
+ } else if (path.startsWith("/egress/")) {\r
+ // /internal/route/egress/?sub=%s&node=%s\r
+ try {\r
+ int subid = Integer.parseInt(req.getParameter("sub"));\r
+ EgressRoute er = EgressRoute.getEgressRoute(subid);\r
+ if (er != null) {\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "An egress route already exists for that subscriber.");\r
+ return;\r
+ }\r
+ String node = NodeClass.normalizeNodename(req.getParameter("node"));\r
+ ins = new Insertable[] { new EgressRoute(subid, node) };\r
+ } catch (Exception e) {\r
+ intlogger.info(e);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments in 'add egress' command.");\r
+ return;\r
+ }\r
+ } else if (path.startsWith("/network/")) {\r
+ // /internal/route/network/?from=%s&to=%s&via=%s\r
+ try {\r
+ String nfrom = req.getParameter("from");\r
+ String nto = req.getParameter("to");\r
+ String nvia = req.getParameter("via");\r
+ if (nfrom == null || nto == null || nvia == null) {\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Missing arguments in 'add network' command.");\r
+ return;\r
+ }\r
+ nfrom = NodeClass.normalizeNodename(nfrom);\r
+ nto = NodeClass.normalizeNodename(nto);\r
+ nvia = NodeClass.normalizeNodename(nvia);\r
+ NetworkRoute nr = new NetworkRoute(nfrom, nto, nvia);\r
+ for (NetworkRoute route : NetworkRoute.getAllNetworkRoutes()) {\r
+ if (route.getFromnode() == nr.getFromnode() && route.getTonode() == nr.getTonode()) {\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Network route table already contains a route for "+nfrom+" and "+nto);\r
+ return;\r
+ }\r
+ }\r
+ ins = new Insertable[] { nr };\r
+ } catch (IllegalArgumentException e) {\r
+ intlogger.info(e);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments in 'add network' command.");\r
+ return;\r
+ }\r
+ }\r
+ if (ins == null) {\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Bad URL.");\r
+ return;\r
+ }\r
+ boolean rv = true;\r
+ for (Insertable dd : ins) {\r
+ rv &= doInsert(dd);\r
+ }\r
+ if (rv) {\r
+ elr.setResult(HttpServletResponse.SC_OK);\r
+ eventlogger.info(elr);\r
+ resp.setStatus(HttpServletResponse.SC_OK);\r
+ provisioningDataChanged();\r
+ provisioningParametersChanged();\r
+ } else {\r
+ // Something went wrong with the INSERT\r
+ elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
+ }\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* This Servlet handles requests to the <Statistics API> and <Statistics consilidated resultset>,\r
- * @author Manish Singh \r
+ * @author Manish Singh\r
* @version $Id: StatisticsServlet.java,v 1.11 2016/08/10 17:27:02 Manish Exp $\r
*/\r
@SuppressWarnings("serial")\r
\r
public class StatisticsServlet extends BaseServlet {\r
\r
- private static final long TWENTYFOUR_HOURS = (24 * 60 * 60 * 1000L);\r
- private static final String fmt1 = "yyyy-MM-dd'T'HH:mm:ss'Z'";\r
- private static final String fmt2 = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";\r
-\r
- \r
- /**\r
- * DELETE a logging URL -- not supported.\r
- */\r
- @Override\r
- public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- String message = "DELETE not allowed for the logURL.";\r
- EventLogRecord elr = new EventLogRecord(req);\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
- }\r
- /**\r
- * GET a Statistics URL -- retrieve Statistics data for a feed or subscription.\r
- * See the <b>Statistics API</b> document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- \r
- Map<String, String> map = buildMapFromRequest(req);\r
- if (map.get("err") != null) {\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments: "+map.get("err"));\r
- return;\r
- }\r
- // check Accept: header??\r
- \r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType(LOGLIST_CONTENT_TYPE);\r
- ServletOutputStream out = resp.getOutputStream();\r
- \r
- \r
- String outputType = "json";\r
- String feedids = null;\r
- \r
- if(req.getParameter("feedid") ==null && req.getParameter("groupid") ==null)\r
- {\r
- out.print("Invalid request, Feedid or Group ID is required.");\r
- }\r
- \r
- if(req.getParameter("feedid")!=null && req.getParameter("groupid") == null) {\r
- map.put("feedids", req.getParameter("feedid").replace("|", ",").toString());\r
- }\r
-\r
- if(req.getParameter("groupid") != null && req.getParameter("feedid") ==null) {\r
- // String groupid1 = null;\r
- StringBuffer groupid1 = new StringBuffer(); \r
- \r
- try {\r
- System.out.println("feeedidsssssssss");\r
- groupid1 = this.getFeedIdsByGroupId(Integer.parseInt(req.getParameter("groupid")));\r
- System.out.println("feeedids"+req.getParameter("groupid"));\r
- \r
- map.put("feedids", groupid1.toString());\r
- System.out.println("groupid1" +groupid1.toString());\r
- \r
- \r
- } catch (NumberFormatException e) {\r
- e.printStackTrace();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- if(req.getParameter("groupid") != null && req.getParameter("feedid") !=null) {\r
- StringBuffer groupid1 = new StringBuffer();\r
- \r
- \r
- try {\r
- System.out.println("both r not null");\r
- groupid1 = this.getFeedIdsByGroupId(Integer.parseInt(req.getParameter("groupid")));\r
- System.out.println("feeedids"+req.getParameter("groupid"));\r
- groupid1.append(",");\r
- groupid1.append(req.getParameter("feedid").replace("|", ",").toString());\r
- \r
- map.put("feedids", groupid1.toString());\r
- \r
- \r
- System.out.println("groupid1" +groupid1.toString());\r
- \r
- \r
- } catch (NumberFormatException e) {\r
- e.printStackTrace();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- \r
- \r
- \r
- if(req.getParameter("subid")!=null && req.getParameter("feedid") !=null) {\r
- StringBuffer subidstr = new StringBuffer();\r
-// subidstr.append(" and e.DELIVERY_SUBID in(subid)");\r
-// subidstr.append(req.getParameter("subid").replace("|", ",").toString());\r
- subidstr.append("and e.DELIVERY_SUBID in(");\r
- \r
- subidstr.append(req.getParameter("subid").replace("|", ",").toString());\r
- subidstr.append(")");\r
- map.put("subid", subidstr.toString());\r
- }\r
- if(req.getParameter("subid")!=null && req.getParameter("groupid") !=null) {\r
- StringBuffer subidstr = new StringBuffer();\r
-// subidstr.append(" and e.DELIVERY_SUBID in(subid)");\r
-// subidstr.append(req.getParameter("subid").replace("|", ",").toString());\r
- subidstr.append("and e.DELIVERY_SUBID in(");\r
- \r
- subidstr.append(req.getParameter("subid").replace("|", ",").toString());\r
- subidstr.append(")");\r
- map.put("subid", subidstr.toString());\r
- }\r
- if(req.getParameter("type")!=null) {\r
- map.put("eventType", req.getParameter("type").replace("|", ",").toString());\r
- }\r
- if(req.getParameter("output_type")!=null) {\r
- map.put("output_type", req.getParameter("output_type").toString());\r
- }\r
- if(req.getParameter("start_time")!=null) {\r
- map.put("start_time", req.getParameter("start_time").toString());\r
- }\r
- if(req.getParameter("end_time")!=null) {\r
- map.put("end_time", req.getParameter("end_time").toString());\r
- }\r
- \r
- if(req.getParameter("time")!=null) {\r
- map.put("start_time", req.getParameter("time").toString());\r
- map.put("end_time", null);\r
- }\r
- \r
- \r
- \r
- if(req.getParameter("output_type") !=null)\r
- {\r
- outputType = req.getParameter("output_type");\r
- }\r
- \r
- \r
- try {\r
- \r
- String filterQuery = this.queryGeneretor(map);\r
- eventlogger.debug("SQL Query for Statistics resultset. "+filterQuery);\r
- \r
- ResultSet rs=this.getRecordsForSQL(filterQuery);\r
- \r
- if(outputType.equals("csv")) {\r
- resp.setContentType("application/octet-stream");\r
- Date date = new Date() ;\r
- SimpleDateFormat dateFormat = new SimpleDateFormat("dd-MM-YYYY HH:mm:ss") ;\r
- resp.setHeader("Content-Disposition", "attachment; filename=\"result:"+dateFormat.format(date)+".csv\"");\r
- eventlogger.info("Generating CSV file from Statistics resultset");\r
- \r
- rsToCSV(rs, out);\r
- }\r
- else {\r
- eventlogger.info("Generating JSON for Statistics resultset");\r
- this.rsToJson(rs, out); \r
- }\r
- } \r
- catch (IOException e) {\r
- eventlogger.error("IOException - Generating JSON/CSV:"+e);\r
- e.printStackTrace();\r
- } \r
- catch (JSONException e) {\r
- eventlogger.error("JSONException - executing SQL query:"+e);\r
- e.printStackTrace();\r
- } catch (SQLException e) {\r
- eventlogger.error("SQLException - executing SQL query:"+e);\r
- e.printStackTrace();\r
- } catch (ParseException e) {\r
- eventlogger.error("ParseException - executing SQL query:"+e);\r
- e.printStackTrace();\r
- }\r
- }\r
- \r
- \r
- /**\r
- * rsToJson - Converting RS to JSON object\r
- * @exception IOException, SQLException\r
- * @param out ServletOutputStream, rs as ResultSet\r
- */\r
- public void rsToCSV(ResultSet rs, ServletOutputStream out) throws IOException, SQLException {\r
- String header = "FEEDNAME,FEEDID,FILES_PUBLISHED,PUBLISH_LENGTH, FILES_DELIVERED, DELIVERED_LENGTH, SUBSCRIBER_URL, SUBID, PUBLISH_TIME,DELIVERY_TIME, AverageDelay\n";\r
-\r
- // String header = "FEEDNAME,FEEDID,TYPE,REMOTE_ADDR,DELIVERY_SUBID,REQURI,TOTAL CONTENT LENGTH,NO OF FILE,AVERAGE DELAY\n";\r
- \r
+ private static final long TWENTYFOUR_HOURS = (24 * 60 * 60 * 1000L);\r
+ private static final String fmt1 = "yyyy-MM-dd'T'HH:mm:ss'Z'";\r
+ private static final String fmt2 = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";\r
+\r
+\r
+ /**\r
+ * DELETE a logging URL -- not supported.\r
+ */\r
+ @Override\r
+ public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ String message = "DELETE not allowed for the logURL.";\r
+ EventLogRecord elr = new EventLogRecord(req);\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
+ }\r
+ /**\r
+ * GET a Statistics URL -- retrieve Statistics data for a feed or subscription.\r
+ * See the <b>Statistics API</b> document for details on how this method should be invoked.\r
+ */\r
+ @Override\r
+ public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+\r
+ Map<String, String> map = buildMapFromRequest(req);\r
+ if (map.get("err") != null) {\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, "Invalid arguments: "+map.get("err"));\r
+ return;\r
+ }\r
+ // check Accept: header??\r
+\r
+ resp.setStatus(HttpServletResponse.SC_OK);\r
+ resp.setContentType(LOGLIST_CONTENT_TYPE);\r
+ ServletOutputStream out = resp.getOutputStream();\r
+\r
+\r
+ String outputType = "json";\r
+ String feedids = null;\r
+\r
+ if(req.getParameter("feedid") ==null && req.getParameter("groupid") ==null)\r
+ {\r
+ out.print("Invalid request, Feedid or Group ID is required.");\r
+ }\r
+\r
+ if(req.getParameter("feedid")!=null && req.getParameter("groupid") == null) {\r
+ map.put("feedids", req.getParameter("feedid").replace("|", ",").toString());\r
+ }\r
+\r
+ if(req.getParameter("groupid") != null && req.getParameter("feedid") ==null) {\r
+ // String groupid1 = null;\r
+ StringBuffer groupid1 = new StringBuffer();\r
+\r
+ try {\r
+ System.out.println("feeedidsssssssss");\r
+ groupid1 = this.getFeedIdsByGroupId(Integer.parseInt(req.getParameter("groupid")));\r
+ System.out.println("feeedids"+req.getParameter("groupid"));\r
+\r
+ map.put("feedids", groupid1.toString());\r
+ System.out.println("groupid1" +groupid1.toString());\r
+\r
+\r
+ } catch (NumberFormatException e) {\r
+ e.printStackTrace();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ if(req.getParameter("groupid") != null && req.getParameter("feedid") !=null) {\r
+ StringBuffer groupid1 = new StringBuffer();\r
+\r
+\r
+ try {\r
+ System.out.println("both r not null");\r
+ groupid1 = this.getFeedIdsByGroupId(Integer.parseInt(req.getParameter("groupid")));\r
+ System.out.println("feeedids"+req.getParameter("groupid"));\r
+ groupid1.append(",");\r
+ groupid1.append(req.getParameter("feedid").replace("|", ",").toString());\r
+\r
+ map.put("feedids", groupid1.toString());\r
+\r
+\r
+ System.out.println("groupid1" +groupid1.toString());\r
+\r
+\r
+ } catch (NumberFormatException e) {\r
+ e.printStackTrace();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+\r
+\r
+\r
+ if(req.getParameter("subid")!=null && req.getParameter("feedid") !=null) {\r
+ StringBuffer subidstr = new StringBuffer();\r
+// subidstr.append(" and e.DELIVERY_SUBID in(subid)");\r
+// subidstr.append(req.getParameter("subid").replace("|", ",").toString());\r
+ subidstr.append("and e.DELIVERY_SUBID in(");\r
+\r
+ subidstr.append(req.getParameter("subid").replace("|", ",").toString());\r
+ subidstr.append(")");\r
+ map.put("subid", subidstr.toString());\r
+ }\r
+ if(req.getParameter("subid")!=null && req.getParameter("groupid") !=null) {\r
+ StringBuffer subidstr = new StringBuffer();\r
+// subidstr.append(" and e.DELIVERY_SUBID in(subid)");\r
+// subidstr.append(req.getParameter("subid").replace("|", ",").toString());\r
+ subidstr.append("and e.DELIVERY_SUBID in(");\r
+\r
+ subidstr.append(req.getParameter("subid").replace("|", ",").toString());\r
+ subidstr.append(")");\r
+ map.put("subid", subidstr.toString());\r
+ }\r
+ if(req.getParameter("type")!=null) {\r
+ map.put("eventType", req.getParameter("type").replace("|", ",").toString());\r
+ }\r
+ if(req.getParameter("output_type")!=null) {\r
+ map.put("output_type", req.getParameter("output_type").toString());\r
+ }\r
+ if(req.getParameter("start_time")!=null) {\r
+ map.put("start_time", req.getParameter("start_time").toString());\r
+ }\r
+ if(req.getParameter("end_time")!=null) {\r
+ map.put("end_time", req.getParameter("end_time").toString());\r
+ }\r
+\r
+ if(req.getParameter("time")!=null) {\r
+ map.put("start_time", req.getParameter("time").toString());\r
+ map.put("end_time", null);\r
+ }\r
+\r
+\r
+\r
+ if(req.getParameter("output_type") !=null)\r
+ {\r
+ outputType = req.getParameter("output_type");\r
+ }\r
+\r
+\r
+ try {\r
+\r
+ String filterQuery = this.queryGeneretor(map);\r
+ eventlogger.debug("SQL Query for Statistics resultset. "+filterQuery);\r
+\r
+ ResultSet rs=this.getRecordsForSQL(filterQuery);\r
+\r
+ if(outputType.equals("csv")) {\r
+ resp.setContentType("application/octet-stream");\r
+ Date date = new Date() ;\r
+ SimpleDateFormat dateFormat = new SimpleDateFormat("dd-MM-YYYY HH:mm:ss") ;\r
+ resp.setHeader("Content-Disposition", "attachment; filename=\"result:"+dateFormat.format(date)+".csv\"");\r
+ eventlogger.info("Generating CSV file from Statistics resultset");\r
+\r
+ rsToCSV(rs, out);\r
+ }\r
+ else {\r
+ eventlogger.info("Generating JSON for Statistics resultset");\r
+ this.rsToJson(rs, out);\r
+ }\r
+ }\r
+ catch (IOException e) {\r
+ eventlogger.error("IOException - Generating JSON/CSV:"+e);\r
+ e.printStackTrace();\r
+ }\r
+ catch (JSONException e) {\r
+ eventlogger.error("JSONException - executing SQL query:"+e);\r
+ e.printStackTrace();\r
+ } catch (SQLException e) {\r
+ eventlogger.error("SQLException - executing SQL query:"+e);\r
+ e.printStackTrace();\r
+ } catch (ParseException e) {\r
+ eventlogger.error("ParseException - executing SQL query:"+e);\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+\r
+\r
+ /**\r
+ * rsToJson - Converting RS to JSON object\r
+ * @exception IOException, SQLException\r
+ * @param out ServletOutputStream, rs as ResultSet\r
+ */\r
+ public void rsToCSV(ResultSet rs, ServletOutputStream out) throws IOException, SQLException {\r
+ String header = "FEEDNAME,FEEDID,FILES_PUBLISHED,PUBLISH_LENGTH, FILES_DELIVERED, DELIVERED_LENGTH, SUBSCRIBER_URL, SUBID, PUBLISH_TIME,DELIVERY_TIME, AverageDelay\n";\r
+\r
+ // String header = "FEEDNAME,FEEDID,TYPE,REMOTE_ADDR,DELIVERY_SUBID,REQURI,TOTAL CONTENT LENGTH,NO OF FILE,AVERAGE DELAY\n";\r
+\r
out.write(header.getBytes());\r
- \r
+\r
while(rs.next()) {\r
- StringBuffer line = new StringBuffer();\r
- line.append(rs.getString("FEEDNAME"));\r
- line.append(",");\r
- line.append(rs.getString("FEEDID"));\r
- line.append(",");\r
- line.append(rs.getString("FILES_PUBLISHED"));\r
- line.append(",");\r
- line.append(rs.getString("PUBLISH_LENGTH"));\r
- line.append(",");\r
- line.append(rs.getString("FILES_DELIVERED"));\r
- line.append(",");\r
- line.append(rs.getString("DELIVERED_LENGTH"));\r
- line.append(",");\r
- line.append(rs.getString("SUBSCRIBER_URL"));\r
- line.append(",");\r
- line.append(rs.getString("SUBID"));\r
- line.append(",");\r
- line.append(rs.getString("PUBLISH_TIME"));\r
- line.append(",");\r
- line.append(rs.getString("DELIVERY_TIME"));\r
- line.append(",");\r
- line.append(rs.getString("AverageDelay"));\r
- line.append(",");\r
- \r
- line.append("\n");\r
- out.write(line.toString().getBytes());\r
- out.flush();\r
+ StringBuffer line = new StringBuffer();\r
+ line.append(rs.getString("FEEDNAME"));\r
+ line.append(",");\r
+ line.append(rs.getString("FEEDID"));\r
+ line.append(",");\r
+ line.append(rs.getString("FILES_PUBLISHED"));\r
+ line.append(",");\r
+ line.append(rs.getString("PUBLISH_LENGTH"));\r
+ line.append(",");\r
+ line.append(rs.getString("FILES_DELIVERED"));\r
+ line.append(",");\r
+ line.append(rs.getString("DELIVERED_LENGTH"));\r
+ line.append(",");\r
+ line.append(rs.getString("SUBSCRIBER_URL"));\r
+ line.append(",");\r
+ line.append(rs.getString("SUBID"));\r
+ line.append(",");\r
+ line.append(rs.getString("PUBLISH_TIME"));\r
+ line.append(",");\r
+ line.append(rs.getString("DELIVERY_TIME"));\r
+ line.append(",");\r
+ line.append(rs.getString("AverageDelay"));\r
+ line.append(",");\r
+\r
+ line.append("\n");\r
+ out.write(line.toString().getBytes());\r
+ out.flush();\r
}\r
- }\r
- \r
- /**\r
- * rsToJson - Converting RS to JSON object\r
- * @exception IOException, SQLException\r
- * @param out ServletOutputStream, rs as ResultSet\r
- */\r
- public void rsToJson(ResultSet rs, ServletOutputStream out) throws IOException, SQLException {\r
- \r
- String fields[] = {"FEEDNAME","FEEDID","FILES_PUBLISHED","PUBLISH_LENGTH", "FILES_DELIVERED", "DELIVERED_LENGTH", "SUBSCRIBER_URL", "SUBID", "PUBLISH_TIME","DELIVERY_TIME", "AverageDelay"};\r
- StringBuffer line = new StringBuffer();\r
- \r
- line.append("[\n");\r
- \r
- while(rs.next()) {\r
- LOGJSONObject j2 = new LOGJSONObject();\r
- for (String key : fields) {\r
- Object v = rs.getString(key);\r
- if (v != null)\r
- j2.put(key.toLowerCase(), v);\r
- else\r
- j2.put(key.toLowerCase(), "");\r
- }\r
- line = line.append(j2.toString());;\r
- line.append(",\n");\r
- }\r
- line.append("]");\r
- out.print(line.toString());\r
- }\r
- \r
- /**\r
- * getFeedIdsByGroupId - Getting FEEDID's by GROUP ID.\r
- * @exception SQL Query SQLException.\r
- * @param groupIds\r
- */\r
- public StringBuffer getFeedIdsByGroupId(int groupIds) throws SQLException{ \r
- \r
- DB db = null; \r
- Connection conn = null; \r
- PreparedStatement prepareStatement = null; \r
- ResultSet resultSet=null; \r
- String sqlGoupid = null; \r
- StringBuffer feedIds = new StringBuffer(); \r
- \r
- try { \r
- db = new DB(); \r
- conn = db.getConnection(); \r
- sqlGoupid= " SELECT FEEDID from FEEDS WHERE GROUPID = ?"; \r
- prepareStatement =conn.prepareStatement(sqlGoupid); \r
- prepareStatement.setInt(1, groupIds); \r
- resultSet=prepareStatement.executeQuery(); \r
- while(resultSet.next()){ \r
- feedIds.append(resultSet.getInt("FEEDID"));\r
- feedIds.append(",");\r
- } \r
- feedIds.deleteCharAt(feedIds.length()-1);\r
- System.out.println("feedIds"+feedIds.toString());\r
- \r
- } catch (SQLException e) { \r
- e.printStackTrace(); \r
- } finally { \r
- try { \r
- if(resultSet != null) { \r
- resultSet.close(); \r
- resultSet = null; \r
- } \r
- \r
- if(prepareStatement != null) { \r
- prepareStatement.close(); \r
- prepareStatement = null; \r
- } \r
- \r
- if(conn != null){ \r
- db.release(conn); \r
- } \r
- } catch(Exception e) { \r
- e.printStackTrace(); \r
- } \r
- } \r
- return feedIds; \r
- }\r
-\r
- \r
- /**\r
- * queryGeneretor - Generating sql query\r
- * @exception ParseException\r
- * @param map as key value pare of all user input fields\r
- */\r
- public String queryGeneretor(Map<String, String> map) throws ParseException{\r
- \r
- String sql = null;\r
- String eventType = null;\r
- String feedids = null;\r
- String start_time = null;\r
- String end_time = null;\r
- String subid=" ";\r
- if(map.get("eventType") != null){\r
- eventType=(String) map.get("eventType");\r
- }\r
- if(map.get("feedids") != null){\r
- feedids=(String) map.get("feedids");\r
- }\r
- if(map.get("start_time") != null){\r
- start_time=(String) map.get("start_time");\r
- }\r
- if(map.get("end_time") != null){\r
- end_time=(String) map.get("end_time");\r
- }\r
- if("all".equalsIgnoreCase(eventType)){\r
- eventType="PUB','DEL, EXP, PBF";\r
- }\r
- if(map.get("subid") != null){\r
- subid=(String) map.get("subid");\r
- }\r
- \r
- eventlogger.info("Generating sql query to get Statistics resultset. ");\r
- \r
- if(end_time==null && start_time==null ){\r
-\r
- \r
- sql="SELECT (SELECT NAME FROM FEEDS AS f WHERE f.FEEDID in("+feedids+") and f.FEEDID=e.FEEDID) AS FEEDNAME, e.FEEDID as FEEDID, (SELECT COUNT(*) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS FILES_PUBLISHED,(SELECT SUM(content_length) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS PUBLISH_LENGTH, COUNT(e.EVENT_TIME) as FILES_DELIVERED, sum(m.content_length) as DELIVERED_LENGTH,SUBSTRING_INDEX(e.REQURI,'/',+3) as SUBSCRIBER_URL, e.DELIVERY_SUBID as SUBID, e.EVENT_TIME AS PUBLISH_TIME, m.EVENT_TIME AS DELIVERY_TIME, AVG(e.EVENT_TIME - m.EVENT_TIME)/1000 as AverageDelay FROM LOG_RECORDS e JOIN LOG_RECORDS m ON m.PUBLISH_ID = e.PUBLISH_ID AND e.FEEDID IN ("+feedids+") "+subid+" AND m.STATUS=204 AND e.RESULT=204 group by SUBID";\r
- \r
- return sql;\r
- }else if(start_time!=null && end_time==null ){\r
-\r
- long inputTimeInMilli=60000*Long.parseLong(start_time);\r
- Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("GMT"));\r
- long currentTimeInMilli=cal.getTimeInMillis();\r
- long compareTime=currentTimeInMilli-inputTimeInMilli;\r
- \r
- sql="SELECT (SELECT NAME FROM FEEDS AS f WHERE f.FEEDID in("+feedids+") and f.FEEDID=e.FEEDID) AS FEEDNAME, e.FEEDID as FEEDID, (SELECT COUNT(*) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS FILES_PUBLISHED,(SELECT SUM(content_length) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS PUBLISH_LENGTH, COUNT(e.EVENT_TIME) as FILES_DELIVERED, sum(m.content_length) as DELIVERED_LENGTH,SUBSTRING_INDEX(e.REQURI,'/',+3) as SUBSCRIBER_URL, e.DELIVERY_SUBID as SUBID, e.EVENT_TIME AS PUBLISH_TIME, m.EVENT_TIME AS DELIVERY_TIME, AVG(e.EVENT_TIME - m.EVENT_TIME)/1000 as AverageDelay FROM LOG_RECORDS e JOIN LOG_RECORDS m ON m.PUBLISH_ID = e.PUBLISH_ID AND e.FEEDID IN ("+feedids+") "+subid+" AND m.STATUS=204 AND e.RESULT=204 and e.event_time>="+compareTime+" group by SUBID";\r
- \r
- return sql;\r
- \r
- }else{\r
- SimpleDateFormat inFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");\r
- Date startDate=inFormat.parse(start_time);\r
- Date endDate=inFormat.parse(end_time);\r
-\r
- long startInMillis=startDate.getTime();\r
- long endInMillis=endDate.getTime();\r
- \r
- {\r
- \r
- sql="SELECT (SELECT NAME FROM FEEDS AS f WHERE f.FEEDID in("+feedids+") and f.FEEDID=e.FEEDID) AS FEEDNAME, e.FEEDID as FEEDID, (SELECT COUNT(*) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS FILES_PUBLISHED,(SELECT SUM(content_length) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS PUBLISH_LENGTH, COUNT(e.EVENT_TIME) as FILES_DELIVERED, sum(m.content_length) as DELIVERED_LENGTH,SUBSTRING_INDEX(e.REQURI,'/',+3) as SUBSCRIBER_URL, e.DELIVERY_SUBID as SUBID, e.EVENT_TIME AS PUBLISH_TIME, m.EVENT_TIME AS DELIVERY_TIME, AVG(e.EVENT_TIME - m.EVENT_TIME)/1000 as AverageDelay FROM LOG_RECORDS e JOIN LOG_RECORDS m ON m.PUBLISH_ID = e.PUBLISH_ID AND e.FEEDID IN ("+feedids+") "+subid+" AND m.STATUS=204 AND e.RESULT=204 and e.event_time between "+startInMillis+" and "+endInMillis+" group by SUBID";\r
- \r
- }\r
- return sql;\r
- }\r
- }\r
- \r
- \r
- /**\r
- * PUT a Statistics URL -- not supported.\r
- */\r
- @Override\r
- public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- String message = "PUT not allowed for the StatisticsURL.";\r
- EventLogRecord elr = new EventLogRecord(req);\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
- }\r
- /**\r
- * POST a Statistics URL -- not supported.\r
- */\r
- @Override\r
- public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- String message = "POST not allowed for the StatisticsURL.";\r
- EventLogRecord elr = new EventLogRecord(req);\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
- }\r
-\r
- private Map<String, String> buildMapFromRequest(HttpServletRequest req) {\r
- Map<String, String> map = new HashMap<String, String>();\r
- String s = req.getParameter("type");\r
- if (s != null) {\r
- if (s.equals("pub") || s.equals("del") || s.equals("exp")) {\r
- map.put("type", s);\r
- } else {\r
- map.put("err", "bad type");\r
- return map;\r
- }\r
- } else\r
- map.put("type", "all");\r
- map.put("publishSQL", "");\r
- map.put("statusSQL", "");\r
- map.put("resultSQL", "");\r
- map.put("reasonSQL", "");\r
-\r
- s = req.getParameter("publishId");\r
- if (s != null) {\r
- if (s.indexOf("'") >= 0) {\r
- map.put("err", "bad publishId");\r
- return map;\r
- }\r
- map.put("publishSQL", " AND PUBLISH_ID = '"+s+"'");\r
- }\r
-\r
- s = req.getParameter("statusCode");\r
- if (s != null) {\r
- String sql = null;\r
- if (s.equals("success")) {\r
- sql = " AND STATUS >= 200 AND STATUS < 300";\r
- } else if (s.equals("redirect")) {\r
- sql = " AND STATUS >= 300 AND STATUS < 400";\r
- } else if (s.equals("failure")) {\r
- sql = " AND STATUS >= 400";\r
- } else {\r
- try {\r
- Integer n = Integer.parseInt(s);\r
- if ((n >= 100 && n < 600) || (n == -1))\r
- sql = " AND STATUS = " + n;\r
- } catch (NumberFormatException e) {\r
- }\r
- }\r
- if (sql == null) {\r
- map.put("err", "bad statusCode");\r
- return map;\r
- }\r
- map.put("statusSQL", sql);\r
- map.put("resultSQL", sql.replaceAll("STATUS", "RESULT"));\r
- }\r
-\r
- s = req.getParameter("expiryReason");\r
- if (s != null) {\r
- map.put("type", "exp");\r
- if (s.equals("notRetryable")) {\r
- map.put("reasonSQL", " AND REASON = 'notRetryable'");\r
- } else if (s.equals("retriesExhausted")) {\r
- map.put("reasonSQL", " AND REASON = 'retriesExhausted'");\r
- } else if (s.equals("diskFull")) {\r
- map.put("reasonSQL", " AND REASON = 'diskFull'");\r
- } else if (s.equals("other")) {\r
- map.put("reasonSQL", " AND REASON = 'other'");\r
- } else {\r
- map.put("err", "bad expiryReason");\r
- return map;\r
- }\r
- }\r
-\r
- long stime = getTimeFromParam(req.getParameter("start"));\r
- if (stime < 0) {\r
- map.put("err", "bad start");\r
- return map;\r
- }\r
- long etime = getTimeFromParam(req.getParameter("end"));\r
- if (etime < 0) {\r
- map.put("err", "bad end");\r
- return map;\r
- }\r
- if (stime == 0 && etime == 0) {\r
- etime = System.currentTimeMillis();\r
- stime = etime - TWENTYFOUR_HOURS;\r
- } else if (stime == 0) {\r
- stime = etime - TWENTYFOUR_HOURS;\r
- } else if (etime == 0) {\r
- etime = stime + TWENTYFOUR_HOURS;\r
- }\r
- map.put("timeSQL", String.format(" AND EVENT_TIME >= %d AND EVENT_TIME <= %d", stime, etime));\r
- return map;\r
- }\r
- private long getTimeFromParam(final String s) {\r
- if (s == null)\r
- return 0;\r
- try {\r
- // First, look for an RFC 3339 date\r
- String fmt = (s.indexOf('.') > 0) ? fmt2 : fmt1;\r
- SimpleDateFormat sdf = new SimpleDateFormat(fmt);\r
- Date d = sdf.parse(s);\r
- return d.getTime();\r
- } catch (ParseException e) {\r
- }\r
- try {\r
- // Also allow a long (in ms); useful for testing\r
- long n = Long.parseLong(s);\r
- return n;\r
- } catch (NumberFormatException e) {\r
- }\r
- intlogger.info("Error parsing time="+s);\r
- return -1;\r
- }\r
-\r
- \r
- private ResultSet getRecordsForSQL(String sql) {\r
- intlogger.debug(sql);\r
- long start = System.currentTimeMillis();\r
- DB db = new DB();\r
- Connection conn = null;\r
- ResultSet rs=null;\r
- \r
- try {\r
- conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- PreparedStatement pst=conn.prepareStatement(sql);\r
- rs=pst.executeQuery();\r
- //this.rsToJson(rs)\r
- //rs.close();\r
- stmt.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- } finally {\r
- if (conn != null)\r
- db.release(conn);\r
- }\r
- \r
- intlogger.debug("Time: " + (System.currentTimeMillis()-start) + " ms");\r
- \r
- return rs;\r
- }\r
+ }\r
+\r
+ /**\r
+ * rsToJson - Converting RS to JSON object\r
+ * @exception IOException, SQLException\r
+ * @param out ServletOutputStream, rs as ResultSet\r
+ */\r
+ public void rsToJson(ResultSet rs, ServletOutputStream out) throws IOException, SQLException {\r
+\r
+ String fields[] = {"FEEDNAME","FEEDID","FILES_PUBLISHED","PUBLISH_LENGTH", "FILES_DELIVERED", "DELIVERED_LENGTH", "SUBSCRIBER_URL", "SUBID", "PUBLISH_TIME","DELIVERY_TIME", "AverageDelay"};\r
+ StringBuffer line = new StringBuffer();\r
+\r
+ line.append("[\n");\r
+\r
+ while(rs.next()) {\r
+ LOGJSONObject j2 = new LOGJSONObject();\r
+ for (String key : fields) {\r
+ Object v = rs.getString(key);\r
+ if (v != null)\r
+ j2.put(key.toLowerCase(), v);\r
+ else\r
+ j2.put(key.toLowerCase(), "");\r
+ }\r
+ line = line.append(j2.toString());;\r
+ line.append(",\n");\r
+ }\r
+ line.append("]");\r
+ out.print(line.toString());\r
+ }\r
+\r
+ /**\r
+ * getFeedIdsByGroupId - Getting FEEDID's by GROUP ID.\r
+ * @exception SQL Query SQLException.\r
+ * @param groupIds\r
+ */\r
+ public StringBuffer getFeedIdsByGroupId(int groupIds) throws SQLException{\r
+\r
+ DB db = null;\r
+ Connection conn = null;\r
+ PreparedStatement prepareStatement = null;\r
+ ResultSet resultSet=null;\r
+ String sqlGoupid = null;\r
+ StringBuffer feedIds = new StringBuffer();\r
+\r
+ try {\r
+ db = new DB();\r
+ conn = db.getConnection();\r
+ sqlGoupid= " SELECT FEEDID from FEEDS WHERE GROUPID = ?";\r
+ prepareStatement =conn.prepareStatement(sqlGoupid);\r
+ prepareStatement.setInt(1, groupIds);\r
+ resultSet=prepareStatement.executeQuery();\r
+ while(resultSet.next()){\r
+ feedIds.append(resultSet.getInt("FEEDID"));\r
+ feedIds.append(",");\r
+ }\r
+ feedIds.deleteCharAt(feedIds.length()-1);\r
+ System.out.println("feedIds"+feedIds.toString());\r
+\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ if(resultSet != null) {\r
+ resultSet.close();\r
+ resultSet = null;\r
+ }\r
+\r
+ if(prepareStatement != null) {\r
+ prepareStatement.close();\r
+ prepareStatement = null;\r
+ }\r
+\r
+ if(conn != null){\r
+ db.release(conn);\r
+ }\r
+ } catch(Exception e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return feedIds;\r
+ }\r
+\r
+\r
+ /**\r
+ * queryGeneretor - Generating sql query\r
+ * @exception ParseException\r
+ * @param map as key value pare of all user input fields\r
+ */\r
+ public String queryGeneretor(Map<String, String> map) throws ParseException{\r
+\r
+ String sql = null;\r
+ String eventType = null;\r
+ String feedids = null;\r
+ String start_time = null;\r
+ String end_time = null;\r
+ String subid=" ";\r
+ if(map.get("eventType") != null){\r
+ eventType=(String) map.get("eventType");\r
+ }\r
+ if(map.get("feedids") != null){\r
+ feedids=(String) map.get("feedids");\r
+ }\r
+ if(map.get("start_time") != null){\r
+ start_time=(String) map.get("start_time");\r
+ }\r
+ if(map.get("end_time") != null){\r
+ end_time=(String) map.get("end_time");\r
+ }\r
+ if("all".equalsIgnoreCase(eventType)){\r
+ eventType="PUB','DEL, EXP, PBF";\r
+ }\r
+ if(map.get("subid") != null){\r
+ subid=(String) map.get("subid");\r
+ }\r
+\r
+ eventlogger.info("Generating sql query to get Statistics resultset. ");\r
+\r
+ if(end_time==null && start_time==null ){\r
+\r
+\r
+ sql="SELECT (SELECT NAME FROM FEEDS AS f WHERE f.FEEDID in("+feedids+") and f.FEEDID=e.FEEDID) AS FEEDNAME, e.FEEDID as FEEDID, (SELECT COUNT(*) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS FILES_PUBLISHED,(SELECT SUM(content_length) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS PUBLISH_LENGTH, COUNT(e.EVENT_TIME) as FILES_DELIVERED, sum(m.content_length) as DELIVERED_LENGTH,SUBSTRING_INDEX(e.REQURI,'/',+3) as SUBSCRIBER_URL, e.DELIVERY_SUBID as SUBID, e.EVENT_TIME AS PUBLISH_TIME, m.EVENT_TIME AS DELIVERY_TIME, AVG(e.EVENT_TIME - m.EVENT_TIME)/1000 as AverageDelay FROM LOG_RECORDS e JOIN LOG_RECORDS m ON m.PUBLISH_ID = e.PUBLISH_ID AND e.FEEDID IN ("+feedids+") "+subid+" AND m.STATUS=204 AND e.RESULT=204 group by SUBID";\r
+\r
+ return sql;\r
+ }else if(start_time!=null && end_time==null ){\r
+\r
+ long inputTimeInMilli=60000*Long.parseLong(start_time);\r
+ Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("GMT"));\r
+ long currentTimeInMilli=cal.getTimeInMillis();\r
+ long compareTime=currentTimeInMilli-inputTimeInMilli;\r
+\r
+ sql="SELECT (SELECT NAME FROM FEEDS AS f WHERE f.FEEDID in("+feedids+") and f.FEEDID=e.FEEDID) AS FEEDNAME, e.FEEDID as FEEDID, (SELECT COUNT(*) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS FILES_PUBLISHED,(SELECT SUM(content_length) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS PUBLISH_LENGTH, COUNT(e.EVENT_TIME) as FILES_DELIVERED, sum(m.content_length) as DELIVERED_LENGTH,SUBSTRING_INDEX(e.REQURI,'/',+3) as SUBSCRIBER_URL, e.DELIVERY_SUBID as SUBID, e.EVENT_TIME AS PUBLISH_TIME, m.EVENT_TIME AS DELIVERY_TIME, AVG(e.EVENT_TIME - m.EVENT_TIME)/1000 as AverageDelay FROM LOG_RECORDS e JOIN LOG_RECORDS m ON m.PUBLISH_ID = e.PUBLISH_ID AND e.FEEDID IN ("+feedids+") "+subid+" AND m.STATUS=204 AND e.RESULT=204 and e.event_time>="+compareTime+" group by SUBID";\r
+\r
+ return sql;\r
+\r
+ }else{\r
+ SimpleDateFormat inFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");\r
+ Date startDate=inFormat.parse(start_time);\r
+ Date endDate=inFormat.parse(end_time);\r
+\r
+ long startInMillis=startDate.getTime();\r
+ long endInMillis=endDate.getTime();\r
+\r
+ {\r
+\r
+ sql="SELECT (SELECT NAME FROM FEEDS AS f WHERE f.FEEDID in("+feedids+") and f.FEEDID=e.FEEDID) AS FEEDNAME, e.FEEDID as FEEDID, (SELECT COUNT(*) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS FILES_PUBLISHED,(SELECT SUM(content_length) FROM LOG_RECORDS AS c WHERE c.FEEDID in("+feedids+") and c.FEEDID=e.FEEDID AND c.TYPE='PUB') AS PUBLISH_LENGTH, COUNT(e.EVENT_TIME) as FILES_DELIVERED, sum(m.content_length) as DELIVERED_LENGTH,SUBSTRING_INDEX(e.REQURI,'/',+3) as SUBSCRIBER_URL, e.DELIVERY_SUBID as SUBID, e.EVENT_TIME AS PUBLISH_TIME, m.EVENT_TIME AS DELIVERY_TIME, AVG(e.EVENT_TIME - m.EVENT_TIME)/1000 as AverageDelay FROM LOG_RECORDS e JOIN LOG_RECORDS m ON m.PUBLISH_ID = e.PUBLISH_ID AND e.FEEDID IN ("+feedids+") "+subid+" AND m.STATUS=204 AND e.RESULT=204 and e.event_time between "+startInMillis+" and "+endInMillis+" group by SUBID";\r
+\r
+ }\r
+ return sql;\r
+ }\r
+ }\r
+\r
+\r
+ /**\r
+ * PUT a Statistics URL -- not supported.\r
+ */\r
+ @Override\r
+ public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ String message = "PUT not allowed for the StatisticsURL.";\r
+ EventLogRecord elr = new EventLogRecord(req);\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
+ }\r
+ /**\r
+ * POST a Statistics URL -- not supported.\r
+ */\r
+ @Override\r
+ public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ String message = "POST not allowed for the StatisticsURL.";\r
+ EventLogRecord elr = new EventLogRecord(req);\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
+ }\r
+\r
+ private Map<String, String> buildMapFromRequest(HttpServletRequest req) {\r
+ Map<String, String> map = new HashMap<String, String>();\r
+ String s = req.getParameter("type");\r
+ if (s != null) {\r
+ if (s.equals("pub") || s.equals("del") || s.equals("exp")) {\r
+ map.put("type", s);\r
+ } else {\r
+ map.put("err", "bad type");\r
+ return map;\r
+ }\r
+ } else\r
+ map.put("type", "all");\r
+ map.put("publishSQL", "");\r
+ map.put("statusSQL", "");\r
+ map.put("resultSQL", "");\r
+ map.put("reasonSQL", "");\r
+\r
+ s = req.getParameter("publishId");\r
+ if (s != null) {\r
+ if (s.indexOf("'") >= 0) {\r
+ map.put("err", "bad publishId");\r
+ return map;\r
+ }\r
+ map.put("publishSQL", " AND PUBLISH_ID = '"+s+"'");\r
+ }\r
+\r
+ s = req.getParameter("statusCode");\r
+ if (s != null) {\r
+ String sql = null;\r
+ if (s.equals("success")) {\r
+ sql = " AND STATUS >= 200 AND STATUS < 300";\r
+ } else if (s.equals("redirect")) {\r
+ sql = " AND STATUS >= 300 AND STATUS < 400";\r
+ } else if (s.equals("failure")) {\r
+ sql = " AND STATUS >= 400";\r
+ } else {\r
+ try {\r
+ Integer n = Integer.parseInt(s);\r
+ if ((n >= 100 && n < 600) || (n == -1))\r
+ sql = " AND STATUS = " + n;\r
+ } catch (NumberFormatException e) {\r
+ }\r
+ }\r
+ if (sql == null) {\r
+ map.put("err", "bad statusCode");\r
+ return map;\r
+ }\r
+ map.put("statusSQL", sql);\r
+ map.put("resultSQL", sql.replaceAll("STATUS", "RESULT"));\r
+ }\r
+\r
+ s = req.getParameter("expiryReason");\r
+ if (s != null) {\r
+ map.put("type", "exp");\r
+ if (s.equals("notRetryable")) {\r
+ map.put("reasonSQL", " AND REASON = 'notRetryable'");\r
+ } else if (s.equals("retriesExhausted")) {\r
+ map.put("reasonSQL", " AND REASON = 'retriesExhausted'");\r
+ } else if (s.equals("diskFull")) {\r
+ map.put("reasonSQL", " AND REASON = 'diskFull'");\r
+ } else if (s.equals("other")) {\r
+ map.put("reasonSQL", " AND REASON = 'other'");\r
+ } else {\r
+ map.put("err", "bad expiryReason");\r
+ return map;\r
+ }\r
+ }\r
+\r
+ long stime = getTimeFromParam(req.getParameter("start"));\r
+ if (stime < 0) {\r
+ map.put("err", "bad start");\r
+ return map;\r
+ }\r
+ long etime = getTimeFromParam(req.getParameter("end"));\r
+ if (etime < 0) {\r
+ map.put("err", "bad end");\r
+ return map;\r
+ }\r
+ if (stime == 0 && etime == 0) {\r
+ etime = System.currentTimeMillis();\r
+ stime = etime - TWENTYFOUR_HOURS;\r
+ } else if (stime == 0) {\r
+ stime = etime - TWENTYFOUR_HOURS;\r
+ } else if (etime == 0) {\r
+ etime = stime + TWENTYFOUR_HOURS;\r
+ }\r
+ map.put("timeSQL", String.format(" AND EVENT_TIME >= %d AND EVENT_TIME <= %d", stime, etime));\r
+ return map;\r
+ }\r
+ private long getTimeFromParam(final String s) {\r
+ if (s == null)\r
+ return 0;\r
+ try {\r
+ // First, look for an RFC 3339 date\r
+ String fmt = (s.indexOf('.') > 0) ? fmt2 : fmt1;\r
+ SimpleDateFormat sdf = new SimpleDateFormat(fmt);\r
+ Date d = sdf.parse(s);\r
+ return d.getTime();\r
+ } catch (ParseException e) {\r
+ }\r
+ try {\r
+ // Also allow a long (in ms); useful for testing\r
+ long n = Long.parseLong(s);\r
+ return n;\r
+ } catch (NumberFormatException e) {\r
+ }\r
+ intlogger.info("Error parsing time="+s);\r
+ return -1;\r
+ }\r
+\r
+\r
+ private ResultSet getRecordsForSQL(String sql) {\r
+ intlogger.debug(sql);\r
+ long start = System.currentTimeMillis();\r
+ DB db = new DB();\r
+ Connection conn = null;\r
+ ResultSet rs=null;\r
+\r
+ try {\r
+ conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ PreparedStatement pst=conn.prepareStatement(sql);\r
+ rs=pst.executeQuery();\r
+ //this.rsToJson(rs)\r
+ //rs.close();\r
+ stmt.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ } finally {\r
+ if (conn != null)\r
+ db.release(conn);\r
+ }\r
+\r
+ intlogger.debug("Time: " + (System.currentTimeMillis()-start) + " ms");\r
+\r
+ return rs;\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
*/\r
@SuppressWarnings("serial")\r
public class SubLogServlet extends LogServlet {\r
- public SubLogServlet() {\r
- super(false);\r
- }\r
+ public SubLogServlet() {\r
+ super(false);\r
+ }\r
}\r
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.provisioning;\r
-\r
-import java.io.IOException;\r
-import java.io.InvalidObjectException;\r
-import java.util.Collection;\r
-\r
-import javax.servlet.http.HttpServletRequest;\r
-import javax.servlet.http.HttpServletResponse;\r
-\r
-import org.json.JSONObject;\r
-import org.onap.dmaap.datarouter.authz.AuthorizationResponse;\r
-import org.onap.dmaap.datarouter.provisioning.beans.EventLogRecord;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Feed;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Subscription;\r
-import org.onap.dmaap.datarouter.provisioning.eelf.EelfMsgs;\r
-import org.onap.dmaap.datarouter.provisioning.utils.JSONUtilities;\r
-\r
-import com.att.eelf.configuration.EELFLogger;\r
-import com.att.eelf.configuration.EELFManager;\r
-\r
-/**\r
- * This servlet handles provisioning for the <subscribeURL> which is generated by the provisioning\r
- * server to handle the creation and inspection of subscriptions to a specific feed.\r
- *\r
- * @author Robert Eby\r
- * @version $Id$\r
- */\r
-@SuppressWarnings("serial")\r
-public class SubscribeServlet extends ProxyServlet {\r
- \r
- //Adding EELF Logger Rally:US664892 \r
- private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.provisioning.SubscribeServlet");\r
-\r
- /**\r
- * DELETE on the <subscribeUrl> -- not supported.\r
- */\r
- @Override\r
- public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doDelete");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- String message = "DELETE not allowed for the subscribeURL.";\r
- EventLogRecord elr = new EventLogRecord(req);\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
- }\r
- /**\r
- * GET on the <subscribeUrl> -- get the list of subscriptions to a feed.\r
- * See the <i>Subscription Collection Query</i> section in the <b>Provisioning API</b>\r
- * document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doGet");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- EventLogRecord elr = new EventLogRecord(req);\r
- String message = isAuthorizedForProvisioning(req);\r
- if (message != null) {\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- if (isProxyServer()) {\r
- super.doGet(req, resp);\r
- return;\r
- }\r
- String bhdr = req.getHeader(BEHALF_HEADER);\r
- if (bhdr == null) {\r
- message = "Missing "+BEHALF_HEADER+" header.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- int feedid = getIdFromPath(req);\r
- if (feedid < 0) {\r
- message = "Missing or bad feed number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- Feed feed = Feed.getFeedById(feedid);\r
- if (feed == null || feed.isDeleted()) {\r
- message = "Missing or bad feed number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
- return;\r
- }\r
- // Check with the Authorizer\r
- AuthorizationResponse aresp = authz.decide(req);\r
- if (! aresp.isAuthorized()) {\r
- message = "Policy Engine disallows access.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
-\r
- // Display a list of URLs\r
- Collection<String> list = Subscription.getSubscriptionUrlList(feedid);\r
- String t = JSONUtilities.createJSONArray(list);\r
-\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType(SUBLIST_CONTENT_TYPE);\r
- resp.getOutputStream().print(t);\r
- }\r
- /**\r
- * PUT on the <subscribeUrl> -- not supported.\r
- */\r
- @Override\r
- public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doPut");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- String message = "PUT not allowed for the subscribeURL.";\r
- EventLogRecord elr = new EventLogRecord(req);\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
- }\r
- /**\r
- * POST on the <subscribeUrl> -- create a new subscription to a feed.\r
- * See the <i>Creating a Subscription</i> section in the <b>Provisioning API</b>\r
- * document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doPost");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));\r
- EventLogRecord elr = new EventLogRecord(req);\r
- String message = isAuthorizedForProvisioning(req);\r
- if (message != null) {\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- if (isProxyServer()) {\r
- super.doPost(req, resp);\r
- return;\r
- }\r
- String bhdr = req.getHeader(BEHALF_HEADER);\r
- if (bhdr == null) {\r
- message = "Missing "+BEHALF_HEADER+" header.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- int feedid = getIdFromPath(req);\r
- if (feedid < 0) {\r
- message = "Missing or bad feed number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- Feed feed = Feed.getFeedById(feedid);\r
- if (feed == null || feed.isDeleted()) {\r
- message = "Missing or bad feed number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
- return;\r
- }\r
- // Check with the Authorizer\r
- AuthorizationResponse aresp = authz.decide(req);\r
- if (! aresp.isAuthorized()) {\r
- message = "Policy Engine disallows access.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
-\r
- // check content type is SUB_CONTENT_TYPE, version 1.0\r
- ContentHeader ch = getContentHeader(req);\r
- String ver = ch.getAttribute("version");\r
- if (!ch.getType().equals(SUB_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {\r
- intlogger.debug("Content-type is: "+req.getHeader("Content-Type"));\r
- message = "Incorrect content-type";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);\r
- return;\r
- }\r
- JSONObject jo = getJSONfromInput(req);\r
- if (jo == null) {\r
- message = "Badly formed JSON";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- if (intlogger.isDebugEnabled())\r
- intlogger.debug(jo.toString());\r
- if (++active_subs > max_subs) {\r
- active_subs--;\r
- message = "Cannot create subscription; the maximum number of subscriptions has been configured.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_CONFLICT);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_CONFLICT, message);\r
- return;\r
- }\r
- Subscription sub = null;\r
- try {\r
- sub = new Subscription(jo);\r
- } catch (InvalidObjectException e) {\r
- active_subs--;\r
- message = e.getMessage();\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- sub.setFeedid(feedid);\r
- sub.setSubscriber(bhdr); // set from X-ATT-DR-ON-BEHALF-OF header\r
-\r
- // Check if this subscription already exists; not an error (yet), just warn\r
- Subscription sub2 = Subscription.getSubscriptionMatching(sub);\r
- if (sub2 != null)\r
- intlogger.warn("PROV0011 Creating a duplicate subscription: new subid="+sub.getSubid()+", old subid="+sub2.getSubid());\r
-\r
- // Create SUBSCRIPTIONS table entries\r
- if (doInsert(sub)) {\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_CREATED);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_CREATED);\r
- resp.setContentType(SUBFULL_CONTENT_TYPE);\r
- resp.setHeader("Location", sub.getLinks().getSelf());\r
- resp.getOutputStream().print(sub.asLimitedJSONObject().toString());\r
-\r
- provisioningDataChanged();\r
- } else {\r
- // Something went wrong with the INSERT\r
- active_subs--;\r
- elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.provisioning;
+
+import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.util.Collection;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.json.JSONObject;
+import org.onap.dmaap.datarouter.authz.AuthorizationResponse;
+import org.onap.dmaap.datarouter.provisioning.beans.EventLogRecord;
+import org.onap.dmaap.datarouter.provisioning.beans.Feed;
+import org.onap.dmaap.datarouter.provisioning.beans.Subscription;
+import org.onap.dmaap.datarouter.provisioning.eelf.EelfMsgs;
+import org.onap.dmaap.datarouter.provisioning.utils.JSONUtilities;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+
+/**
+ * This servlet handles provisioning for the <subscribeURL> which is generated by the provisioning
+ * server to handle the creation and inspection of subscriptions to a specific feed.
+ *
+ * @author Robert Eby
+ * @version $Id$
+ */
+@SuppressWarnings("serial")
+public class SubscribeServlet extends ProxyServlet {
+
+ //Adding EELF Logger Rally:US664892
+ private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.provisioning.SubscribeServlet");
+
+ /**
+ * DELETE on the <subscribeUrl> -- not supported.
+ */
+ @Override
+ public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doDelete");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ String message = "DELETE not allowed for the subscribeURL.";
+ EventLogRecord elr = new EventLogRecord(req);
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);
+ }
+ /**
+ * GET on the <subscribeUrl> -- get the list of subscriptions to a feed.
+ * See the <i>Subscription Collection Query</i> section in the <b>Provisioning API</b>
+ * document for details on how this method should be invoked.
+ */
+ @Override
+ public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doGet");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ EventLogRecord elr = new EventLogRecord(req);
+ String message = isAuthorizedForProvisioning(req);
+ if (message != null) {
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+ if (isProxyServer()) {
+ super.doGet(req, resp);
+ return;
+ }
+ String bhdr = req.getHeader(BEHALF_HEADER);
+ if (bhdr == null) {
+ message = "Missing "+BEHALF_HEADER+" header.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ int feedid = getIdFromPath(req);
+ if (feedid < 0) {
+ message = "Missing or bad feed number.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ Feed feed = Feed.getFeedById(feedid);
+ if (feed == null || feed.isDeleted()) {
+ message = "Missing or bad feed number.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);
+ return;
+ }
+ // Check with the Authorizer
+ AuthorizationResponse aresp = authz.decide(req);
+ if (! aresp.isAuthorized()) {
+ message = "Policy Engine disallows access.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+
+ // Display a list of URLs
+ Collection<String> list = Subscription.getSubscriptionUrlList(feedid);
+ String t = JSONUtilities.createJSONArray(list);
+
+ // send response
+ elr.setResult(HttpServletResponse.SC_OK);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.setContentType(SUBLIST_CONTENT_TYPE);
+ resp.getOutputStream().print(t);
+ }
+ /**
+ * PUT on the <subscribeUrl> -- not supported.
+ */
+ @Override
+ public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doPut");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");
+ String message = "PUT not allowed for the subscribeURL.";
+ EventLogRecord elr = new EventLogRecord(req);
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);
+ }
+ /**
+ * POST on the <subscribeUrl> -- create a new subscription to a feed.
+ * See the <i>Creating a Subscription</i> section in the <b>Provisioning API</b>
+ * document for details on how this method should be invoked.
+ */
+ @Override
+ public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ setIpAndFqdnForEelf("doPost");
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));
+ EventLogRecord elr = new EventLogRecord(req);
+ String message = isAuthorizedForProvisioning(req);
+ if (message != null) {
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+ if (isProxyServer()) {
+ super.doPost(req, resp);
+ return;
+ }
+ String bhdr = req.getHeader(BEHALF_HEADER);
+ if (bhdr == null) {
+ message = "Missing "+BEHALF_HEADER+" header.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ int feedid = getIdFromPath(req);
+ if (feedid < 0) {
+ message = "Missing or bad feed number.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ Feed feed = Feed.getFeedById(feedid);
+ if (feed == null || feed.isDeleted()) {
+ message = "Missing or bad feed number.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);
+ return;
+ }
+ // Check with the Authorizer
+ AuthorizationResponse aresp = authz.decide(req);
+ if (! aresp.isAuthorized()) {
+ message = "Policy Engine disallows access.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);
+ return;
+ }
+
+ // check content type is SUB_CONTENT_TYPE, version 1.0
+ ContentHeader ch = getContentHeader(req);
+ String ver = ch.getAttribute("version");
+ if (!ch.getType().equals(SUB_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {
+ intlogger.debug("Content-type is: "+req.getHeader("Content-Type"));
+ message = "Incorrect content-type";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);
+ return;
+ }
+ JSONObject jo = getJSONfromInput(req);
+ if (jo == null) {
+ message = "Badly formed JSON";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ if (intlogger.isDebugEnabled())
+ intlogger.debug(jo.toString());
+ if (++active_subs > max_subs) {
+ active_subs--;
+ message = "Cannot create subscription; the maximum number of subscriptions has been configured.";
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_CONFLICT);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_CONFLICT, message);
+ return;
+ }
+ Subscription sub = null;
+ try {
+ sub = new Subscription(jo);
+ } catch (InvalidObjectException e) {
+ active_subs--;
+ message = e.getMessage();
+ elr.setMessage(message);
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);
+ return;
+ }
+ sub.setFeedid(feedid);
+ sub.setSubscriber(bhdr); // set from X-ATT-DR-ON-BEHALF-OF header
+
+ // Check if this subscription already exists; not an error (yet), just warn
+ Subscription sub2 = Subscription.getSubscriptionMatching(sub);
+ if (sub2 != null)
+ intlogger.warn("PROV0011 Creating a duplicate subscription: new subid="+sub.getSubid()+", old subid="+sub2.getSubid());
+
+ // Create SUBSCRIPTIONS table entries
+ if (doInsert(sub)) {
+ // send response
+ elr.setResult(HttpServletResponse.SC_CREATED);
+ eventlogger.info(elr);
+ resp.setStatus(HttpServletResponse.SC_CREATED);
+ resp.setContentType(SUBFULL_CONTENT_TYPE);
+ resp.setHeader("Location", sub.getLinks().getSelf());
+ resp.getOutputStream().print(sub.asLimitedJSONObject().toString());
+
+ provisioningDataChanged();
+ } else {
+ // Something went wrong with the INSERT
+ active_subs--;
+ elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ eventlogger.info(elr);
+ resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);
+ }
+ }
+}
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
*/\r
@SuppressWarnings("serial")\r
public class SubscriptionServlet extends ProxyServlet {\r
- public static final String SUBCNTRL_CONTENT_TYPE = "application/vnd.att-dr.subscription-control";\r
- //Adding EELF Logger Rally:US664892 \r
+ public static final String SUBCNTRL_CONTENT_TYPE = "application/vnd.att-dr.subscription-control";\r
+ //Adding EELF Logger Rally:US664892\r
private static EELFLogger eelflogger = EELFManager.getInstance().getLogger("org.onap.dmaap.datarouter.provisioning.SubscriptionServlet");\r
\r
- /**\r
- * DELETE on the <subscriptionUrl> -- delete a subscription.\r
- * See the <i>Deleting a Subscription</i> section in the <b>Provisioning API</b>\r
- * document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doDelete");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- EventLogRecord elr = new EventLogRecord(req);\r
- String message = isAuthorizedForProvisioning(req);\r
- if (message != null) {\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- if (isProxyServer()) {\r
- super.doDelete(req, resp);\r
- return;\r
- }\r
- String bhdr = req.getHeader(BEHALF_HEADER);\r
- if (bhdr == null) {\r
- message = "Missing "+BEHALF_HEADER+" header.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- int subid = getIdFromPath(req);\r
- if (subid < 0) {\r
- message = "Missing or bad subscription number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- Subscription sub = Subscription.getSubscriptionById(subid);\r
- if (sub == null) {\r
- message = "Missing or bad subscription number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
- return;\r
- }\r
- // Check with the Authorizer\r
- AuthorizationResponse aresp = authz.decide(req);\r
- if (! aresp.isAuthorized()) {\r
- message = "Policy Engine disallows access.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
+ /**\r
+ * DELETE on the <subscriptionUrl> -- delete a subscription.\r
+ * See the <i>Deleting a Subscription</i> section in the <b>Provisioning API</b>\r
+ * document for details on how this method should be invoked.\r
+ */\r
+ @Override\r
+ public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ setIpAndFqdnForEelf("doDelete");\r
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
+ EventLogRecord elr = new EventLogRecord(req);\r
+ String message = isAuthorizedForProvisioning(req);\r
+ if (message != null) {\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
+ return;\r
+ }\r
+ if (isProxyServer()) {\r
+ super.doDelete(req, resp);\r
+ return;\r
+ }\r
+ String bhdr = req.getHeader(BEHALF_HEADER);\r
+ if (bhdr == null) {\r
+ message = "Missing "+BEHALF_HEADER+" header.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
+ return;\r
+ }\r
+ int subid = getIdFromPath(req);\r
+ if (subid < 0) {\r
+ message = "Missing or bad subscription number.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
+ return;\r
+ }\r
+ Subscription sub = Subscription.getSubscriptionById(subid);\r
+ if (sub == null) {\r
+ message = "Missing or bad subscription number.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
+ return;\r
+ }\r
+ // Check with the Authorizer\r
+ AuthorizationResponse aresp = authz.decide(req);\r
+ if (! aresp.isAuthorized()) {\r
+ message = "Policy Engine disallows access.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
+ return;\r
+ }\r
\r
- // Delete Subscription\r
- if (doDelete(sub)) {\r
- active_subs--;\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_NO_CONTENT);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_NO_CONTENT);\r
- provisioningDataChanged();\r
- } else {\r
- // Something went wrong with the DELETE\r
- elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
- }\r
- }\r
- /**\r
- * GET on the <subscriptionUrl> -- get information about a subscription.\r
- * See the <i>Retreiving Information about a Subscription</i> section in the <b>Provisioning API</b>\r
- * document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doGet");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- EventLogRecord elr = new EventLogRecord(req);\r
- String message = isAuthorizedForProvisioning(req);\r
- if (message != null) {\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- if (isProxyServer()) {\r
- super.doGet(req, resp);\r
- return;\r
- }\r
- String bhdr = req.getHeader(BEHALF_HEADER);\r
- if (bhdr == null) {\r
- message = "Missing "+BEHALF_HEADER+" header.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- int subid = getIdFromPath(req);\r
- if (subid < 0) {\r
- message = "Missing or bad subscription number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- Subscription sub = Subscription.getSubscriptionById(subid);\r
- if (sub == null) {\r
- message = "Missing or bad subscription number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
- return;\r
- }\r
- // Check with the Authorizer\r
- AuthorizationResponse aresp = authz.decide(req);\r
- if (! aresp.isAuthorized()) {\r
- message = "Policy Engine disallows access.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
+ // Delete Subscription\r
+ if (doDelete(sub)) {\r
+ active_subs--;\r
+ // send response\r
+ elr.setResult(HttpServletResponse.SC_NO_CONTENT);\r
+ eventlogger.info(elr);\r
+ resp.setStatus(HttpServletResponse.SC_NO_CONTENT);\r
+ provisioningDataChanged();\r
+ } else {\r
+ // Something went wrong with the DELETE\r
+ elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
+ }\r
+ }\r
+ /**\r
+ * GET on the <subscriptionUrl> -- get information about a subscription.\r
+ * See the <i>Retreiving Information about a Subscription</i> section in the <b>Provisioning API</b>\r
+ * document for details on how this method should be invoked.\r
+ */\r
+ @Override\r
+ public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ setIpAndFqdnForEelf("doGet");\r
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
+ EventLogRecord elr = new EventLogRecord(req);\r
+ String message = isAuthorizedForProvisioning(req);\r
+ if (message != null) {\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
+ return;\r
+ }\r
+ if (isProxyServer()) {\r
+ super.doGet(req, resp);\r
+ return;\r
+ }\r
+ String bhdr = req.getHeader(BEHALF_HEADER);\r
+ if (bhdr == null) {\r
+ message = "Missing "+BEHALF_HEADER+" header.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
+ return;\r
+ }\r
+ int subid = getIdFromPath(req);\r
+ if (subid < 0) {\r
+ message = "Missing or bad subscription number.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
+ return;\r
+ }\r
+ Subscription sub = Subscription.getSubscriptionById(subid);\r
+ if (sub == null) {\r
+ message = "Missing or bad subscription number.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
+ return;\r
+ }\r
+ // Check with the Authorizer\r
+ AuthorizationResponse aresp = authz.decide(req);\r
+ if (! aresp.isAuthorized()) {\r
+ message = "Policy Engine disallows access.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
+ return;\r
+ }\r
\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType(SUBFULL_CONTENT_TYPE);\r
- resp.getOutputStream().print(sub.asJSONObject(true).toString());\r
- }\r
- /**\r
- * PUT on the <subscriptionUrl> -- modify a subscription.\r
- * See the <i>Modifying a Subscription</i> section in the <b>Provisioning API</b>\r
- * document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
- setIpAndFqdnForEelf("doPut");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
- EventLogRecord elr = new EventLogRecord(req);\r
- String message = isAuthorizedForProvisioning(req);\r
- if (message != null) {\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- if (isProxyServer()) {\r
- super.doPut(req, resp);\r
- return;\r
- }\r
- String bhdr = req.getHeader(BEHALF_HEADER);\r
- if (bhdr == null) {\r
- message = "Missing "+BEHALF_HEADER+" header.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- int subid = getIdFromPath(req);\r
- if (subid < 0) {\r
- message = "Missing or bad subscription number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- Subscription oldsub = Subscription.getSubscriptionById(subid);\r
- if (oldsub == null) {\r
- message = "Missing or bad subscription number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
- return;\r
- }\r
- // Check with the Authorizer\r
- AuthorizationResponse aresp = authz.decide(req);\r
- if (! aresp.isAuthorized()) {\r
- message = "Policy Engine disallows access.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- // check content type is SUB_CONTENT_TYPE, version 1.0\r
- ContentHeader ch = getContentHeader(req);\r
- String ver = ch.getAttribute("version");\r
- if (!ch.getType().equals(SUB_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {\r
- message = "Incorrect content-type";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);\r
- return;\r
- }\r
- JSONObject jo = getJSONfromInput(req);\r
- if (jo == null) {\r
- message = "Badly formed JSON";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- if (intlogger.isDebugEnabled())\r
- intlogger.debug(jo.toString());\r
- Subscription sub = null;\r
- try {\r
- sub = new Subscription(jo);\r
- } catch (InvalidObjectException e) {\r
- message = e.getMessage();\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- sub.setSubid(oldsub.getSubid());\r
- sub.setFeedid(oldsub.getFeedid());\r
- sub.setSubscriber(bhdr); // set from X-ATT-DR-ON-BEHALF-OF header\r
+ // send response\r
+ elr.setResult(HttpServletResponse.SC_OK);\r
+ eventlogger.info(elr);\r
+ resp.setStatus(HttpServletResponse.SC_OK);\r
+ resp.setContentType(SUBFULL_CONTENT_TYPE);\r
+ resp.getOutputStream().print(sub.asJSONObject(true).toString());\r
+ }\r
+ /**\r
+ * PUT on the <subscriptionUrl> -- modify a subscription.\r
+ * See the <i>Modifying a Subscription</i> section in the <b>Provisioning API</b>\r
+ * document for details on how this method should be invoked.\r
+ */\r
+ @Override\r
+ public void doPut(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ setIpAndFqdnForEelf("doPut");\r
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF_AND_SUBID, req.getHeader(BEHALF_HEADER),getIdFromPath(req)+"");\r
+ EventLogRecord elr = new EventLogRecord(req);\r
+ String message = isAuthorizedForProvisioning(req);\r
+ if (message != null) {\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
+ return;\r
+ }\r
+ if (isProxyServer()) {\r
+ super.doPut(req, resp);\r
+ return;\r
+ }\r
+ String bhdr = req.getHeader(BEHALF_HEADER);\r
+ if (bhdr == null) {\r
+ message = "Missing "+BEHALF_HEADER+" header.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
+ return;\r
+ }\r
+ int subid = getIdFromPath(req);\r
+ if (subid < 0) {\r
+ message = "Missing or bad subscription number.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
+ return;\r
+ }\r
+ Subscription oldsub = Subscription.getSubscriptionById(subid);\r
+ if (oldsub == null) {\r
+ message = "Missing or bad subscription number.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_NOT_FOUND);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_NOT_FOUND, message);\r
+ return;\r
+ }\r
+ // Check with the Authorizer\r
+ AuthorizationResponse aresp = authz.decide(req);\r
+ if (! aresp.isAuthorized()) {\r
+ message = "Policy Engine disallows access.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
+ return;\r
+ }\r
+ // check content type is SUB_CONTENT_TYPE, version 1.0\r
+ ContentHeader ch = getContentHeader(req);\r
+ String ver = ch.getAttribute("version");\r
+ if (!ch.getType().equals(SUB_BASECONTENT_TYPE) || !(ver.equals("1.0") || ver.equals("2.0"))) {\r
+ message = "Incorrect content-type";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);\r
+ return;\r
+ }\r
+ JSONObject jo = getJSONfromInput(req);\r
+ if (jo == null) {\r
+ message = "Badly formed JSON";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
+ return;\r
+ }\r
+ if (intlogger.isDebugEnabled())\r
+ intlogger.debug(jo.toString());\r
+ Subscription sub = null;\r
+ try {\r
+ sub = new Subscription(jo);\r
+ } catch (InvalidObjectException e) {\r
+ message = e.getMessage();\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
+ return;\r
+ }\r
+ sub.setSubid(oldsub.getSubid());\r
+ sub.setFeedid(oldsub.getFeedid());\r
+ sub.setSubscriber(bhdr); // set from X-ATT-DR-ON-BEHALF-OF header\r
\r
- String subjectgroup = (req.getHeader("X-ATT-DR-ON-BEHALF-OF-GROUP")); //Adding for group feature:Rally US708115 \r
- if (!oldsub.getSubscriber().equals(sub.getSubscriber()) && subjectgroup == null) {\r
- message = "This subscriber must be modified by the same subscriber that created it.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
+ String subjectgroup = (req.getHeader("X-ATT-DR-ON-BEHALF-OF-GROUP")); //Adding for group feature:Rally US708115\r
+ if (!oldsub.getSubscriber().equals(sub.getSubscriber()) && subjectgroup == null) {\r
+ message = "This subscriber must be modified by the same subscriber that created it.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
+ return;\r
+ }\r
\r
- // Update SUBSCRIPTIONS table entries\r
- if (doUpdate(sub)) {\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_OK);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_OK);\r
- resp.setContentType(SUBFULL_CONTENT_TYPE);\r
- resp.getOutputStream().print(sub.asLimitedJSONObject().toString());\r
+ // Update SUBSCRIPTIONS table entries\r
+ if (doUpdate(sub)) {\r
+ // send response\r
+ elr.setResult(HttpServletResponse.SC_OK);\r
+ eventlogger.info(elr);\r
+ resp.setStatus(HttpServletResponse.SC_OK);\r
+ resp.setContentType(SUBFULL_CONTENT_TYPE);\r
+ resp.getOutputStream().print(sub.asLimitedJSONObject().toString());\r
\r
- /**Change Owner ship of Subscriber Adding for group feature:Rally US708115*/\r
- if (jo.has("changeowner") && subjectgroup != null) {\r
- Boolean changeowner = (Boolean) jo.get("changeowner");\r
- if (changeowner != null && changeowner.equals(true)) {\r
- sub.setSubscriber(req.getHeader(BEHALF_HEADER));\r
- sub.changeOwnerShip();\r
- }\r
- }\r
- /***End of change ownership*/\r
+ /**Change Owner ship of Subscriber Adding for group feature:Rally US708115*/\r
+ if (jo.has("changeowner") && subjectgroup != null) {\r
+ Boolean changeowner = (Boolean) jo.get("changeowner");\r
+ if (changeowner != null && changeowner.equals(true)) {\r
+ sub.setSubscriber(req.getHeader(BEHALF_HEADER));\r
+ sub.changeOwnerShip();\r
+ }\r
+ }\r
+ /***End of change ownership*/\r
\r
- provisioningDataChanged();\r
- } else {\r
- // Something went wrong with the UPDATE\r
- elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
- }\r
- }\r
- /**\r
- * POST on the <subscriptionUrl> -- control a subscription.\r
- * See the <i>Resetting a Subscription's Retry Schedule</i> section in the <b>Provisioning API</b>\r
- * document for details on how this method should be invoked.\r
- */\r
- @Override\r
- public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
+ provisioningDataChanged();\r
+ } else {\r
+ // Something went wrong with the UPDATE\r
+ elr.setResult(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, DB_PROBLEM_MSG);\r
+ }\r
+ }\r
+ /**\r
+ * POST on the <subscriptionUrl> -- control a subscription.\r
+ * See the <i>Resetting a Subscription's Retry Schedule</i> section in the <b>Provisioning API</b>\r
+ * document for details on how this method should be invoked.\r
+ */\r
+ @Override\r
+ public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException {\r
// OLD pre-3.0 code\r
-// String message = "POST not allowed for the subscriptionURL.";\r
-// EventLogRecord elr = new EventLogRecord(req);\r
-// elr.setMessage(message);\r
-// elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
-// eventlogger.info(elr);\r
-// resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
+// String message = "POST not allowed for the subscriptionURL.";\r
+// EventLogRecord elr = new EventLogRecord(req);\r
+// elr.setMessage(message);\r
+// elr.setResult(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\r
+// eventlogger.info(elr);\r
+// resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message);\r
\r
- setIpAndFqdnForEelf("doPost");\r
- eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));\r
- EventLogRecord elr = new EventLogRecord(req);\r
- String message = isAuthorizedForProvisioning(req);\r
- if (message != null) {\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- if (isProxyServer()) {\r
- super.doPost(req, resp);\r
- return;\r
- }\r
- String bhdr = req.getHeader(BEHALF_HEADER);\r
- if (bhdr == null) {\r
- message = "Missing "+BEHALF_HEADER+" header.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- final int subid = getIdFromPath(req);\r
- if (subid < 0 || Subscription.getSubscriptionById(subid) == null) {\r
- message = "Missing or bad subscription number.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- // check content type is SUBCNTRL_CONTENT_TYPE, version 1.0\r
- ContentHeader ch = getContentHeader(req);\r
- String ver = ch.getAttribute("version");\r
- if (!ch.getType().equals(SUBCNTRL_CONTENT_TYPE) || !ver.equals("1.0")) {\r
- message = "Incorrect content-type";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);\r
- return;\r
- }\r
- // Check with the Authorizer\r
- AuthorizationResponse aresp = authz.decide(req);\r
- if (! aresp.isAuthorized()) {\r
- message = "Policy Engine disallows access.";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
- return;\r
- }\r
- JSONObject jo = getJSONfromInput(req);\r
- if (jo == null) {\r
- message = "Badly formed JSON";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- return;\r
- }\r
- try {\r
- // Only the active POD sends notifications\r
- boolean active = SynchronizerTask.getSynchronizer().isActive();\r
- boolean b = jo.getBoolean("failed");\r
- if (active && !b) {\r
- // Notify all nodes to reset the subscription\r
- SubscriberNotifyThread t = new SubscriberNotifyThread();\r
- t.resetSubscription(subid);\r
- t.start();\r
- }\r
- // send response\r
- elr.setResult(HttpServletResponse.SC_ACCEPTED);\r
- eventlogger.info(elr);\r
- resp.setStatus(HttpServletResponse.SC_ACCEPTED);\r
- } catch (JSONException e) {\r
- message = "Badly formed JSON";\r
- elr.setMessage(message);\r
- elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
- eventlogger.info(elr);\r
- resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
- }\r
- }\r
+ setIpAndFqdnForEelf("doPost");\r
+ eelflogger.info(EelfMsgs.MESSAGE_WITH_BEHALF, req.getHeader(BEHALF_HEADER));\r
+ EventLogRecord elr = new EventLogRecord(req);\r
+ String message = isAuthorizedForProvisioning(req);\r
+ if (message != null) {\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
+ return;\r
+ }\r
+ if (isProxyServer()) {\r
+ super.doPost(req, resp);\r
+ return;\r
+ }\r
+ String bhdr = req.getHeader(BEHALF_HEADER);\r
+ if (bhdr == null) {\r
+ message = "Missing "+BEHALF_HEADER+" header.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
+ return;\r
+ }\r
+ final int subid = getIdFromPath(req);\r
+ if (subid < 0 || Subscription.getSubscriptionById(subid) == null) {\r
+ message = "Missing or bad subscription number.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
+ return;\r
+ }\r
+ // check content type is SUBCNTRL_CONTENT_TYPE, version 1.0\r
+ ContentHeader ch = getContentHeader(req);\r
+ String ver = ch.getAttribute("version");\r
+ if (!ch.getType().equals(SUBCNTRL_CONTENT_TYPE) || !ver.equals("1.0")) {\r
+ message = "Incorrect content-type";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, message);\r
+ return;\r
+ }\r
+ // Check with the Authorizer\r
+ AuthorizationResponse aresp = authz.decide(req);\r
+ if (! aresp.isAuthorized()) {\r
+ message = "Policy Engine disallows access.";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_FORBIDDEN);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_FORBIDDEN, message);\r
+ return;\r
+ }\r
+ JSONObject jo = getJSONfromInput(req);\r
+ if (jo == null) {\r
+ message = "Badly formed JSON";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
+ return;\r
+ }\r
+ try {\r
+ // Only the active POD sends notifications\r
+ boolean active = SynchronizerTask.getSynchronizer().isActive();\r
+ boolean b = jo.getBoolean("failed");\r
+ if (active && !b) {\r
+ // Notify all nodes to reset the subscription\r
+ SubscriberNotifyThread t = new SubscriberNotifyThread();\r
+ t.resetSubscription(subid);\r
+ t.start();\r
+ }\r
+ // send response\r
+ elr.setResult(HttpServletResponse.SC_ACCEPTED);\r
+ eventlogger.info(elr);\r
+ resp.setStatus(HttpServletResponse.SC_ACCEPTED);\r
+ } catch (JSONException e) {\r
+ message = "Badly formed JSON";\r
+ elr.setMessage(message);\r
+ elr.setResult(HttpServletResponse.SC_BAD_REQUEST);\r
+ eventlogger.info(elr);\r
+ resp.sendError(HttpServletResponse.SC_BAD_REQUEST, message);\r
+ }\r
+ }\r
\r
- /**\r
- * A Thread class used to serially send reset notifications to all nodes in the DR network,\r
- * when a POST is received for a subscription.\r
- */\r
- public class SubscriberNotifyThread extends Thread {\r
- public static final String URL_TEMPLATE = "http://%s/internal/resetSubscription/%d";\r
- private List<String> urls = new Vector<String>();\r
+ /**\r
+ * A Thread class used to serially send reset notifications to all nodes in the DR network,\r
+ * when a POST is received for a subscription.\r
+ */\r
+ public class SubscriberNotifyThread extends Thread {\r
+ public static final String URL_TEMPLATE = "http://%s/internal/resetSubscription/%d";\r
+ private List<String> urls = new Vector<String>();\r
\r
- public SubscriberNotifyThread() {\r
- setName("SubscriberNotifyThread");\r
- }\r
- public void resetSubscription(int subid) {\r
- for (String nodename : BaseServlet.getNodes()) {\r
- String u = String.format(URL_TEMPLATE, nodename, subid);\r
- urls.add(u);\r
- }\r
- }\r
- public void run() {\r
- try {\r
- while (!urls.isEmpty()) {\r
- String u = urls.remove(0);\r
- try {\r
- URL url = new URL(u);\r
- HttpURLConnection conn = (HttpURLConnection) url.openConnection();\r
- conn.connect();\r
- conn.getContentLength(); // Force the GET through\r
- conn.disconnect();\r
- } catch (IOException e) {\r
- intlogger.info("IOException Error accessing URL: "+u+": " + e.getMessage());\r
- }\r
- }\r
- } catch (Exception e) {\r
- intlogger.warn("Caught exception in SubscriberNotifyThread: "+e);\r
- e.printStackTrace();\r
- }\r
- }\r
- }\r
+ public SubscriberNotifyThread() {\r
+ setName("SubscriberNotifyThread");\r
+ }\r
+ public void resetSubscription(int subid) {\r
+ for (String nodename : BaseServlet.getNodes()) {\r
+ String u = String.format(URL_TEMPLATE, nodename, subid);\r
+ urls.add(u);\r
+ }\r
+ }\r
+ public void run() {\r
+ try {\r
+ while (!urls.isEmpty()) {\r
+ String u = urls.remove(0);\r
+ try {\r
+ URL url = new URL(u);\r
+ HttpURLConnection conn = (HttpURLConnection) url.openConnection();\r
+ conn.connect();\r
+ conn.getContentLength(); // Force the GET through\r
+ conn.disconnect();\r
+ } catch (IOException e) {\r
+ intlogger.info("IOException Error accessing URL: "+u+": " + e.getMessage());\r
+ }\r
+ }\r
+ } catch (Exception e) {\r
+ intlogger.warn("Caught exception in SubscriberNotifyThread: "+e);\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ }\r
}\r
-/*******************************************************************************\r
- * ============LICENSE_START==================================================\r
- * * org.onap.dmaap\r
- * * ===========================================================================\r
- * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * * ===========================================================================\r
- * * Licensed under the Apache License, Version 2.0 (the "License");\r
- * * you may not use this file except in compliance with the License.\r
- * * You may obtain a copy of the License at\r
- * * \r
- * * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
- * * Unless required by applicable law or agreed to in writing, software\r
- * * distributed under the License is distributed on an "AS IS" BASIS,\r
- * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * * See the License for the specific language governing permissions and\r
- * * limitations under the License.\r
- * * ============LICENSE_END====================================================\r
- * *\r
- * * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- * *\r
- ******************************************************************************/\r
-\r
-\r
-package org.onap.dmaap.datarouter.provisioning;\r
-\r
-import java.io.ByteArrayOutputStream;\r
-import java.io.File;\r
-import java.io.FileInputStream;\r
-import java.io.InputStream;\r
-import java.net.InetAddress;\r
-import java.net.UnknownHostException;\r
-import java.nio.file.Files;\r
-import java.nio.file.Path;\r
-import java.nio.file.Paths;\r
-import java.nio.file.StandardCopyOption;\r
-import java.security.KeyStore;\r
-import java.sql.Connection;\r
-import java.sql.SQLException;\r
-import java.util.ArrayList;\r
-import java.util.Arrays;\r
-import java.util.Collection;\r
-import java.util.HashMap;\r
-import java.util.Map;\r
-import java.util.Properties;\r
-import java.util.Set;\r
-import java.util.Timer;\r
-import java.util.TimerTask;\r
-import java.util.TreeSet;\r
-\r
-import javax.servlet.http.HttpServletResponse;\r
-\r
-import org.apache.http.HttpEntity;\r
-import org.apache.http.HttpResponse;\r
-import org.apache.http.client.methods.HttpGet;\r
-import org.apache.http.client.methods.HttpPost;\r
-import org.apache.http.conn.scheme.Scheme;\r
-import org.apache.http.conn.ssl.SSLSocketFactory;\r
-import org.apache.http.entity.ByteArrayEntity;\r
-import org.apache.http.entity.ContentType;\r
-import org.apache.http.impl.client.AbstractHttpClient;\r
-import org.apache.http.impl.client.DefaultHttpClient;\r
-import org.apache.log4j.Logger;\r
-import org.json.JSONArray;\r
-import org.json.JSONException;\r
-import org.json.JSONObject;\r
-import org.json.JSONTokener;\r
-import org.onap.dmaap.datarouter.provisioning.beans.EgressRoute;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Feed;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Group;\r
-import org.onap.dmaap.datarouter.provisioning.beans.IngressRoute;\r
-import org.onap.dmaap.datarouter.provisioning.beans.NetworkRoute;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Parameters;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Subscription;\r
-import org.onap.dmaap.datarouter.provisioning.beans.Syncable;\r
-import org.onap.dmaap.datarouter.provisioning.utils.DB;\r
-import org.onap.dmaap.datarouter.provisioning.utils.LogfileLoader;\r
-import org.onap.dmaap.datarouter.provisioning.utils.RLEBitSet;\r
-import org.onap.dmaap.datarouter.provisioning.utils.URLUtilities;\r
-\r
-/**\r
- * This class handles synchronization between provisioning servers (PODs). It has three primary functions:\r
- * <ol>\r
- * <li>Checking DNS once per minute to see which POD the DNS CNAME points to. The CNAME will point to\r
- * the active (master) POD.</li>\r
- * <li>On non-master (standby) PODs, fetches provisioning data and logs in order to keep MariaDB in sync.</li>\r
- * <li>Providing information to other parts of the system as to the current role (ACTIVE, STANDBY, UNKNOWN)\r
- * of this POD.</li>\r
- * </ol>\r
- * <p>For this to work correctly, the following code needs to be placed at the beginning of main().</p>\r
- * <code>\r
- * Security.setProperty("networkaddress.cache.ttl", "10");\r
- * </code>\r
- *\r
- * @author Robert Eby\r
- * @version $Id: SynchronizerTask.java,v 1.10 2014/03/21 13:50:10 eby Exp $\r
- */\r
-public class SynchronizerTask extends TimerTask {\r
- /** This is a singleton -- there is only one SynchronizerTask object in the server */\r
- private static SynchronizerTask synctask;\r
-\r
- /** This POD is unknown -- not on the list of PODs */\r
- public static final int UNKNOWN = 0;\r
- /** This POD is active -- on the list of PODs, and the DNS CNAME points to us */\r
- public static final int ACTIVE = 1;\r
- /** This POD is standby -- on the list of PODs, and the DNS CNAME does not point to us */\r
- public static final int STANDBY = 2;\r
- private static final String[] stnames = { "UNKNOWN", "ACTIVE", "STANDBY" };\r
- private static final long ONE_HOUR = 60 * 60 * 1000L;\r
-\r
- private final Logger logger;\r
- private final Timer rolex;\r
- private final String spooldir;\r
- private int state;\r
- private boolean doFetch;\r
- private long nextsynctime;\r
- private AbstractHttpClient httpclient = null;\r
-\r
- /**\r
- * Get the singleton SynchronizerTask object.\r
- * @return the SynchronizerTask\r
- */\r
- public static synchronized SynchronizerTask getSynchronizer() {\r
- if (synctask == null)\r
- synctask = new SynchronizerTask();\r
- return synctask;\r
- }\r
-\r
- @SuppressWarnings("deprecation")\r
- private SynchronizerTask() {\r
- logger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
- rolex = new Timer();\r
- spooldir = (new DB()).getProperties().getProperty("org.onap.dmaap.datarouter.provserver.spooldir");\r
- state = UNKNOWN;\r
- doFetch = true; // start off with a fetch\r
- nextsynctime = 0;\r
-\r
- logger.info("PROV5000: Sync task starting, server state is UNKNOWN");\r
- try {\r
- Properties props = (new DB()).getProperties();\r
- String type = props.getProperty(Main.KEYSTORE_TYPE_PROPERTY, "jks");\r
- String store = props.getProperty(Main.KEYSTORE_PATH_PROPERTY);\r
- String pass = props.getProperty(Main.KEYSTORE_PASSWORD_PROPERTY);\r
- KeyStore keyStore = KeyStore.getInstance(type);\r
- FileInputStream instream = new FileInputStream(new File(store));\r
- keyStore.load(instream, pass.toCharArray());\r
- instream.close();\r
-\r
- store = props.getProperty(Main.TRUSTSTORE_PATH_PROPERTY);\r
- pass = props.getProperty(Main.TRUSTSTORE_PASSWORD_PROPERTY);\r
- KeyStore trustStore = null;\r
- if (store != null && store.length() > 0) {\r
- trustStore = KeyStore.getInstance(KeyStore.getDefaultType());\r
- instream = new FileInputStream(new File(store));\r
- trustStore.load(instream, pass.toCharArray());\r
- instream.close();\r
- }\r
-\r
- // We are connecting with the node name, but the certificate will have the CNAME\r
- // So we need to accept a non-matching certificate name\r
- String keystorepass = props.getProperty(Main.KEYSTORE_PASSWORD_PROPERTY); //itrack.web.att.com/browse/DATARTR-6 for changing hard coded passphase ref\r
- AbstractHttpClient hc = new DefaultHttpClient();\r
- SSLSocketFactory socketFactory =\r
- (trustStore == null)\r
- ? new SSLSocketFactory(keyStore, keystorepass)\r
- : new SSLSocketFactory(keyStore, keystorepass, trustStore);\r
- socketFactory.setHostnameVerifier(SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);\r
- Scheme sch = new Scheme("https", 443, socketFactory);\r
- hc.getConnectionManager().getSchemeRegistry().register(sch);\r
- httpclient = hc;\r
-\r
- // Run once every 5 seconds to check DNS, etc.\r
- long interval = 0;\r
- try {\r
- String s = props.getProperty("org.onap.dmaap.datarouter.provserver.sync_interval", "5000");\r
- interval = Long.parseLong(s);\r
- } catch (NumberFormatException e) {\r
- interval = 5000L;\r
- }\r
- rolex.scheduleAtFixedRate(this, 0L, interval);\r
- } catch (Exception e) {\r
- logger.warn("PROV5005: Problem starting the synchronizer: "+e);\r
- }\r
- }\r
-\r
- /**\r
- * What is the state of this POD?\r
- * @return one of ACTIVE, STANDBY, UNKNOWN\r
- */\r
- public int getState() {\r
- return state;\r
- }\r
-\r
- /**\r
- * Is this the active POD?\r
- * @return true if we are active (the master), false otherwise\r
- */\r
- public boolean isActive() {\r
- return state == ACTIVE;\r
- }\r
-\r
- /**\r
- * This method is used to signal that another POD (the active POD) has sent us a /fetchProv request,\r
- * and that we should re-synchronize with the master.\r
- */\r
- public void doFetch() {\r
- doFetch = true;\r
- }\r
-\r
- /**\r
- * Runs once a minute in order to <ol>\r
- * <li>lookup DNS names,</li>\r
- * <li>determine the state of this POD,</li>\r
- * <li>if this is a standby POD, and the fetch flag is set, perform a fetch of state from the active POD.</li>\r
- * <li>if this is a standby POD, check if there are any new log records to be replicated.</li>\r
- * </ol>\r
- */\r
- @Override\r
- public void run() {\r
- try {\r
- state = lookupState();\r
- if (state == STANDBY) {\r
- // Only copy provisioning data FROM the active server TO the standby\r
- if (doFetch || (System.currentTimeMillis() >= nextsynctime)) {\r
- logger.debug("Initiating a sync...");\r
- JSONObject jo = readProvisioningJSON();\r
- if (jo != null) {\r
- doFetch = false;\r
- syncFeeds( jo.getJSONArray("feeds"));\r
- syncSubs( jo.getJSONArray("subscriptions"));\r
- syncGroups( jo.getJSONArray("groups")); //Rally:US708115 - 1610\r
- syncParams(jo.getJSONObject("parameters"));\r
- // The following will not be present in a version=1.0 provfeed\r
- JSONArray ja = jo.optJSONArray("ingress");\r
- if (ja != null)\r
- syncIngressRoutes(ja);\r
- JSONObject j2 = jo.optJSONObject("egress");\r
- if (j2 != null)\r
- syncEgressRoutes( j2);\r
- ja = jo.optJSONArray("routing");\r
- if (ja != null)\r
- syncNetworkRoutes(ja);\r
- }\r
- logger.info("PROV5013: Sync completed.");\r
- nextsynctime = System.currentTimeMillis() + ONE_HOUR;\r
- }\r
- } else {\r
- // Don't do fetches on non-standby PODs\r
- doFetch = false;\r
- }\r
-\r
- // Fetch DR logs as needed - server to server\r
- LogfileLoader lfl = LogfileLoader.getLoader();\r
- if (lfl.isIdle()) {\r
- // Only fetch new logs if the loader is waiting for them.\r
- logger.trace("Checking for logs to replicate...");\r
- RLEBitSet local = lfl.getBitSet();\r
- RLEBitSet remote = readRemoteLoglist();\r
- remote.andNot(local);\r
- if (!remote.isEmpty()) {\r
- logger.debug(" Replicating logs: "+remote);\r
- replicateDRLogs(remote);\r
- }\r
- }\r
- } catch (Exception e) {\r
- logger.warn("PROV0020: Caught exception in SynchronizerTask: "+e);\r
- e.printStackTrace();\r
- }\r
- }\r
-\r
- /**\r
- * This method is used to lookup the CNAME that points to the active server.\r
- * It returns 0 (UNKNOWN), 1(ACTIVE), or 2 (STANDBY) to indicate the state of this server.\r
- * @return the current state\r
- */\r
- private int lookupState() {\r
- int newstate = UNKNOWN;\r
- try {\r
- InetAddress myaddr = InetAddress.getLocalHost();\r
- if (logger.isTraceEnabled())\r
- logger.trace("My address: "+myaddr);\r
- String this_pod = myaddr.getHostName();\r
- Set<String> pods = new TreeSet<String>(Arrays.asList(BaseServlet.getPods()));\r
- if (pods.contains(this_pod)) {\r
- InetAddress pserver = InetAddress.getByName(BaseServlet.active_prov_name);\r
- newstate = myaddr.equals(pserver) ? ACTIVE : STANDBY;\r
- if (logger.isDebugEnabled() && System.currentTimeMillis() >= next_msg) {\r
- logger.debug("Active POD = "+pserver+", Current state is "+stnames[newstate]);\r
- next_msg = System.currentTimeMillis() + (5 * 60 * 1000L);\r
- }\r
- } else {\r
- logger.warn("PROV5003: My name ("+this_pod+") is missing from the list of provisioning servers.");\r
- }\r
- } catch (UnknownHostException e) {\r
- logger.warn("PROV5002: Cannot determine the name of this provisioning server.");\r
- }\r
-\r
- if (newstate != state)\r
- logger.info(String.format("PROV5001: Server state changed from %s to %s", stnames[state], stnames[newstate]));\r
- return newstate;\r
- }\r
- private static long next_msg = 0; // only display the "Current state" msg every 5 mins.\r
- /** Synchronize the Feeds in the JSONArray, with the Feeds in the DB. */\r
- private void syncFeeds(JSONArray ja) {\r
- Collection<Syncable> coll = new ArrayList<Syncable>();\r
- for (int n = 0; n < ja.length(); n++) {\r
- try {\r
- Feed f = new Feed(ja.getJSONObject(n));\r
- coll.add(f);\r
- } catch (Exception e) {\r
- logger.warn("PROV5004: Invalid object in feed: "+ja.optJSONObject(n));\r
- }\r
- }\r
- if (sync(coll, Feed.getAllFeeds()))\r
- BaseServlet.provisioningDataChanged();\r
- }\r
- /** Synchronize the Subscriptions in the JSONArray, with the Subscriptions in the DB. */\r
- private void syncSubs(JSONArray ja) {\r
- Collection<Syncable> coll = new ArrayList<Syncable>();\r
- for (int n = 0; n < ja.length(); n++) {\r
- try {\r
- //Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.\r
- JSONObject j = ja.getJSONObject(n); \r
- j.put("sync", "true");\r
- Subscription s = new Subscription(j);\r
- coll.add(s);\r
- } catch (Exception e) {\r
- logger.warn("PROV5004: Invalid object in subscription: "+ja.optJSONObject(n));\r
- }\r
- }\r
- if (sync(coll, Subscription.getAllSubscriptions()))\r
- BaseServlet.provisioningDataChanged();\r
- }\r
-\r
- /** Rally:US708115 - Synchronize the Groups in the JSONArray, with the Groups in the DB. */ \r
- private void syncGroups(JSONArray ja) { \r
- Collection<Syncable> coll = new ArrayList<Syncable>(); \r
- for (int n = 0; n < ja.length(); n++) { \r
- try { \r
- Group g = new Group(ja.getJSONObject(n)); \r
- coll.add(g); \r
- } catch (Exception e) { \r
- logger.warn("PROV5004: Invalid object in subscription: "+ja.optJSONObject(n)); \r
- } \r
- } \r
- if (sync(coll, Group.getAllgroups())) \r
- BaseServlet.provisioningDataChanged(); \r
- }\r
-\r
-\r
- /** Synchronize the Parameters in the JSONObject, with the Parameters in the DB. */\r
- private void syncParams(JSONObject jo) {\r
- Collection<Syncable> coll = new ArrayList<Syncable>();\r
- for (String k : jo.keySet()) {\r
- String v = "";\r
- try {\r
- v = jo.getString(k);\r
- } catch (JSONException e) {\r
- try {\r
- v = ""+jo.getInt(k);\r
- } catch (JSONException e1) {\r
- JSONArray ja = jo.getJSONArray(k);\r
- for (int i = 0; i < ja.length(); i++) {\r
- if (i > 0)\r
- v += "|";\r
- v += ja.getString(i);\r
- }\r
- }\r
- }\r
- coll.add(new Parameters(k, v));\r
- }\r
- if (sync(coll, Parameters.getParameterCollection())) {\r
- BaseServlet.provisioningDataChanged();\r
- BaseServlet.provisioningParametersChanged();\r
- }\r
- }\r
- private void syncIngressRoutes(JSONArray ja) {\r
- Collection<Syncable> coll = new ArrayList<Syncable>();\r
- for (int n = 0; n < ja.length(); n++) {\r
- try {\r
- IngressRoute in = new IngressRoute(ja.getJSONObject(n));\r
- coll.add(in);\r
- } catch (NumberFormatException e) {\r
- logger.warn("PROV5004: Invalid object in ingress routes: "+ja.optJSONObject(n));\r
- }\r
- }\r
- if (sync(coll, IngressRoute.getAllIngressRoutes()))\r
- BaseServlet.provisioningDataChanged();\r
- }\r
- private void syncEgressRoutes(JSONObject jo) {\r
- Collection<Syncable> coll = new ArrayList<Syncable>();\r
- for (String key : jo.keySet()) {\r
- try {\r
- int sub = Integer.parseInt(key);\r
- String node = jo.getString(key);\r
- EgressRoute er = new EgressRoute(sub, node);\r
- coll.add(er);\r
- } catch (NumberFormatException e) {\r
- logger.warn("PROV5004: Invalid subid in egress routes: "+key);\r
- } catch (IllegalArgumentException e) {\r
- logger.warn("PROV5004: Invalid node name in egress routes: "+key);\r
- }\r
- }\r
- if (sync(coll, EgressRoute.getAllEgressRoutes()))\r
- BaseServlet.provisioningDataChanged();\r
- }\r
- private void syncNetworkRoutes(JSONArray ja) {\r
- Collection<Syncable> coll = new ArrayList<Syncable>();\r
- for (int n = 0; n < ja.length(); n++) {\r
- try {\r
- NetworkRoute nr = new NetworkRoute(ja.getJSONObject(n));\r
- coll.add(nr);\r
- } catch (JSONException e) {\r
- logger.warn("PROV5004: Invalid object in network routes: "+ja.optJSONObject(n));\r
- }\r
- }\r
- if (sync(coll, NetworkRoute.getAllNetworkRoutes()))\r
- BaseServlet.provisioningDataChanged();\r
- }\r
- private boolean sync(Collection<? extends Syncable> newc, Collection<? extends Syncable> oldc) {\r
- boolean changes = false;\r
- try {\r
- Map<String, Syncable> newmap = getMap(newc);\r
- Map<String, Syncable> oldmap = getMap(oldc);\r
- Set<String> union = new TreeSet<String>(newmap.keySet());\r
- union.addAll(oldmap.keySet());\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- for (String n : union) {\r
- Syncable newobj = newmap.get(n);\r
- Syncable oldobj = oldmap.get(n);\r
- if (oldobj == null) {\r
- if (logger.isDebugEnabled())\r
- logger.debug(" Inserting record: "+newobj);\r
- newobj.doInsert(conn);\r
- changes = true;\r
- } else if (newobj == null) {\r
- if (logger.isDebugEnabled())\r
- logger.debug(" Deleting record: "+oldobj);\r
- oldobj.doDelete(conn);\r
- changes = true;\r
- } else if (!newobj.equals(oldobj)) {\r
- if (logger.isDebugEnabled())\r
- logger.debug(" Updating record: "+newobj);\r
- newobj.doUpdate(conn);\r
-\r
- /**Rally US708115\r
- * Change Ownership of FEED - 1610, Syncronised with secondary DB.\r
- * */\r
- checkChnageOwner(newobj, oldobj);\r
-\r
- changes = true;\r
- }\r
- }\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- logger.warn("PROV5009: problem during sync, exception: "+e);\r
- e.printStackTrace();\r
- }\r
- return changes;\r
- }\r
- private Map<String, Syncable> getMap(Collection<? extends Syncable> c) {\r
- Map<String, Syncable> map = new HashMap<String, Syncable>();\r
- for (Syncable v : c) {\r
- map.put(v.getKey(), v);\r
- }\r
- return map;\r
- }\r
- \r
-\r
- /**Change owner of FEED/SUBSCRIPTION*/\r
- /**Rally US708115\r
- * Change Ownership of FEED - 1610\r
- * \r
- * */\r
- private void checkChnageOwner(Syncable newobj, Syncable oldobj) {\r
- if(newobj instanceof Feed) {\r
- Feed oldfeed = (Feed) oldobj;\r
- Feed newfeed = (Feed) newobj;\r
- \r
- if(!oldfeed.getPublisher().equals(newfeed.getPublisher())){\r
- logger.info("PROV5013 - Previous publisher: "+oldfeed.getPublisher() +": New publisher-"+newfeed.getPublisher());\r
- oldfeed.setPublisher(newfeed.getPublisher());\r
- oldfeed.changeOwnerShip();\r
- }\r
- }\r
- else if(newobj instanceof Subscription) {\r
- Subscription oldsub = (Subscription) oldobj;\r
- Subscription newsub = (Subscription) newobj;\r
- \r
- if(!oldsub.getSubscriber().equals(newsub.getSubscriber())){\r
- logger.info("PROV5013 - Previous subscriber: "+oldsub.getSubscriber() +": New subscriber-"+newsub.getSubscriber());\r
- oldsub.setSubscriber(newsub.getSubscriber());\r
- oldsub.changeOwnerShip();\r
- }\r
- }\r
- \r
- }\r
-\r
- /**\r
- * Issue a GET on the peer POD's /internal/prov/ URL to get a copy of its provisioning data.\r
- * @return the provisioning data (as a JONObject)\r
- */\r
- private synchronized JSONObject readProvisioningJSON() {\r
- String url = URLUtilities.generatePeerProvURL();\r
- HttpGet get = new HttpGet(url);\r
- try {\r
- HttpResponse response = httpclient.execute(get);\r
- int code = response.getStatusLine().getStatusCode();\r
- if (code != HttpServletResponse.SC_OK) {\r
- logger.warn("PROV5010: readProvisioningJSON failed, bad error code: "+code);\r
- return null;\r
- }\r
- HttpEntity entity = response.getEntity();\r
- String ctype = entity.getContentType().getValue().trim();\r
- if (!ctype.equals(BaseServlet.PROVFULL_CONTENT_TYPE1) && !ctype.equals(BaseServlet.PROVFULL_CONTENT_TYPE2)) {\r
- logger.warn("PROV5011: readProvisioningJSON failed, bad content type: "+ctype);\r
- return null;\r
- }\r
- return new JSONObject(new JSONTokener(entity.getContent()));\r
- } catch (Exception e) {\r
- logger.warn("PROV5012: readProvisioningJSON failed, exception: "+e);\r
- return null;\r
- } finally {\r
- get.releaseConnection();\r
- }\r
- }\r
- /**\r
- * Issue a GET on the peer POD's /internal/drlogs/ URL to get an RELBitSet representing the\r
- * log records available in the remote database.\r
- * @return the bitset\r
- */\r
- private RLEBitSet readRemoteLoglist() {\r
- RLEBitSet bs = new RLEBitSet();\r
- String url = URLUtilities.generatePeerLogsURL();\r
-\r
- //Fixing if only one Prov is configured, not to give exception to fill logs, return empty bitset.\r
- if(url.equals("")) {\r
- return bs;\r
- }\r
- //End of fix.\r
-\r
- HttpGet get = new HttpGet(url);\r
- try {\r
- HttpResponse response = httpclient.execute(get);\r
- int code = response.getStatusLine().getStatusCode();\r
- if (code != HttpServletResponse.SC_OK) {\r
- logger.warn("PROV5010: readRemoteLoglist failed, bad error code: "+code);\r
- return bs;\r
- }\r
- HttpEntity entity = response.getEntity();\r
- String ctype = entity.getContentType().getValue().trim();\r
- if (!ctype.equals("text/plain")) {\r
- logger.warn("PROV5011: readRemoteLoglist failed, bad content type: "+ctype);\r
- return bs;\r
- }\r
- InputStream is = entity.getContent();\r
- ByteArrayOutputStream bos = new ByteArrayOutputStream();\r
- int ch = 0;\r
- while ((ch = is.read()) >= 0)\r
- bos.write(ch);\r
- bs.set(bos.toString());\r
- is.close();\r
- } catch (Exception e) {\r
- logger.warn("PROV5012: readRemoteLoglist failed, exception: "+e);\r
- return bs;\r
- } finally {\r
- get.releaseConnection();\r
- }\r
- return bs;\r
- }\r
- /**\r
- * Issue a POST on the peer POD's /internal/drlogs/ URL to fetch log records available\r
- * in the remote database that we wish to copy to the local database.\r
- * @param bs the bitset (an RELBitSet) of log records to fetch\r
- */\r
- private void replicateDRLogs(RLEBitSet bs) {\r
- String url = URLUtilities.generatePeerLogsURL();\r
- HttpPost post = new HttpPost(url);\r
- try {\r
- String t = bs.toString();\r
- HttpEntity body = new ByteArrayEntity(t.getBytes(), ContentType.create("text/plain"));\r
- post.setEntity(body);\r
- if (logger.isDebugEnabled())\r
- logger.debug("Requesting records: "+t);\r
-\r
- HttpResponse response = httpclient.execute(post);\r
- int code = response.getStatusLine().getStatusCode();\r
- if (code != HttpServletResponse.SC_OK) {\r
- logger.warn("PROV5010: replicateDRLogs failed, bad error code: "+code);\r
- return;\r
- }\r
- HttpEntity entity = response.getEntity();\r
- String ctype = entity.getContentType().getValue().trim();\r
- if (!ctype.equals("text/plain")) {\r
- logger.warn("PROV5011: replicateDRLogs failed, bad content type: "+ctype);\r
- return;\r
- }\r
-\r
- String spoolname = "" + System.currentTimeMillis();\r
- Path tmppath = Paths.get(spooldir, spoolname);\r
- Path donepath = Paths.get(spooldir, "IN."+spoolname);\r
- Files.copy(entity.getContent(), Paths.get(spooldir, spoolname), StandardCopyOption.REPLACE_EXISTING);\r
- Files.move(tmppath, donepath, StandardCopyOption.REPLACE_EXISTING);\r
- logger.info("Approximately "+bs.cardinality()+" records replicated.");\r
- } catch (Exception e) {\r
- logger.warn("PROV5012: replicateDRLogs failed, exception: "+e);\r
- } finally {\r
- post.releaseConnection();\r
- }\r
- }\r
-}\r
+/*******************************************************************************
+ * ============LICENSE_START==================================================
+ * * org.onap.dmaap
+ * * ===========================================================================
+ * * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * * ===========================================================================
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ * * ============LICENSE_END====================================================
+ * *
+ * * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ * *
+ ******************************************************************************/
+
+
+package org.onap.dmaap.datarouter.provisioning;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.InputStream;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
+import java.security.KeyStore;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.TreeSet;
+
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.conn.scheme.Scheme;
+import org.apache.http.conn.ssl.SSLSocketFactory;
+import org.apache.http.entity.ByteArrayEntity;
+import org.apache.http.entity.ContentType;
+import org.apache.http.impl.client.AbstractHttpClient;
+import org.apache.http.impl.client.DefaultHttpClient;
+import org.apache.log4j.Logger;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+import org.json.JSONTokener;
+import org.onap.dmaap.datarouter.provisioning.beans.EgressRoute;
+import org.onap.dmaap.datarouter.provisioning.beans.Feed;
+import org.onap.dmaap.datarouter.provisioning.beans.Group;
+import org.onap.dmaap.datarouter.provisioning.beans.IngressRoute;
+import org.onap.dmaap.datarouter.provisioning.beans.NetworkRoute;
+import org.onap.dmaap.datarouter.provisioning.beans.Parameters;
+import org.onap.dmaap.datarouter.provisioning.beans.Subscription;
+import org.onap.dmaap.datarouter.provisioning.beans.Syncable;
+import org.onap.dmaap.datarouter.provisioning.utils.DB;
+import org.onap.dmaap.datarouter.provisioning.utils.LogfileLoader;
+import org.onap.dmaap.datarouter.provisioning.utils.RLEBitSet;
+import org.onap.dmaap.datarouter.provisioning.utils.URLUtilities;
+
+/**
+ * This class handles synchronization between provisioning servers (PODs). It has three primary functions:
+ * <ol>
+ * <li>Checking DNS once per minute to see which POD the DNS CNAME points to. The CNAME will point to
+ * the active (master) POD.</li>
+ * <li>On non-master (standby) PODs, fetches provisioning data and logs in order to keep MariaDB in sync.</li>
+ * <li>Providing information to other parts of the system as to the current role (ACTIVE, STANDBY, UNKNOWN)
+ * of this POD.</li>
+ * </ol>
+ * <p>For this to work correctly, the following code needs to be placed at the beginning of main().</p>
+ * <code>
+ * Security.setProperty("networkaddress.cache.ttl", "10");
+ * </code>
+ *
+ * @author Robert Eby
+ * @version $Id: SynchronizerTask.java,v 1.10 2014/03/21 13:50:10 eby Exp $
+ */
+public class SynchronizerTask extends TimerTask {
+ /** This is a singleton -- there is only one SynchronizerTask object in the server */
+ private static SynchronizerTask synctask;
+
+ /** This POD is unknown -- not on the list of PODs */
+ public static final int UNKNOWN = 0;
+ /** This POD is active -- on the list of PODs, and the DNS CNAME points to us */
+ public static final int ACTIVE = 1;
+ /** This POD is standby -- on the list of PODs, and the DNS CNAME does not point to us */
+ public static final int STANDBY = 2;
+ private static final String[] stnames = { "UNKNOWN", "ACTIVE", "STANDBY" };
+ private static final long ONE_HOUR = 60 * 60 * 1000L;
+
+ private final Logger logger;
+ private final Timer rolex;
+ private final String spooldir;
+ private int state;
+ private boolean doFetch;
+ private long nextsynctime;
+ private AbstractHttpClient httpclient = null;
+
+ /**
+ * Get the singleton SynchronizerTask object.
+ * @return the SynchronizerTask
+ */
+ public static synchronized SynchronizerTask getSynchronizer() {
+ if (synctask == null)
+ synctask = new SynchronizerTask();
+ return synctask;
+ }
+
+ @SuppressWarnings("deprecation")
+ private SynchronizerTask() {
+ logger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");
+ rolex = new Timer();
+ spooldir = (new DB()).getProperties().getProperty("org.onap.dmaap.datarouter.provserver.spooldir");
+ state = UNKNOWN;
+ doFetch = true; // start off with a fetch
+ nextsynctime = 0;
+
+ logger.info("PROV5000: Sync task starting, server state is UNKNOWN");
+ try {
+ Properties props = (new DB()).getProperties();
+ String type = props.getProperty(Main.KEYSTORE_TYPE_PROPERTY, "jks");
+ String store = props.getProperty(Main.KEYSTORE_PATH_PROPERTY);
+ String pass = props.getProperty(Main.KEYSTORE_PASSWORD_PROPERTY);
+ KeyStore keyStore = KeyStore.getInstance(type);
+ FileInputStream instream = new FileInputStream(new File(store));
+ keyStore.load(instream, pass.toCharArray());
+ instream.close();
+
+ store = props.getProperty(Main.TRUSTSTORE_PATH_PROPERTY);
+ pass = props.getProperty(Main.TRUSTSTORE_PASSWORD_PROPERTY);
+ KeyStore trustStore = null;
+ if (store != null && store.length() > 0) {
+ trustStore = KeyStore.getInstance(KeyStore.getDefaultType());
+ instream = new FileInputStream(new File(store));
+ trustStore.load(instream, pass.toCharArray());
+ instream.close();
+ }
+
+ // We are connecting with the node name, but the certificate will have the CNAME
+ // So we need to accept a non-matching certificate name
+ String keystorepass = props.getProperty(Main.KEYSTORE_PASSWORD_PROPERTY); //itrack.web.att.com/browse/DATARTR-6 for changing hard coded passphrase ref
+ AbstractHttpClient hc = new DefaultHttpClient();
+ SSLSocketFactory socketFactory =
+ (trustStore == null)
+ ? new SSLSocketFactory(keyStore, keystorepass)
+ : new SSLSocketFactory(keyStore, keystorepass, trustStore);
+ socketFactory.setHostnameVerifier(SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
+ Scheme sch = new Scheme("https", 443, socketFactory);
+ hc.getConnectionManager().getSchemeRegistry().register(sch);
+ httpclient = hc;
+
+ // Run once every 5 seconds to check DNS, etc.
+ long interval = 0;
+ try {
+ String s = props.getProperty("org.onap.dmaap.datarouter.provserver.sync_interval", "5000");
+ interval = Long.parseLong(s);
+ } catch (NumberFormatException e) {
+ interval = 5000L;
+ }
+ rolex.scheduleAtFixedRate(this, 0L, interval);
+ } catch (Exception e) {
+ logger.warn("PROV5005: Problem starting the synchronizer: "+e);
+ }
+ }
+
+ /**
+ * What is the state of this POD?
+ * @return one of ACTIVE, STANDBY, UNKNOWN
+ */
+ public int getState() {
+ return state;
+ }
+
+ /**
+ * Is this the active POD?
+ * @return true if we are active (the master), false otherwise
+ */
+ public boolean isActive() {
+ return state == ACTIVE;
+ }
+
+ /**
+ * This method is used to signal that another POD (the active POD) has sent us a /fetchProv request,
+ * and that we should re-synchronize with the master.
+ */
+ public void doFetch() {
+ doFetch = true;
+ }
+
+ /**
+ * Runs once a minute in order to <ol>
+ * <li>lookup DNS names,</li>
+ * <li>determine the state of this POD,</li>
+ * <li>if this is a standby POD, and the fetch flag is set, perform a fetch of state from the active POD.</li>
+ * <li>if this is a standby POD, check if there are any new log records to be replicated.</li>
+ * </ol>
+ */
+ @Override
+ public void run() {
+ try {
+ state = lookupState();
+ if (state == STANDBY) {
+ // Only copy provisioning data FROM the active server TO the standby
+ if (doFetch || (System.currentTimeMillis() >= nextsynctime)) {
+ logger.debug("Initiating a sync...");
+ JSONObject jo = readProvisioningJSON();
+ if (jo != null) {
+ doFetch = false;
+ syncFeeds( jo.getJSONArray("feeds"));
+ syncSubs( jo.getJSONArray("subscriptions"));
+ syncGroups( jo.getJSONArray("groups")); //Rally:US708115 - 1610
+ syncParams(jo.getJSONObject("parameters"));
+ // The following will not be present in a version=1.0 provfeed
+ JSONArray ja = jo.optJSONArray("ingress");
+ if (ja != null)
+ syncIngressRoutes(ja);
+ JSONObject j2 = jo.optJSONObject("egress");
+ if (j2 != null)
+ syncEgressRoutes( j2);
+ ja = jo.optJSONArray("routing");
+ if (ja != null)
+ syncNetworkRoutes(ja);
+ }
+ logger.info("PROV5013: Sync completed.");
+ nextsynctime = System.currentTimeMillis() + ONE_HOUR;
+ }
+ } else {
+ // Don't do fetches on non-standby PODs
+ doFetch = false;
+ }
+
+ // Fetch DR logs as needed - server to server
+ LogfileLoader lfl = LogfileLoader.getLoader();
+ if (lfl.isIdle()) {
+ // Only fetch new logs if the loader is waiting for them.
+ logger.trace("Checking for logs to replicate...");
+ RLEBitSet local = lfl.getBitSet();
+ RLEBitSet remote = readRemoteLoglist();
+ remote.andNot(local);
+ if (!remote.isEmpty()) {
+ logger.debug(" Replicating logs: "+remote);
+ replicateDRLogs(remote);
+ }
+ }
+ } catch (Exception e) {
+ logger.warn("PROV0020: Caught exception in SynchronizerTask: "+e);
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * This method is used to lookup the CNAME that points to the active server.
+ * It returns 0 (UNKNOWN), 1(ACTIVE), or 2 (STANDBY) to indicate the state of this server.
+ * @return the current state
+ */
+ private int lookupState() {
+ int newstate = UNKNOWN;
+ try {
+ InetAddress myaddr = InetAddress.getLocalHost();
+ if (logger.isTraceEnabled())
+ logger.trace("My address: "+myaddr);
+ String this_pod = myaddr.getHostName();
+ Set<String> pods = new TreeSet<String>(Arrays.asList(BaseServlet.getPods()));
+ if (pods.contains(this_pod)) {
+ InetAddress pserver = InetAddress.getByName(BaseServlet.active_prov_name);
+ newstate = myaddr.equals(pserver) ? ACTIVE : STANDBY;
+ if (logger.isDebugEnabled() && System.currentTimeMillis() >= next_msg) {
+ logger.debug("Active POD = "+pserver+", Current state is "+stnames[newstate]);
+ next_msg = System.currentTimeMillis() + (5 * 60 * 1000L);
+ }
+ } else {
+ logger.warn("PROV5003: My name ("+this_pod+") is missing from the list of provisioning servers.");
+ }
+ } catch (UnknownHostException e) {
+ logger.warn("PROV5002: Cannot determine the name of this provisioning server.");
+ }
+
+ if (newstate != state)
+ logger.info(String.format("PROV5001: Server state changed from %s to %s", stnames[state], stnames[newstate]));
+ return newstate;
+ }
+ private static long next_msg = 0; // only display the "Current state" msg every 5 mins.
+ /** Synchronize the Feeds in the JSONArray, with the Feeds in the DB. */
+ private void syncFeeds(JSONArray ja) {
+ Collection<Syncable> coll = new ArrayList<Syncable>();
+ for (int n = 0; n < ja.length(); n++) {
+ try {
+ Feed f = new Feed(ja.getJSONObject(n));
+ coll.add(f);
+ } catch (Exception e) {
+ logger.warn("PROV5004: Invalid object in feed: "+ja.optJSONObject(n));
+ }
+ }
+ if (sync(coll, Feed.getAllFeeds()))
+ BaseServlet.provisioningDataChanged();
+ }
+ /** Synchronize the Subscriptions in the JSONArray, with the Subscriptions in the DB. */
+ private void syncSubs(JSONArray ja) {
+ Collection<Syncable> coll = new ArrayList<Syncable>();
+ for (int n = 0; n < ja.length(); n++) {
+ try {
+ //Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.
+ JSONObject j = ja.getJSONObject(n);
+ j.put("sync", "true");
+ Subscription s = new Subscription(j);
+ coll.add(s);
+ } catch (Exception e) {
+ logger.warn("PROV5004: Invalid object in subscription: "+ja.optJSONObject(n));
+ }
+ }
+ if (sync(coll, Subscription.getAllSubscriptions()))
+ BaseServlet.provisioningDataChanged();
+ }
+
+ /** Rally:US708115 - Synchronize the Groups in the JSONArray, with the Groups in the DB. */
+ private void syncGroups(JSONArray ja) {
+ Collection<Syncable> coll = new ArrayList<Syncable>();
+ for (int n = 0; n < ja.length(); n++) {
+ try {
+ Group g = new Group(ja.getJSONObject(n));
+ coll.add(g);
+ } catch (Exception e) {
+ logger.warn("PROV5004: Invalid object in subscription: "+ja.optJSONObject(n));
+ }
+ }
+ if (sync(coll, Group.getAllgroups()))
+ BaseServlet.provisioningDataChanged();
+ }
+
+
+ /** Synchronize the Parameters in the JSONObject, with the Parameters in the DB. */
+ private void syncParams(JSONObject jo) {
+ Collection<Syncable> coll = new ArrayList<Syncable>();
+ for (String k : jo.keySet()) {
+ String v = "";
+ try {
+ v = jo.getString(k);
+ } catch (JSONException e) {
+ try {
+ v = ""+jo.getInt(k);
+ } catch (JSONException e1) {
+ JSONArray ja = jo.getJSONArray(k);
+ for (int i = 0; i < ja.length(); i++) {
+ if (i > 0)
+ v += "|";
+ v += ja.getString(i);
+ }
+ }
+ }
+ coll.add(new Parameters(k, v));
+ }
+ if (sync(coll, Parameters.getParameterCollection())) {
+ BaseServlet.provisioningDataChanged();
+ BaseServlet.provisioningParametersChanged();
+ }
+ }
+ private void syncIngressRoutes(JSONArray ja) {
+ Collection<Syncable> coll = new ArrayList<Syncable>();
+ for (int n = 0; n < ja.length(); n++) {
+ try {
+ IngressRoute in = new IngressRoute(ja.getJSONObject(n));
+ coll.add(in);
+ } catch (NumberFormatException e) {
+ logger.warn("PROV5004: Invalid object in ingress routes: "+ja.optJSONObject(n));
+ }
+ }
+ if (sync(coll, IngressRoute.getAllIngressRoutes()))
+ BaseServlet.provisioningDataChanged();
+ }
+ private void syncEgressRoutes(JSONObject jo) {
+ Collection<Syncable> coll = new ArrayList<Syncable>();
+ for (String key : jo.keySet()) {
+ try {
+ int sub = Integer.parseInt(key);
+ String node = jo.getString(key);
+ EgressRoute er = new EgressRoute(sub, node);
+ coll.add(er);
+ } catch (NumberFormatException e) {
+ logger.warn("PROV5004: Invalid subid in egress routes: "+key);
+ } catch (IllegalArgumentException e) {
+ logger.warn("PROV5004: Invalid node name in egress routes: "+key);
+ }
+ }
+ if (sync(coll, EgressRoute.getAllEgressRoutes()))
+ BaseServlet.provisioningDataChanged();
+ }
+ private void syncNetworkRoutes(JSONArray ja) {
+ Collection<Syncable> coll = new ArrayList<Syncable>();
+ for (int n = 0; n < ja.length(); n++) {
+ try {
+ NetworkRoute nr = new NetworkRoute(ja.getJSONObject(n));
+ coll.add(nr);
+ } catch (JSONException e) {
+ logger.warn("PROV5004: Invalid object in network routes: "+ja.optJSONObject(n));
+ }
+ }
+ if (sync(coll, NetworkRoute.getAllNetworkRoutes()))
+ BaseServlet.provisioningDataChanged();
+ }
+ private boolean sync(Collection<? extends Syncable> newc, Collection<? extends Syncable> oldc) {
+ boolean changes = false;
+ try {
+ Map<String, Syncable> newmap = getMap(newc);
+ Map<String, Syncable> oldmap = getMap(oldc);
+ Set<String> union = new TreeSet<String>(newmap.keySet());
+ union.addAll(oldmap.keySet());
+ DB db = new DB();
+ @SuppressWarnings("resource")
+ Connection conn = db.getConnection();
+ for (String n : union) {
+ Syncable newobj = newmap.get(n);
+ Syncable oldobj = oldmap.get(n);
+ if (oldobj == null) {
+ if (logger.isDebugEnabled())
+ logger.debug(" Inserting record: "+newobj);
+ newobj.doInsert(conn);
+ changes = true;
+ } else if (newobj == null) {
+ if (logger.isDebugEnabled())
+ logger.debug(" Deleting record: "+oldobj);
+ oldobj.doDelete(conn);
+ changes = true;
+ } else if (!newobj.equals(oldobj)) {
+ if (logger.isDebugEnabled())
+ logger.debug(" Updating record: "+newobj);
+ newobj.doUpdate(conn);
+
+ /**Rally US708115
+ * Change Ownership of FEED - 1610, Syncronised with secondary DB.
+ * */
+ checkChnageOwner(newobj, oldobj);
+
+ changes = true;
+ }
+ }
+ db.release(conn);
+ } catch (SQLException e) {
+ logger.warn("PROV5009: problem during sync, exception: "+e);
+ e.printStackTrace();
+ }
+ return changes;
+ }
+ private Map<String, Syncable> getMap(Collection<? extends Syncable> c) {
+ Map<String, Syncable> map = new HashMap<String, Syncable>();
+ for (Syncable v : c) {
+ map.put(v.getKey(), v);
+ }
+ return map;
+ }
+
+
+ /**Change owner of FEED/SUBSCRIPTION*/
+ /**Rally US708115
+ * Change Ownership of FEED - 1610
+ *
+ * */
+ private void checkChnageOwner(Syncable newobj, Syncable oldobj) {
+ if(newobj instanceof Feed) {
+ Feed oldfeed = (Feed) oldobj;
+ Feed newfeed = (Feed) newobj;
+
+ if(!oldfeed.getPublisher().equals(newfeed.getPublisher())){
+ logger.info("PROV5013 - Previous publisher: "+oldfeed.getPublisher() +": New publisher-"+newfeed.getPublisher());
+ oldfeed.setPublisher(newfeed.getPublisher());
+ oldfeed.changeOwnerShip();
+ }
+ }
+ else if(newobj instanceof Subscription) {
+ Subscription oldsub = (Subscription) oldobj;
+ Subscription newsub = (Subscription) newobj;
+
+ if(!oldsub.getSubscriber().equals(newsub.getSubscriber())){
+ logger.info("PROV5013 - Previous subscriber: "+oldsub.getSubscriber() +": New subscriber-"+newsub.getSubscriber());
+ oldsub.setSubscriber(newsub.getSubscriber());
+ oldsub.changeOwnerShip();
+ }
+ }
+
+ }
+
+ /**
+ * Issue a GET on the peer POD's /internal/prov/ URL to get a copy of its provisioning data.
+ * @return the provisioning data (as a JONObject)
+ */
+ private synchronized JSONObject readProvisioningJSON() {
+ String url = URLUtilities.generatePeerProvURL();
+ HttpGet get = new HttpGet(url);
+ try {
+ HttpResponse response = httpclient.execute(get);
+ int code = response.getStatusLine().getStatusCode();
+ if (code != HttpServletResponse.SC_OK) {
+ logger.warn("PROV5010: readProvisioningJSON failed, bad error code: "+code);
+ return null;
+ }
+ HttpEntity entity = response.getEntity();
+ String ctype = entity.getContentType().getValue().trim();
+ if (!ctype.equals(BaseServlet.PROVFULL_CONTENT_TYPE1) && !ctype.equals(BaseServlet.PROVFULL_CONTENT_TYPE2)) {
+ logger.warn("PROV5011: readProvisioningJSON failed, bad content type: "+ctype);
+ return null;
+ }
+ return new JSONObject(new JSONTokener(entity.getContent()));
+ } catch (Exception e) {
+ logger.warn("PROV5012: readProvisioningJSON failed, exception: "+e);
+ return null;
+ } finally {
+ get.releaseConnection();
+ }
+ }
+ /**
+ * Issue a GET on the peer POD's /internal/drlogs/ URL to get an RELBitSet representing the
+ * log records available in the remote database.
+ * @return the bitset
+ */
+ private RLEBitSet readRemoteLoglist() {
+ RLEBitSet bs = new RLEBitSet();
+ String url = URLUtilities.generatePeerLogsURL();
+
+ //Fixing if only one Prov is configured, not to give exception to fill logs, return empty bitset.
+ if(url.equals("")) {
+ return bs;
+ }
+ //End of fix.
+
+ HttpGet get = new HttpGet(url);
+ try {
+ HttpResponse response = httpclient.execute(get);
+ int code = response.getStatusLine().getStatusCode();
+ if (code != HttpServletResponse.SC_OK) {
+ logger.warn("PROV5010: readRemoteLoglist failed, bad error code: "+code);
+ return bs;
+ }
+ HttpEntity entity = response.getEntity();
+ String ctype = entity.getContentType().getValue().trim();
+ if (!ctype.equals("text/plain")) {
+ logger.warn("PROV5011: readRemoteLoglist failed, bad content type: "+ctype);
+ return bs;
+ }
+ InputStream is = entity.getContent();
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ int ch = 0;
+ while ((ch = is.read()) >= 0)
+ bos.write(ch);
+ bs.set(bos.toString());
+ is.close();
+ } catch (Exception e) {
+ logger.warn("PROV5012: readRemoteLoglist failed, exception: "+e);
+ return bs;
+ } finally {
+ get.releaseConnection();
+ }
+ return bs;
+ }
+ /**
+ * Issue a POST on the peer POD's /internal/drlogs/ URL to fetch log records available
+ * in the remote database that we wish to copy to the local database.
+ * @param bs the bitset (an RELBitSet) of log records to fetch
+ */
+ private void replicateDRLogs(RLEBitSet bs) {
+ String url = URLUtilities.generatePeerLogsURL();
+ HttpPost post = new HttpPost(url);
+ try {
+ String t = bs.toString();
+ HttpEntity body = new ByteArrayEntity(t.getBytes(), ContentType.create("text/plain"));
+ post.setEntity(body);
+ if (logger.isDebugEnabled())
+ logger.debug("Requesting records: "+t);
+
+ HttpResponse response = httpclient.execute(post);
+ int code = response.getStatusLine().getStatusCode();
+ if (code != HttpServletResponse.SC_OK) {
+ logger.warn("PROV5010: replicateDRLogs failed, bad error code: "+code);
+ return;
+ }
+ HttpEntity entity = response.getEntity();
+ String ctype = entity.getContentType().getValue().trim();
+ if (!ctype.equals("text/plain")) {
+ logger.warn("PROV5011: replicateDRLogs failed, bad content type: "+ctype);
+ return;
+ }
+
+ String spoolname = "" + System.currentTimeMillis();
+ Path tmppath = Paths.get(spooldir, spoolname);
+ Path donepath = Paths.get(spooldir, "IN."+spoolname);
+ Files.copy(entity.getContent(), Paths.get(spooldir, spoolname), StandardCopyOption.REPLACE_EXISTING);
+ Files.move(tmppath, donepath, StandardCopyOption.REPLACE_EXISTING);
+ logger.info("Approximately "+bs.cardinality()+" records replicated.");
+ } catch (Exception e) {
+ logger.warn("PROV5012: replicateDRLogs failed, exception: "+e);
+ } finally {
+ post.releaseConnection();
+ }
+ }
+}
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: BaseLogRecord.java,v 1.10 2013/10/29 16:57:57 eby Exp $\r
*/\r
public class BaseLogRecord implements LOGJSONable, Loadable {\r
- protected static final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");\r
+ protected static final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");\r
\r
- private long eventTime;\r
- private String publishId;\r
- private int feedid;\r
- private String requestUri;\r
- private String method;\r
- private String contentType;\r
- private long contentLength;\r
+ private long eventTime;\r
+ private String publishId;\r
+ private int feedid;\r
+ private String requestUri;\r
+ private String method;\r
+ private String contentType;\r
+ private long contentLength;\r
\r
- protected BaseLogRecord(String[] pp) throws ParseException {\r
-// This throws exceptions occasionally - don't know why.\r
-// Date d = null;\r
-// synchronized (sdf) {\r
-// d = sdf.parse(pp[0]);\r
-// }\r
- Date d = parseDate(pp[0]);\r
- this.eventTime = d.getTime();\r
- this.publishId = pp[2];\r
- this.feedid = Integer.parseInt(pp[3]);\r
- if (pp[1].equals("DLX")) {\r
- this.requestUri = "";\r
- this.method = "GET"; // Note: we need a valid value in this field, even though unused\r
- this.contentType = "";\r
- this.contentLength = Long.parseLong(pp[5]);\r
- } else if (pp[1].equals("PUB") || pp[1].equals("LOG") || pp[1].equals("PBF")) {\r
- this.requestUri = pp[4];\r
- this.method = pp[5];\r
- this.contentType = pp[6];\r
- this.contentLength = Long.parseLong(pp[7]);\r
- } else {\r
- this.requestUri = pp[5];\r
- this.method = pp[6];\r
- this.contentType = pp[7];\r
- this.contentLength = Long.parseLong(pp[8]);\r
- }\r
- }\r
- protected BaseLogRecord(ResultSet rs) throws SQLException {\r
- this.eventTime = rs.getLong("EVENT_TIME");\r
- this.publishId = rs.getString("PUBLISH_ID");\r
- this.feedid = rs.getInt("FEEDID");\r
- this.requestUri = rs.getString("REQURI");\r
- this.method = rs.getString("METHOD");\r
- this.contentType = rs.getString("CONTENT_TYPE");\r
- this.contentLength = rs.getLong("CONTENT_LENGTH");\r
- }\r
- protected Date parseDate(final String s) throws ParseException {\r
- int[] n = new int[7];\r
- int p = 0;\r
- for (int i = 0; i < s.length(); i++) {\r
- char c = s.charAt(i);\r
- if (c < '0' || c > '9') {\r
- p++;\r
- } else {\r
- if (p > n.length)\r
- throw new ParseException("parseDate()", 0);\r
- n[p] = (n[p] * 10) + (c - '0');\r
- }\r
- }\r
- if (p != 7)\r
- throw new ParseException("parseDate()", 1);\r
- Calendar cal = new GregorianCalendar();\r
- cal.set(Calendar.YEAR, n[0]);\r
- cal.set(Calendar.MONTH, n[1]-1);\r
- cal.set(Calendar.DAY_OF_MONTH, n[2]);\r
- cal.set(Calendar.HOUR_OF_DAY, n[3]);\r
- cal.set(Calendar.MINUTE, n[4]);\r
- cal.set(Calendar.SECOND, n[5]);\r
- cal.set(Calendar.MILLISECOND, n[6]);\r
- return cal.getTime();\r
- }\r
- public long getEventTime() {\r
- return eventTime;\r
- }\r
- public void setEventTime(long eventTime) {\r
- this.eventTime = eventTime;\r
- }\r
- public String getPublishId() {\r
- return publishId;\r
- }\r
- public void setPublishId(String publishId) {\r
- this.publishId = publishId;\r
- }\r
- public int getFeedid() {\r
- return feedid;\r
- }\r
- public void setFeedid(int feedid) {\r
- this.feedid = feedid;\r
- }\r
- public String getRequestUri() {\r
- return requestUri;\r
- }\r
- public void setRequestUri(String requestUri) {\r
- this.requestUri = requestUri;\r
- }\r
- public String getMethod() {\r
- return method;\r
- }\r
- public void setMethod(String method) {\r
- this.method = method;\r
- }\r
- public String getContentType() {\r
- return contentType;\r
- }\r
- public void setContentType(String contentType) {\r
- this.contentType = contentType;\r
- }\r
- public long getContentLength() {\r
- return contentLength;\r
- }\r
- public void setContentLength(long contentLength) {\r
- this.contentLength = contentLength;\r
- }\r
- @Override\r
- public LOGJSONObject asJSONObject() {\r
- LOGJSONObject jo = new LOGJSONObject();\r
- String t = "";\r
- synchronized (sdf) {\r
- t = sdf.format(eventTime);\r
- }\r
- jo.put("date", t);\r
- jo.put("publishId", publishId);\r
- jo.put("requestURI", requestUri);\r
- jo.put("method", method);\r
- if (method.equals("PUT")) {\r
- jo.put("contentType", contentType);\r
- jo.put("contentLength", contentLength);\r
- }\r
- return jo;\r
- }\r
- @Override\r
- public void load(PreparedStatement ps) throws SQLException {\r
- ps.setLong (2, getEventTime());\r
- ps.setString(3, getPublishId());\r
- ps.setInt (4, getFeedid());\r
- ps.setString(5, getRequestUri());\r
- ps.setString(6, getMethod());\r
- ps.setString(7, getContentType());\r
- ps.setLong (8, getContentLength());\r
- }\r
+ protected BaseLogRecord(String[] pp) throws ParseException {\r
+// This throws exceptions occasionally - don't know why.\r
+// Date d = null;\r
+// synchronized (sdf) {\r
+// d = sdf.parse(pp[0]);\r
+// }\r
+ Date d = parseDate(pp[0]);\r
+ this.eventTime = d.getTime();\r
+ this.publishId = pp[2];\r
+ this.feedid = Integer.parseInt(pp[3]);\r
+ if (pp[1].equals("DLX")) {\r
+ this.requestUri = "";\r
+ this.method = "GET"; // Note: we need a valid value in this field, even though unused\r
+ this.contentType = "";\r
+ this.contentLength = Long.parseLong(pp[5]);\r
+ } else if (pp[1].equals("PUB") || pp[1].equals("LOG") || pp[1].equals("PBF")) {\r
+ this.requestUri = pp[4];\r
+ this.method = pp[5];\r
+ this.contentType = pp[6];\r
+ this.contentLength = Long.parseLong(pp[7]);\r
+ } else {\r
+ this.requestUri = pp[5];\r
+ this.method = pp[6];\r
+ this.contentType = pp[7];\r
+ this.contentLength = Long.parseLong(pp[8]);\r
+ }\r
+ }\r
+ protected BaseLogRecord(ResultSet rs) throws SQLException {\r
+ this.eventTime = rs.getLong("EVENT_TIME");\r
+ this.publishId = rs.getString("PUBLISH_ID");\r
+ this.feedid = rs.getInt("FEEDID");\r
+ this.requestUri = rs.getString("REQURI");\r
+ this.method = rs.getString("METHOD");\r
+ this.contentType = rs.getString("CONTENT_TYPE");\r
+ this.contentLength = rs.getLong("CONTENT_LENGTH");\r
+ }\r
+ protected Date parseDate(final String s) throws ParseException {\r
+ int[] n = new int[7];\r
+ int p = 0;\r
+ for (int i = 0; i < s.length(); i++) {\r
+ char c = s.charAt(i);\r
+ if (c < '0' || c > '9') {\r
+ p++;\r
+ } else {\r
+ if (p > n.length)\r
+ throw new ParseException("parseDate()", 0);\r
+ n[p] = (n[p] * 10) + (c - '0');\r
+ }\r
+ }\r
+ if (p != 7)\r
+ throw new ParseException("parseDate()", 1);\r
+ Calendar cal = new GregorianCalendar();\r
+ cal.set(Calendar.YEAR, n[0]);\r
+ cal.set(Calendar.MONTH, n[1]-1);\r
+ cal.set(Calendar.DAY_OF_MONTH, n[2]);\r
+ cal.set(Calendar.HOUR_OF_DAY, n[3]);\r
+ cal.set(Calendar.MINUTE, n[4]);\r
+ cal.set(Calendar.SECOND, n[5]);\r
+ cal.set(Calendar.MILLISECOND, n[6]);\r
+ return cal.getTime();\r
+ }\r
+ public long getEventTime() {\r
+ return eventTime;\r
+ }\r
+ public void setEventTime(long eventTime) {\r
+ this.eventTime = eventTime;\r
+ }\r
+ public String getPublishId() {\r
+ return publishId;\r
+ }\r
+ public void setPublishId(String publishId) {\r
+ this.publishId = publishId;\r
+ }\r
+ public int getFeedid() {\r
+ return feedid;\r
+ }\r
+ public void setFeedid(int feedid) {\r
+ this.feedid = feedid;\r
+ }\r
+ public String getRequestUri() {\r
+ return requestUri;\r
+ }\r
+ public void setRequestUri(String requestUri) {\r
+ this.requestUri = requestUri;\r
+ }\r
+ public String getMethod() {\r
+ return method;\r
+ }\r
+ public void setMethod(String method) {\r
+ this.method = method;\r
+ }\r
+ public String getContentType() {\r
+ return contentType;\r
+ }\r
+ public void setContentType(String contentType) {\r
+ this.contentType = contentType;\r
+ }\r
+ public long getContentLength() {\r
+ return contentLength;\r
+ }\r
+ public void setContentLength(long contentLength) {\r
+ this.contentLength = contentLength;\r
+ }\r
+ @Override\r
+ public LOGJSONObject asJSONObject() {\r
+ LOGJSONObject jo = new LOGJSONObject();\r
+ String t = "";\r
+ synchronized (sdf) {\r
+ t = sdf.format(eventTime);\r
+ }\r
+ jo.put("date", t);\r
+ jo.put("publishId", publishId);\r
+ jo.put("requestURI", requestUri);\r
+ jo.put("method", method);\r
+ if (method.equals("PUT")) {\r
+ jo.put("contentType", contentType);\r
+ jo.put("contentLength", contentLength);\r
+ }\r
+ return jo;\r
+ }\r
+ @Override\r
+ public void load(PreparedStatement ps) throws SQLException {\r
+ ps.setLong (2, getEventTime());\r
+ ps.setString(3, getPublishId());\r
+ ps.setInt (4, getFeedid());\r
+ ps.setString(5, getRequestUri());\r
+ ps.setString(6, getMethod());\r
+ ps.setString(7, getContentType());\r
+ ps.setLong (8, getContentLength());\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: Deleteable.java,v 1.2 2013/05/29 14:44:36 eby Exp $\r
*/\r
public interface Deleteable {\r
- /**\r
- * Delete this object in the DB.\r
- * @param c the JDBC Connection to use\r
- * @return true if the DELETE succeeded, false otherwise\r
- */\r
- public boolean doDelete(Connection c);\r
+ /**\r
+ * Delete this object in the DB.\r
+ * @param c the JDBC Connection to use\r
+ * @return true if the DELETE succeeded, false otherwise\r
+ */\r
+ public boolean doDelete(Connection c);\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: DeliveryExtraRecord.java,v 1.1 2013/10/28 18:06:52 eby Exp $\r
*/\r
public class DeliveryExtraRecord extends BaseLogRecord {\r
- private int subid;\r
- private long contentLength2;\r
+ private int subid;\r
+ private long contentLength2;\r
\r
- public DeliveryExtraRecord(String[] pp) throws ParseException {\r
- super(pp);\r
- this.subid = Integer.parseInt(pp[4]);\r
- this.contentLength2 = Long.parseLong(pp[6]);\r
- }\r
- public DeliveryExtraRecord(ResultSet rs) throws SQLException {\r
- super(rs);\r
- // Note: because this record should be "rare" these fields are mapped to unconventional fields in the DB\r
- this.subid = rs.getInt("DELIVERY_SUBID");\r
- this.contentLength2 = rs.getInt("CONTENT_LENGTH_2");\r
- }\r
- @Override\r
- public void load(PreparedStatement ps) throws SQLException {\r
- ps.setString(1, "dlx"); // field 1: type\r
- super.load(ps); // loads fields 2-8\r
- ps.setNull( 9, Types.VARCHAR);\r
- ps.setNull(10, Types.VARCHAR);\r
- ps.setNull(11, Types.VARCHAR);\r
- ps.setNull(12, Types.INTEGER);\r
- ps.setInt (13, subid);\r
- ps.setNull(14, Types.VARCHAR);\r
- ps.setNull(15, Types.INTEGER);\r
- ps.setNull(16, Types.INTEGER);\r
- ps.setNull(17, Types.VARCHAR);\r
- ps.setLong(19, contentLength2);\r
- }\r
+ public DeliveryExtraRecord(String[] pp) throws ParseException {\r
+ super(pp);\r
+ this.subid = Integer.parseInt(pp[4]);\r
+ this.contentLength2 = Long.parseLong(pp[6]);\r
+ }\r
+ public DeliveryExtraRecord(ResultSet rs) throws SQLException {\r
+ super(rs);\r
+ // Note: because this record should be "rare" these fields are mapped to unconventional fields in the DB\r
+ this.subid = rs.getInt("DELIVERY_SUBID");\r
+ this.contentLength2 = rs.getInt("CONTENT_LENGTH_2");\r
+ }\r
+ @Override\r
+ public void load(PreparedStatement ps) throws SQLException {\r
+ ps.setString(1, "dlx"); // field 1: type\r
+ super.load(ps); // loads fields 2-8\r
+ ps.setNull( 9, Types.VARCHAR);\r
+ ps.setNull(10, Types.VARCHAR);\r
+ ps.setNull(11, Types.VARCHAR);\r
+ ps.setNull(12, Types.INTEGER);\r
+ ps.setInt (13, subid);\r
+ ps.setNull(14, Types.VARCHAR);\r
+ ps.setNull(15, Types.INTEGER);\r
+ ps.setNull(16, Types.INTEGER);\r
+ ps.setNull(17, Types.VARCHAR);\r
+ ps.setLong(19, contentLength2);\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* The representation of a Delivery Record, as retrieved from the DB.\r
+ *\r
* @author Robert Eby\r
* @version $Id: DeliveryRecord.java,v 1.9 2014/03/12 19:45:41 eby Exp $\r
*/\r
public class DeliveryRecord extends BaseLogRecord {\r
- private int subid;\r
- private String fileid;\r
- private int result;\r
- private String user;\r
-\r
- public DeliveryRecord(String[] pp) throws ParseException {\r
- super(pp);\r
- String fileid = pp[5];\r
- if (fileid.lastIndexOf('/') >= 0)\r
- fileid = fileid.substring(fileid.lastIndexOf('/')+1);\r
- this.subid = Integer.parseInt(pp[4]);\r
- this.fileid = fileid;\r
- this.result = Integer.parseInt(pp[10]);\r
- this.user = pp[9];\r
- if (this.user != null && this.user.length() > 50)\r
- this.user = this.user.substring(0, 50);\r
- }\r
- public DeliveryRecord(ResultSet rs) throws SQLException {\r
- super(rs);\r
- this.subid = rs.getInt("DELIVERY_SUBID");\r
- this.fileid = rs.getString("DELIVERY_FILEID");\r
- this.result = rs.getInt("RESULT");\r
- this.user = rs.getString("USER");\r
- }\r
- public int getSubid() {\r
- return subid;\r
- }\r
- public void setSubid(int subid) {\r
- this.subid = subid;\r
- }\r
- public String getFileid() {\r
- return fileid;\r
- }\r
- public void setFileid(String fileid) {\r
- this.fileid = fileid;\r
- }\r
- public int getResult() {\r
- return result;\r
- }\r
- public void setResult(int result) {\r
- this.result = result;\r
- }\r
- public String getUser() {\r
- return user;\r
- }\r
- public void setUser(String user) {\r
- this.user = user;\r
- }\r
- \r
- \r
- public LOGJSONObject reOrderObject(LOGJSONObject jo) {\r
- LinkedHashMap<String,Object> logrecordObj = new LinkedHashMap<String,Object>();\r
- \r
- logrecordObj.put("statusCode", jo.get("statusCode"));\r
- logrecordObj.put("deliveryId", jo.get("deliveryId"));\r
- logrecordObj.put("publishId", jo.get("publishId"));\r
- logrecordObj.put("requestURI", jo.get("requestURI"));\r
- //logrecordObj.put("sourceIP", jo.get("sourceIP"));\r
- logrecordObj.put("method", jo.get("method"));\r
- logrecordObj.put("contentType", jo.get("contentType"));\r
- //logrecordObj.put("endpointId", jo.get("endpointId"));\r
- logrecordObj.put("type", jo.get("type"));\r
- logrecordObj.put("date", jo.get("date"));\r
- logrecordObj.put("contentLength", jo.get("contentLength"));\r
-\r
-\r
- LOGJSONObject newjo = new LOGJSONObject(logrecordObj);\r
- return newjo;\r
- }\r
- \r
- @Override\r
- public LOGJSONObject asJSONObject() {\r
- LOGJSONObject jo = super.asJSONObject();\r
- jo.put("type", "del");\r
- jo.put("deliveryId", user);\r
- jo.put("statusCode", result);\r
- \r
- LOGJSONObject newjo = this.reOrderObject(jo);\r
- return newjo;\r
- }\r
- @Override\r
- public void load(PreparedStatement ps) throws SQLException {\r
- ps.setString(1, "del"); // field 1: type\r
- super.load(ps); // loads fields 2-8\r
- ps.setNull (9, Types.VARCHAR);\r
- ps.setNull (10, Types.VARCHAR);\r
- ps.setString(11, getUser());\r
- ps.setNull (12, Types.INTEGER);\r
- ps.setInt (13, getSubid());\r
- ps.setString(14, getFileid());\r
- ps.setInt (15, getResult());\r
- ps.setNull (16, Types.INTEGER);\r
- ps.setNull (17, Types.VARCHAR);\r
- ps.setNull (19, Types.BIGINT);\r
- }\r
+ private int subid;\r
+ private String fileid;\r
+ private int result;\r
+ private String user;\r
+\r
+ public DeliveryRecord(String[] pp) throws ParseException {\r
+ super(pp);\r
+ String fileid = pp[5];\r
+ if (fileid.lastIndexOf('/') >= 0)\r
+ fileid = fileid.substring(fileid.lastIndexOf('/') + 1);\r
+ this.subid = Integer.parseInt(pp[4]);\r
+ this.fileid = fileid;\r
+ this.result = Integer.parseInt(pp[10]);\r
+ this.user = pp[9];\r
+ if (this.user != null && this.user.length() > 50)\r
+ this.user = this.user.substring(0, 50);\r
+ }\r
+\r
+ public DeliveryRecord(ResultSet rs) throws SQLException {\r
+ super(rs);\r
+ this.subid = rs.getInt("DELIVERY_SUBID");\r
+ this.fileid = rs.getString("DELIVERY_FILEID");\r
+ this.result = rs.getInt("RESULT");\r
+ this.user = rs.getString("USER");\r
+ }\r
+\r
+ public int getSubid() {\r
+ return subid;\r
+ }\r
+\r
+ public void setSubid(int subid) {\r
+ this.subid = subid;\r
+ }\r
+\r
+ public String getFileid() {\r
+ return fileid;\r
+ }\r
+\r
+ public void setFileid(String fileid) {\r
+ this.fileid = fileid;\r
+ }\r
+\r
+ public int getResult() {\r
+ return result;\r
+ }\r
+\r
+ public void setResult(int result) {\r
+ this.result = result;\r
+ }\r
+\r
+ public String getUser() {\r
+ return user;\r
+ }\r
+\r
+ public void setUser(String user) {\r
+ this.user = user;\r
+ }\r
+\r
+\r
+ public LOGJSONObject reOrderObject(LOGJSONObject jo) {\r
+ LinkedHashMap<String, Object> logrecordObj = new LinkedHashMap<String, Object>();\r
+\r
+ logrecordObj.put("statusCode", jo.get("statusCode"));\r
+ logrecordObj.put("deliveryId", jo.get("deliveryId"));\r
+ logrecordObj.put("publishId", jo.get("publishId"));\r
+ logrecordObj.put("requestURI", jo.get("requestURI"));\r
+ //logrecordObj.put("sourceIP", jo.get("sourceIP"));\r
+ logrecordObj.put("method", jo.get("method"));\r
+ logrecordObj.put("contentType", jo.get("contentType"));\r
+ //logrecordObj.put("endpointId", jo.get("endpointId"));\r
+ logrecordObj.put("type", jo.get("type"));\r
+ logrecordObj.put("date", jo.get("date"));\r
+ logrecordObj.put("contentLength", jo.get("contentLength"));\r
+\r
+\r
+ LOGJSONObject newjo = new LOGJSONObject(logrecordObj);\r
+ return newjo;\r
+ }\r
+\r
+ @Override\r
+ public LOGJSONObject asJSONObject() {\r
+ LOGJSONObject jo = super.asJSONObject();\r
+ jo.put("type", "del");\r
+ jo.put("deliveryId", user);\r
+ jo.put("statusCode", result);\r
+\r
+ LOGJSONObject newjo = this.reOrderObject(jo);\r
+ return newjo;\r
+ }\r
+\r
+ @Override\r
+ public void load(PreparedStatement ps) throws SQLException {\r
+ ps.setString(1, "del"); // field 1: type\r
+ super.load(ps); // loads fields 2-8\r
+ ps.setNull(9, Types.VARCHAR);\r
+ ps.setNull(10, Types.VARCHAR);\r
+ ps.setString(11, getUser());\r
+ ps.setNull(12, Types.INTEGER);\r
+ ps.setInt(13, getSubid());\r
+ ps.setString(14, getFileid());\r
+ ps.setInt(15, getResult());\r
+ ps.setNull(16, Types.INTEGER);\r
+ ps.setNull(17, Types.VARCHAR);\r
+ ps.setNull(19, Types.BIGINT);\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: EgressRoute.java,v 1.3 2013/12/16 20:30:23 eby Exp $\r
*/\r
public class EgressRoute extends NodeClass implements Comparable<EgressRoute> {\r
- private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
- private final int subid;\r
- private final int nodeid;\r
-\r
- /**\r
- * Get a set of all Egress Routes in the DB. The set is sorted according to the natural sorting order\r
- * of the routes (based on the subscription ID in each route).\r
- * @return the sorted set\r
- */\r
- public static SortedSet<EgressRoute> getAllEgressRoutes() {\r
- SortedSet<EgressRoute> set = new TreeSet<EgressRoute>();\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery("select SUBID, NODEID from EGRESS_ROUTES");\r
- while (rs.next()) {\r
- int subid = rs.getInt("SUBID");\r
- int nodeid = rs.getInt("NODEID");\r
- set.add(new EgressRoute(subid, nodeid));\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- return set;\r
- }\r
- /**\r
- * Get a single Egress Route for the subscription <i>sub</i>.\r
- * @param sub the subscription to lookup\r
- * @return an EgressRoute, or null if there is no route for this subscription\r
- */\r
- public static EgressRoute getEgressRoute(int sub) {\r
- EgressRoute v = null;\r
- PreparedStatement ps = null;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- String sql = "select NODEID from EGRESS_ROUTES where SUBID = ?";\r
- ps = conn.prepareStatement(sql);\r
- ps.setInt(1, sub);\r
- ResultSet rs = ps.executeQuery();\r
- if (rs.next()) {\r
- int node = rs.getInt("NODEID");\r
- v = new EgressRoute(sub, node);\r
- }\r
- rs.close();\r
- ps.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return v;\r
- }\r
-\r
- public EgressRoute(int subid, int nodeid) throws IllegalArgumentException {\r
- this.subid = subid;\r
- this.nodeid = nodeid;\r
+ private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
+ private final int subid;\r
+ private final int nodeid;\r
+\r
+ /**\r
+ * Get a set of all Egress Routes in the DB. The set is sorted according to the natural sorting order\r
+ * of the routes (based on the subscription ID in each route).\r
+ *\r
+ * @return the sorted set\r
+ */\r
+ public static SortedSet<EgressRoute> getAllEgressRoutes() {\r
+ SortedSet<EgressRoute> set = new TreeSet<EgressRoute>();\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery("select SUBID, NODEID from EGRESS_ROUTES");\r
+ while (rs.next()) {\r
+ int subid = rs.getInt("SUBID");\r
+ int nodeid = rs.getInt("NODEID");\r
+ set.add(new EgressRoute(subid, nodeid));\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ return set;\r
+ }\r
+\r
+ /**\r
+ * Get a single Egress Route for the subscription <i>sub</i>.\r
+ *\r
+ * @param sub the subscription to lookup\r
+ * @return an EgressRoute, or null if there is no route for this subscription\r
+ */\r
+ public static EgressRoute getEgressRoute(int sub) {\r
+ EgressRoute v = null;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ String sql = "select NODEID from EGRESS_ROUTES where SUBID = ?";\r
+ ps = conn.prepareStatement(sql);\r
+ ps.setInt(1, sub);\r
+ ResultSet rs = ps.executeQuery();\r
+ if (rs.next()) {\r
+ int node = rs.getInt("NODEID");\r
+ v = new EgressRoute(sub, node);\r
+ }\r
+ rs.close();\r
+ ps.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return v;\r
+ }\r
+\r
+ public EgressRoute(int subid, int nodeid) throws IllegalArgumentException {\r
+ this.subid = subid;\r
+ this.nodeid = nodeid;\r
// Note: unlike for Feeds, it subscriptions can be removed from the tables, so it is\r
// possible that an orphan ERT entry can exist if a sub is removed.\r
-// if (Subscription.getSubscriptionById(subid) == null)\r
-// throw new IllegalArgumentException("No such subscription: "+subid);\r
- }\r
-\r
- public EgressRoute(int subid, String node) throws IllegalArgumentException {\r
- this(subid, lookupNodeName(node));\r
- }\r
-\r
- @Override\r
- public boolean doDelete(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- String sql = "delete from EGRESS_ROUTES where SUBID = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setInt(1, subid);\r
- ps.execute();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0007 doDelete: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
-\r
- @Override\r
- public boolean doInsert(Connection c) {\r
- boolean rv = false;\r
- PreparedStatement ps = null;\r
- try {\r
- // Create the NETWORK_ROUTES row\r
- String sql = "insert into EGRESS_ROUTES (SUBID, NODEID) values (?, ?)";\r
- ps = c.prepareStatement(sql);\r
- ps.setInt(1, this.subid);\r
- ps.setInt(2, this.nodeid);\r
- ps.execute();\r
- ps.close();\r
- rv = true;\r
- } catch (SQLException e) {\r
- intlogger.warn("PROV0005 doInsert: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
-\r
- @Override\r
- public boolean doUpdate(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- String sql = "update EGRESS_ROUTES set NODEID = ? where SUBID = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setInt(1, nodeid);\r
- ps.setInt(2, subid);\r
- ps.executeUpdate();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0006 doUpdate: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
-\r
- @Override\r
- public JSONObject asJSONObject() {\r
- JSONObject jo = new JSONObject();\r
- jo.put(""+subid, lookupNodeID(nodeid));\r
- return jo;\r
- }\r
-\r
- @Override\r
- public String getKey() {\r
- return ""+subid;\r
- }\r
-\r
- @Override\r
- public boolean equals(Object obj) {\r
- if (!(obj instanceof EgressRoute))\r
- return false;\r
- EgressRoute on = (EgressRoute)obj;\r
- return (subid == on.subid) && (nodeid == on.nodeid);\r
- }\r
-\r
- @Override\r
- public int compareTo(EgressRoute o) {\r
- return this.subid - o.subid;\r
- }\r
-\r
- @Override\r
- public String toString() {\r
- return String.format("EGRESS: sub=%d, node=%d", subid, nodeid);\r
- }\r
+// if (Subscription.getSubscriptionById(subid) == null)\r
+// throw new IllegalArgumentException("No such subscription: "+subid);\r
+ }\r
+\r
+ public EgressRoute(int subid, String node) throws IllegalArgumentException {\r
+ this(subid, lookupNodeName(node));\r
+ }\r
+\r
+ @Override\r
+ public boolean doDelete(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ String sql = "delete from EGRESS_ROUTES where SUBID = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setInt(1, subid);\r
+ ps.execute();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0007 doDelete: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @Override\r
+ public boolean doInsert(Connection c) {\r
+ boolean rv = false;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ // Create the NETWORK_ROUTES row\r
+ String sql = "insert into EGRESS_ROUTES (SUBID, NODEID) values (?, ?)";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setInt(1, this.subid);\r
+ ps.setInt(2, this.nodeid);\r
+ ps.execute();\r
+ ps.close();\r
+ rv = true;\r
+ } catch (SQLException e) {\r
+ intlogger.warn("PROV0005 doInsert: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @Override\r
+ public boolean doUpdate(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ String sql = "update EGRESS_ROUTES set NODEID = ? where SUBID = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setInt(1, nodeid);\r
+ ps.setInt(2, subid);\r
+ ps.executeUpdate();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0006 doUpdate: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @Override\r
+ public JSONObject asJSONObject() {\r
+ JSONObject jo = new JSONObject();\r
+ jo.put("" + subid, lookupNodeID(nodeid));\r
+ return jo;\r
+ }\r
+\r
+ @Override\r
+ public String getKey() {\r
+ return "" + subid;\r
+ }\r
+\r
+ @Override\r
+ public boolean equals(Object obj) {\r
+ if (!(obj instanceof EgressRoute))\r
+ return false;\r
+ EgressRoute on = (EgressRoute) obj;\r
+ return (subid == on.subid) && (nodeid == on.nodeid);\r
+ }\r
+\r
+ @Override\r
+ public int compareTo(EgressRoute o) {\r
+ return this.subid - o.subid;\r
+ }\r
+\r
+ @Override\r
+ public String toString() {\r
+ return String.format("EGRESS: sub=%d, node=%d", subid, nodeid);\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: EventLogRecord.java,v 1.1 2013/04/26 21:00:25 eby Exp $\r
*/\r
public class EventLogRecord {\r
- private final String ipaddr; // Who\r
- private final String behalfof;\r
- private final String clientSubject;\r
- private final String method; // What\r
- private final String servlet;\r
- private int result; // How\r
- private String message;\r
+ private final String ipaddr; // Who\r
+ private final String behalfof;\r
+ private final String clientSubject;\r
+ private final String method; // What\r
+ private final String servlet;\r
+ private int result; // How\r
+ private String message;\r
+\r
+ public EventLogRecord(HttpServletRequest request) {\r
+ // Who is making the request\r
+ this.ipaddr = request.getRemoteAddr();\r
+ String s = request.getHeader(BaseServlet.BEHALF_HEADER);\r
+ this.behalfof = (s != null) ? s : "";\r
+ X509Certificate certs[] = (X509Certificate[]) request.getAttribute(BaseServlet.CERT_ATTRIBUTE);\r
+ this.clientSubject = (certs != null && certs.length > 0)\r
+ ? certs[0].getSubjectX500Principal().getName() : "";\r
+\r
+ // What is the request\r
+ this.method = request.getMethod();\r
+ this.servlet = request.getServletPath();\r
+\r
+ // How was it dealt with\r
+ this.result = -1;\r
+ this.message = "";\r
+ }\r
\r
- public EventLogRecord(HttpServletRequest request) {\r
- // Who is making the request\r
- this.ipaddr = request.getRemoteAddr();\r
- String s = request.getHeader(BaseServlet.BEHALF_HEADER);\r
- this.behalfof = (s != null) ? s : "";\r
- X509Certificate certs[] = (X509Certificate[]) request.getAttribute(BaseServlet.CERT_ATTRIBUTE);\r
- this.clientSubject = (certs != null && certs.length > 0)\r
- ? certs[0].getSubjectX500Principal().getName() : "";\r
+ public void setResult(int result) {\r
+ this.result = result;\r
+ }\r
\r
- // What is the request\r
- this.method = request.getMethod();\r
- this.servlet = request.getServletPath();\r
+ public void setMessage(String message) {\r
+ this.message = message;\r
+ }\r
\r
- // How was it dealt with\r
- this.result = -1;\r
- this.message = "";\r
- }\r
- public void setResult(int result) {\r
- this.result = result;\r
- }\r
- public void setMessage(String message) {\r
- this.message = message;\r
- }\r
- @Override\r
- public String toString() {\r
- return String.format(\r
- "%s %s \"%s\" %s %s %d \"%s\"",\r
- ipaddr, behalfof, clientSubject,\r
- method, servlet,\r
- result, message\r
- );\r
- }\r
+ @Override\r
+ public String toString() {\r
+ return String.format(\r
+ "%s %s \"%s\" %s %s %d \"%s\"",\r
+ ipaddr, behalfof, clientSubject,\r
+ method, servlet,\r
+ result, message\r
+ );\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* The representation of a Expiry Record, as retrieved from the DB.\r
+ *\r
* @author Robert Eby\r
* @version $Id: ExpiryRecord.java,v 1.4 2013/10/28 18:06:52 eby Exp $\r
*/\r
public class ExpiryRecord extends BaseLogRecord {\r
- private int subid;\r
- private String fileid;\r
- private int attempts;\r
- private String reason;\r
-\r
- public ExpiryRecord(String[] pp) throws ParseException {\r
- super(pp);\r
- String fileid = pp[5];\r
- if (fileid.lastIndexOf('/') >= 0)\r
- fileid = fileid.substring(fileid.lastIndexOf('/')+1);\r
- this.subid = Integer.parseInt(pp[4]);\r
- this.fileid = fileid;\r
- this.attempts = Integer.parseInt(pp[10]);\r
- this.reason = pp[9];\r
- if (!reason.equals("notRetryable") && !reason.equals("retriesExhausted") && !reason.equals("diskFull"))\r
- this.reason = "other";\r
- }\r
- public ExpiryRecord(ResultSet rs) throws SQLException {\r
- super(rs);\r
- this.subid = rs.getInt("DELIVERY_SUBID");\r
- this.fileid = rs.getString("DELIVERY_FILEID");\r
- this.attempts = rs.getInt("ATTEMPTS");\r
- this.reason = rs.getString("REASON");\r
- }\r
-\r
- public int getSubid() {\r
- return subid;\r
- }\r
-\r
- public void setSubid(int subid) {\r
- this.subid = subid;\r
- }\r
-\r
- public String getFileid() {\r
- return fileid;\r
- }\r
-\r
- public void setFileid(String fileid) {\r
- this.fileid = fileid;\r
- }\r
-\r
- public int getAttempts() {\r
- return attempts;\r
- }\r
-\r
- public void setAttempts(int attempts) {\r
- this.attempts = attempts;\r
- }\r
-\r
- public String getReason() {\r
- return reason;\r
- }\r
-\r
- public void setReason(String reason) {\r
- this.reason = reason;\r
- }\r
- \r
- public LOGJSONObject reOrderObject(LOGJSONObject jo) {\r
- LinkedHashMap<String,Object> logrecordObj = new LinkedHashMap<String,Object>();\r
- \r
- logrecordObj.put("expiryReason", jo.get("expiryReason"));\r
- logrecordObj.put("publishId", jo.get("publishId"));\r
- logrecordObj.put("attempts", jo.get("attempts"));\r
- logrecordObj.put("requestURI", jo.get("requestURI"));\r
- logrecordObj.put("method", jo.get("method"));\r
- logrecordObj.put("contentType", jo.get("contentType"));\r
- logrecordObj.put("type", jo.get("type"));\r
- logrecordObj.put("date", jo.get("date"));\r
- logrecordObj.put("contentLength", jo.get("contentLength"));\r
-\r
- LOGJSONObject newjo = new LOGJSONObject(logrecordObj);\r
- return newjo;\r
- }\r
- \r
- @Override\r
- public LOGJSONObject asJSONObject() {\r
- LOGJSONObject jo = super.asJSONObject();\r
- jo.put("type", "exp");\r
- jo.put("expiryReason", reason);\r
- jo.put("attempts", attempts);\r
- \r
- LOGJSONObject newjo = this.reOrderObject(jo);\r
- return newjo;\r
- }\r
- @Override\r
- public void load(PreparedStatement ps) throws SQLException {\r
- ps.setString(1, "exp"); // field 1: type\r
- super.load(ps); // loads fields 2-8\r
- ps.setNull (9, Types.VARCHAR);\r
- ps.setNull (10, Types.VARCHAR);\r
- ps.setNull (11, Types.VARCHAR);\r
- ps.setNull (12, Types.INTEGER);\r
- ps.setInt (13, getSubid());\r
- ps.setString(14, getFileid());\r
- ps.setNull (15, Types.INTEGER);\r
- ps.setInt (16, getAttempts());\r
- ps.setString(17, getReason());\r
- ps.setNull (19, Types.BIGINT);\r
- }\r
+ private int subid;\r
+ private String fileid;\r
+ private int attempts;\r
+ private String reason;\r
+\r
+ public ExpiryRecord(String[] pp) throws ParseException {\r
+ super(pp);\r
+ String fileid = pp[5];\r
+ if (fileid.lastIndexOf('/') >= 0)\r
+ fileid = fileid.substring(fileid.lastIndexOf('/') + 1);\r
+ this.subid = Integer.parseInt(pp[4]);\r
+ this.fileid = fileid;\r
+ this.attempts = Integer.parseInt(pp[10]);\r
+ this.reason = pp[9];\r
+ if (!reason.equals("notRetryable") && !reason.equals("retriesExhausted") && !reason.equals("diskFull"))\r
+ this.reason = "other";\r
+ }\r
+\r
+ public ExpiryRecord(ResultSet rs) throws SQLException {\r
+ super(rs);\r
+ this.subid = rs.getInt("DELIVERY_SUBID");\r
+ this.fileid = rs.getString("DELIVERY_FILEID");\r
+ this.attempts = rs.getInt("ATTEMPTS");\r
+ this.reason = rs.getString("REASON");\r
+ }\r
+\r
+ public int getSubid() {\r
+ return subid;\r
+ }\r
+\r
+ public void setSubid(int subid) {\r
+ this.subid = subid;\r
+ }\r
+\r
+ public String getFileid() {\r
+ return fileid;\r
+ }\r
+\r
+ public void setFileid(String fileid) {\r
+ this.fileid = fileid;\r
+ }\r
+\r
+ public int getAttempts() {\r
+ return attempts;\r
+ }\r
+\r
+ public void setAttempts(int attempts) {\r
+ this.attempts = attempts;\r
+ }\r
+\r
+ public String getReason() {\r
+ return reason;\r
+ }\r
+\r
+ public void setReason(String reason) {\r
+ this.reason = reason;\r
+ }\r
+\r
+ public LOGJSONObject reOrderObject(LOGJSONObject jo) {\r
+ LinkedHashMap<String, Object> logrecordObj = new LinkedHashMap<String, Object>();\r
+\r
+ logrecordObj.put("expiryReason", jo.get("expiryReason"));\r
+ logrecordObj.put("publishId", jo.get("publishId"));\r
+ logrecordObj.put("attempts", jo.get("attempts"));\r
+ logrecordObj.put("requestURI", jo.get("requestURI"));\r
+ logrecordObj.put("method", jo.get("method"));\r
+ logrecordObj.put("contentType", jo.get("contentType"));\r
+ logrecordObj.put("type", jo.get("type"));\r
+ logrecordObj.put("date", jo.get("date"));\r
+ logrecordObj.put("contentLength", jo.get("contentLength"));\r
+\r
+ LOGJSONObject newjo = new LOGJSONObject(logrecordObj);\r
+ return newjo;\r
+ }\r
+\r
+ @Override\r
+ public LOGJSONObject asJSONObject() {\r
+ LOGJSONObject jo = super.asJSONObject();\r
+ jo.put("type", "exp");\r
+ jo.put("expiryReason", reason);\r
+ jo.put("attempts", attempts);\r
+\r
+ LOGJSONObject newjo = this.reOrderObject(jo);\r
+ return newjo;\r
+ }\r
+\r
+ @Override\r
+ public void load(PreparedStatement ps) throws SQLException {\r
+ ps.setString(1, "exp"); // field 1: type\r
+ super.load(ps); // loads fields 2-8\r
+ ps.setNull(9, Types.VARCHAR);\r
+ ps.setNull(10, Types.VARCHAR);\r
+ ps.setNull(11, Types.VARCHAR);\r
+ ps.setNull(12, Types.INTEGER);\r
+ ps.setInt(13, getSubid());\r
+ ps.setString(14, getFileid());\r
+ ps.setNull(15, Types.INTEGER);\r
+ ps.setInt(16, getAttempts());\r
+ ps.setString(17, getReason());\r
+ ps.setNull(19, Types.BIGINT);\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* The representation of a Feed. Feeds can be retrieved from the DB, or stored/updated in the DB.\r
+ *\r
* @author Robert Eby\r
* @version $Id: Feed.java,v 1.13 2013/10/28 18:06:52 eby Exp $\r
*/\r
public class Feed extends Syncable {\r
- private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
- private static int next_feedid = getMaxFeedID() + 1;\r
-\r
- private int feedid;\r
- private int groupid; //New field is added - Groups feature Rally:US708115 - 1610\r
- private String name;\r
- private String version;\r
- private String description;\r
- private String business_description; // New field is added - Groups feature Rally:US708102 - 1610\r
- private FeedAuthorization authorization;\r
- private String publisher;\r
- private FeedLinks links;\r
- private boolean deleted;\r
- private boolean suspended;\r
- private Date last_mod;\r
- private Date created_date;\r
-\r
- /**\r
- * Check if a feed ID is valid.\r
- * @param id the Feed ID\r
- * @return true if it is valid\r
- */\r
- @SuppressWarnings("resource")\r
- public static boolean isFeedValid(int id) {\r
- int count = 0;\r
- try {\r
- DB db = new DB();\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery("select COUNT(*) from FEEDS where FEEDID = " + id);\r
- if (rs.next()) {\r
- count = rs.getInt(1);\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- return count != 0;\r
- }\r
- /**\r
- * Get a specific feed from the DB, based upon its ID.\r
- * @param id the Feed ID\r
- * @return the Feed object, or null if it does not exist\r
- */\r
- public static Feed getFeedById(int id) {\r
- String sql = "select * from FEEDS where FEEDID = " + id;\r
- return getFeedBySQL(sql);\r
- }\r
- /**\r
- * Get a specific feed from the DB, based upon its name and version.\r
- * @param name the name of the Feed\r
- * @param version the version of the Feed\r
- * @return the Feed object, or null if it does not exist\r
- */\r
- public static Feed getFeedByNameVersion(String name, String version) {\r
- name = name.replaceAll("'", "''");\r
- version = version.replaceAll("'", "''");\r
- String sql = "select * from FEEDS where NAME = '" + name + "' and VERSION ='" + version + "'";\r
- return getFeedBySQL(sql);\r
- }\r
- /**\r
- * Return a count of the number of active feeds in the DB.\r
- * @return the count\r
- */\r
- public static int countActiveFeeds() {\r
- int count = 0;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery("select count(*) from FEEDS where DELETED = 0");\r
- if (rs.next()) {\r
- count = rs.getInt(1);\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- intlogger.info("countActiveFeeds: "+e.getMessage());\r
- e.printStackTrace();\r
- }\r
- return count;\r
- }\r
- public static int getMaxFeedID() {\r
- int max = 0;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery("select MAX(feedid) from FEEDS");\r
- if (rs.next()) {\r
- max = rs.getInt(1);\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- intlogger.info("getMaxFeedID: "+e.getMessage());\r
- e.printStackTrace();\r
- }\r
- return max;\r
- }\r
- public static Collection<Feed> getAllFeeds() {\r
- Map<Integer, Feed> map = new HashMap<Integer, Feed>();\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery("select * from FEEDS");\r
- while (rs.next()) {\r
- Feed feed = new Feed(rs);\r
- map.put(feed.getFeedid(), feed);\r
- }\r
- rs.close();\r
-\r
- String sql = "select * from FEED_ENDPOINT_IDS";\r
- rs = stmt.executeQuery(sql);\r
- while (rs.next()) {\r
- int id = rs.getInt("FEEDID");\r
- Feed feed = map.get(id);\r
- if (feed != null) {\r
- FeedEndpointID epi = new FeedEndpointID(rs);\r
- Collection<FeedEndpointID> ecoll = feed.getAuthorization().getEndpoint_ids();\r
- ecoll.add(epi);\r
- }\r
- }\r
- rs.close();\r
-\r
- sql = "select * from FEED_ENDPOINT_ADDRS";\r
- rs = stmt.executeQuery(sql);\r
- while (rs.next()) {\r
- int id = rs.getInt("FEEDID");\r
- Feed feed = map.get(id);\r
- if (feed != null) {\r
- Collection<String> acoll = feed.getAuthorization().getEndpoint_addrs();\r
- acoll.add(rs.getString("ADDR"));\r
- }\r
- }\r
- rs.close();\r
-\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- return map.values();\r
- }\r
- public static List<String> getFilteredFeedUrlList(final String name, final String val) {\r
- List<String> list = new ArrayList<String>();\r
- String sql = "select SELF_LINK from FEEDS where DELETED = 0";\r
- if (name.equals("name")) {\r
- sql += " and NAME = ?";\r
- } else if (name.equals("publ")) {\r
- sql += " and PUBLISHER = ?";\r
- } else if (name.equals("subs")) {\r
- sql = "select distinct FEEDS.SELF_LINK from FEEDS, SUBSCRIPTIONS " +\r
- "where DELETED = 0 " +\r
- "and FEEDS.FEEDID = SUBSCRIPTIONS.FEEDID " +\r
- "and SUBSCRIPTIONS.SUBSCRIBER = ?";\r
- }\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- PreparedStatement ps = conn.prepareStatement(sql);\r
- if (sql.indexOf('?') >= 0)\r
- ps.setString(1, val);\r
- ResultSet rs = ps.executeQuery();\r
- while (rs.next()) {\r
- String t = rs.getString(1);\r
- list.add(t.trim());\r
- }\r
- rs.close();\r
- ps.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- return list;\r
- }\r
- @SuppressWarnings("resource")\r
- private static Feed getFeedBySQL(String sql) {\r
- Feed feed = null;\r
- try {\r
- DB db = new DB();\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery(sql);\r
- if (rs.next()) {\r
- feed = new Feed(rs);\r
- rs.close();\r
-\r
- sql = "select * from FEED_ENDPOINT_IDS where FEEDID = " + feed.feedid;\r
- rs = stmt.executeQuery(sql);\r
- Collection<FeedEndpointID> ecoll = feed.getAuthorization().getEndpoint_ids();\r
- while (rs.next()) {\r
- FeedEndpointID epi = new FeedEndpointID(rs);\r
- ecoll.add(epi);\r
- }\r
- rs.close();\r
-\r
- sql = "select * from FEED_ENDPOINT_ADDRS where FEEDID = " + feed.feedid;\r
- rs = stmt.executeQuery(sql);\r
- Collection<String> acoll = feed.getAuthorization().getEndpoint_addrs();\r
- while (rs.next()) {\r
- acoll.add(rs.getString("ADDR"));\r
- }\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- return feed;\r
- }\r
-\r
- public Feed() {\r
- this("", "", "","");\r
- }\r
-\r
- public Feed(String name, String version, String desc,String business_description) {\r
- this.feedid = -1;\r
- this.groupid = -1; //New field is added - Groups feature Rally:US708115 - 1610\r
- this.name = name;\r
- this.version = version;\r
- this.description = desc;\r
- this.business_description=business_description; // New field is added - Groups feature Rally:US708102 - 1610\r
- this.authorization = new FeedAuthorization();\r
- this.publisher = "";\r
- this.links = new FeedLinks();\r
- this.deleted = false;\r
- this.suspended = false;\r
- this.last_mod = new Date();\r
- this.created_date = new Date();\r
- }\r
- public Feed(ResultSet rs) throws SQLException {\r
- this.feedid = rs.getInt("FEEDID");\r
- this.groupid = rs.getInt("GROUPID"); //New field is added - Groups feature Rally:US708115 - 1610\r
- this.name = rs.getString("NAME");\r
- this.version = rs.getString("VERSION");\r
- this.description = rs.getString("DESCRIPTION");\r
- this.business_description=rs.getString("BUSINESS_DESCRIPTION"); // New field is added - Groups feature Rally:US708102 - 1610\r
- this.authorization = new FeedAuthorization();\r
- this.authorization.setClassification(rs.getString("AUTH_CLASS"));\r
- this.publisher = rs.getString("PUBLISHER");\r
- this.links = new FeedLinks();\r
- this.links.setSelf(rs.getString("SELF_LINK"));\r
- this.links.setPublish(rs.getString("PUBLISH_LINK"));\r
- this.links.setSubscribe(rs.getString("SUBSCRIBE_LINK"));\r
- this.links.setLog(rs.getString("LOG_LINK"));\r
- this.deleted = rs.getBoolean("DELETED");\r
- this.suspended = rs.getBoolean("SUSPENDED");\r
- this.last_mod = rs.getDate("LAST_MOD");\r
- this.created_date = rs.getTimestamp("CREATED_DATE");\r
- }\r
- public Feed(JSONObject jo) throws InvalidObjectException {\r
- this("", "", "","");\r
- try {\r
- // The JSONObject is assumed to contain a vnd.att-dr.feed representation\r
- this.feedid = jo.optInt("feedid", -1);\r
- this.groupid = jo.optInt("groupid"); //New field is added - Groups feature Rally:US708115 - 1610\r
- this.name = jo.getString("name");\r
- if (name.length() > 255)\r
- throw new InvalidObjectException("name field is too long");\r
- this.version = jo.getString("version");\r
- if (version.length() > 20)\r
- throw new InvalidObjectException("version field is too long");\r
- this.description = jo.optString("description");\r
- this.business_description = jo.optString("business_description"); // New field is added - Groups feature Rally:US708102 - 1610\r
- if (description.length() > 1000)\r
- throw new InvalidObjectException("technical description field is too long");\r
- \r
- if (business_description.length() > 1000) // New field is added - Groups feature Rally:US708102 - 1610\r
- throw new InvalidObjectException("business description field is too long");\r
-\r
- this.authorization = new FeedAuthorization();\r
- JSONObject jauth = jo.getJSONObject("authorization");\r
- this.authorization.setClassification(jauth.getString("classification"));\r
- if (this.authorization.getClassification().length() > 32)\r
- throw new InvalidObjectException("classification field is too long");\r
- JSONArray ja = jauth.getJSONArray("endpoint_ids");\r
- for (int i = 0; i < ja.length(); i++) {\r
- JSONObject id = ja.getJSONObject(i);\r
- FeedEndpointID fid = new FeedEndpointID(id.getString("id"), id.getString("password"));\r
- if (fid.getId().length() > 20)\r
- throw new InvalidObjectException("id field is too long ("+fid.getId()+")");\r
- if (fid.getPassword().length() > 32)\r
- throw new InvalidObjectException("password field is too long ("+fid.getPassword()+")");\r
- this.authorization.getEndpoint_ids().add(fid);\r
- }\r
- if (this.authorization.getEndpoint_ids().size() < 1)\r
- throw new InvalidObjectException("need to specify at least one endpoint_id");\r
- ja = jauth.getJSONArray("endpoint_addrs");\r
- for (int i = 0; i < ja.length(); i++) {\r
- String addr = ja.getString(i);\r
- if (!JSONUtilities.validIPAddrOrSubnet(addr))\r
- throw new InvalidObjectException("bad IP addr or subnet mask: "+addr);\r
- this.authorization.getEndpoint_addrs().add(addr);\r
- }\r
-\r
- this.publisher = jo.optString("publisher", "");\r
- this.deleted = jo.optBoolean("deleted", false);\r
- this.suspended = jo.optBoolean("suspend", false);\r
- JSONObject jol = jo.optJSONObject("links");\r
- this.links = (jol == null) ? (new FeedLinks()) : (new FeedLinks(jol));\r
- } catch (InvalidObjectException e) {\r
- throw e;\r
- } catch (Exception e) {\r
- throw new InvalidObjectException("invalid JSON: "+e.getMessage());\r
- }\r
- }\r
- public int getFeedid() {\r
- return feedid;\r
- }\r
- public void setFeedid(int feedid) {\r
- this.feedid = feedid;\r
-\r
- // Create link URLs\r
- FeedLinks fl = getLinks();\r
- fl.setSelf(URLUtilities.generateFeedURL(feedid));\r
- fl.setPublish(URLUtilities.generatePublishURL(feedid));\r
- fl.setSubscribe(URLUtilities.generateSubscribeURL(feedid));\r
- fl.setLog(URLUtilities.generateFeedLogURL(feedid));\r
- }\r
- \r
- //new getter setters for groups- Rally:US708115 - 1610\r
- public int getGroupid() {\r
- return groupid;\r
- }\r
-\r
- public void setGroupid(int groupid) {\r
- this.groupid = groupid;\r
- }\r
- \r
- public String getName() {\r
- return name;\r
- }\r
- public void setName(String name) {\r
- this.name = name;\r
- }\r
- public String getVersion() {\r
- return version;\r
- }\r
- public void setVersion(String version) {\r
- this.version = version;\r
- }\r
- public String getDescription() {\r
- return description;\r
- }\r
- public void setDescription(String description) {\r
- this.description = description;\r
- }\r
+ private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
+ private static int next_feedid = getMaxFeedID() + 1;\r
+\r
+ private int feedid;\r
+ private int groupid; //New field is added - Groups feature Rally:US708115 - 1610\r
+ private String name;\r
+ private String version;\r
+ private String description;\r
+ private String business_description; // New field is added - Groups feature Rally:US708102 - 1610\r
+ private FeedAuthorization authorization;\r
+ private String publisher;\r
+ private FeedLinks links;\r
+ private boolean deleted;\r
+ private boolean suspended;\r
+ private Date last_mod;\r
+ private Date created_date;\r
+\r
+ /**\r
+ * Check if a feed ID is valid.\r
+ *\r
+ * @param id the Feed ID\r
+ * @return true if it is valid\r
+ */\r
+ @SuppressWarnings("resource")\r
+ public static boolean isFeedValid(int id) {\r
+ int count = 0;\r
+ try {\r
+ DB db = new DB();\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery("select COUNT(*) from FEEDS where FEEDID = " + id);\r
+ if (rs.next()) {\r
+ count = rs.getInt(1);\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ return count != 0;\r
+ }\r
+\r
+ /**\r
+ * Get a specific feed from the DB, based upon its ID.\r
+ *\r
+ * @param id the Feed ID\r
+ * @return the Feed object, or null if it does not exist\r
+ */\r
+ public static Feed getFeedById(int id) {\r
+ String sql = "select * from FEEDS where FEEDID = " + id;\r
+ return getFeedBySQL(sql);\r
+ }\r
+\r
+ /**\r
+ * Get a specific feed from the DB, based upon its name and version.\r
+ *\r
+ * @param name the name of the Feed\r
+ * @param version the version of the Feed\r
+ * @return the Feed object, or null if it does not exist\r
+ */\r
+ public static Feed getFeedByNameVersion(String name, String version) {\r
+ name = name.replaceAll("'", "''");\r
+ version = version.replaceAll("'", "''");\r
+ String sql = "select * from FEEDS where NAME = '" + name + "' and VERSION ='" + version + "'";\r
+ return getFeedBySQL(sql);\r
+ }\r
+\r
+ /**\r
+ * Return a count of the number of active feeds in the DB.\r
+ *\r
+ * @return the count\r
+ */\r
+ public static int countActiveFeeds() {\r
+ int count = 0;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery("select count(*) from FEEDS where DELETED = 0");\r
+ if (rs.next()) {\r
+ count = rs.getInt(1);\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ intlogger.info("countActiveFeeds: " + e.getMessage());\r
+ e.printStackTrace();\r
+ }\r
+ return count;\r
+ }\r
+\r
+ public static int getMaxFeedID() {\r
+ int max = 0;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery("select MAX(feedid) from FEEDS");\r
+ if (rs.next()) {\r
+ max = rs.getInt(1);\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ intlogger.info("getMaxFeedID: " + e.getMessage());\r
+ e.printStackTrace();\r
+ }\r
+ return max;\r
+ }\r
+\r
+ public static Collection<Feed> getAllFeeds() {\r
+ Map<Integer, Feed> map = new HashMap<Integer, Feed>();\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery("select * from FEEDS");\r
+ while (rs.next()) {\r
+ Feed feed = new Feed(rs);\r
+ map.put(feed.getFeedid(), feed);\r
+ }\r
+ rs.close();\r
+\r
+ String sql = "select * from FEED_ENDPOINT_IDS";\r
+ rs = stmt.executeQuery(sql);\r
+ while (rs.next()) {\r
+ int id = rs.getInt("FEEDID");\r
+ Feed feed = map.get(id);\r
+ if (feed != null) {\r
+ FeedEndpointID epi = new FeedEndpointID(rs);\r
+ Collection<FeedEndpointID> ecoll = feed.getAuthorization().getEndpoint_ids();\r
+ ecoll.add(epi);\r
+ }\r
+ }\r
+ rs.close();\r
+\r
+ sql = "select * from FEED_ENDPOINT_ADDRS";\r
+ rs = stmt.executeQuery(sql);\r
+ while (rs.next()) {\r
+ int id = rs.getInt("FEEDID");\r
+ Feed feed = map.get(id);\r
+ if (feed != null) {\r
+ Collection<String> acoll = feed.getAuthorization().getEndpoint_addrs();\r
+ acoll.add(rs.getString("ADDR"));\r
+ }\r
+ }\r
+ rs.close();\r
+\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ return map.values();\r
+ }\r
+\r
+ public static List<String> getFilteredFeedUrlList(final String name, final String val) {\r
+ List<String> list = new ArrayList<String>();\r
+ String sql = "select SELF_LINK from FEEDS where DELETED = 0";\r
+ if (name.equals("name")) {\r
+ sql += " and NAME = ?";\r
+ } else if (name.equals("publ")) {\r
+ sql += " and PUBLISHER = ?";\r
+ } else if (name.equals("subs")) {\r
+ sql = "select distinct FEEDS.SELF_LINK from FEEDS, SUBSCRIPTIONS " +\r
+ "where DELETED = 0 " +\r
+ "and FEEDS.FEEDID = SUBSCRIPTIONS.FEEDID " +\r
+ "and SUBSCRIPTIONS.SUBSCRIBER = ?";\r
+ }\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ PreparedStatement ps = conn.prepareStatement(sql);\r
+ if (sql.indexOf('?') >= 0)\r
+ ps.setString(1, val);\r
+ ResultSet rs = ps.executeQuery();\r
+ while (rs.next()) {\r
+ String t = rs.getString(1);\r
+ list.add(t.trim());\r
+ }\r
+ rs.close();\r
+ ps.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ return list;\r
+ }\r
+\r
+ @SuppressWarnings("resource")\r
+ private static Feed getFeedBySQL(String sql) {\r
+ Feed feed = null;\r
+ try {\r
+ DB db = new DB();\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery(sql);\r
+ if (rs.next()) {\r
+ feed = new Feed(rs);\r
+ rs.close();\r
+\r
+ sql = "select * from FEED_ENDPOINT_IDS where FEEDID = " + feed.feedid;\r
+ rs = stmt.executeQuery(sql);\r
+ Collection<FeedEndpointID> ecoll = feed.getAuthorization().getEndpoint_ids();\r
+ while (rs.next()) {\r
+ FeedEndpointID epi = new FeedEndpointID(rs);\r
+ ecoll.add(epi);\r
+ }\r
+ rs.close();\r
+\r
+ sql = "select * from FEED_ENDPOINT_ADDRS where FEEDID = " + feed.feedid;\r
+ rs = stmt.executeQuery(sql);\r
+ Collection<String> acoll = feed.getAuthorization().getEndpoint_addrs();\r
+ while (rs.next()) {\r
+ acoll.add(rs.getString("ADDR"));\r
+ }\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ return feed;\r
+ }\r
+\r
+ public Feed() {\r
+ this("", "", "", "");\r
+ }\r
+\r
+ public Feed(String name, String version, String desc, String business_description) {\r
+ this.feedid = -1;\r
+ this.groupid = -1; //New field is added - Groups feature Rally:US708115 - 1610\r
+ this.name = name;\r
+ this.version = version;\r
+ this.description = desc;\r
+ this.business_description = business_description; // New field is added - Groups feature Rally:US708102 - 1610\r
+ this.authorization = new FeedAuthorization();\r
+ this.publisher = "";\r
+ this.links = new FeedLinks();\r
+ this.deleted = false;\r
+ this.suspended = false;\r
+ this.last_mod = new Date();\r
+ this.created_date = new Date();\r
+ }\r
+\r
+ public Feed(ResultSet rs) throws SQLException {\r
+ this.feedid = rs.getInt("FEEDID");\r
+ this.groupid = rs.getInt("GROUPID"); //New field is added - Groups feature Rally:US708115 - 1610\r
+ this.name = rs.getString("NAME");\r
+ this.version = rs.getString("VERSION");\r
+ this.description = rs.getString("DESCRIPTION");\r
+ this.business_description = rs.getString("BUSINESS_DESCRIPTION"); // New field is added - Groups feature Rally:US708102 - 1610\r
+ this.authorization = new FeedAuthorization();\r
+ this.authorization.setClassification(rs.getString("AUTH_CLASS"));\r
+ this.publisher = rs.getString("PUBLISHER");\r
+ this.links = new FeedLinks();\r
+ this.links.setSelf(rs.getString("SELF_LINK"));\r
+ this.links.setPublish(rs.getString("PUBLISH_LINK"));\r
+ this.links.setSubscribe(rs.getString("SUBSCRIBE_LINK"));\r
+ this.links.setLog(rs.getString("LOG_LINK"));\r
+ this.deleted = rs.getBoolean("DELETED");\r
+ this.suspended = rs.getBoolean("SUSPENDED");\r
+ this.last_mod = rs.getDate("LAST_MOD");\r
+ this.created_date = rs.getTimestamp("CREATED_DATE");\r
+ }\r
+\r
+ public Feed(JSONObject jo) throws InvalidObjectException {\r
+ this("", "", "", "");\r
+ try {\r
+ // The JSONObject is assumed to contain a vnd.att-dr.feed representation\r
+ this.feedid = jo.optInt("feedid", -1);\r
+ this.groupid = jo.optInt("groupid"); //New field is added - Groups feature Rally:US708115 - 1610\r
+ this.name = jo.getString("name");\r
+ if (name.length() > 255)\r
+ throw new InvalidObjectException("name field is too long");\r
+ this.version = jo.getString("version");\r
+ if (version.length() > 20)\r
+ throw new InvalidObjectException("version field is too long");\r
+ this.description = jo.optString("description");\r
+ this.business_description = jo.optString("business_description"); // New field is added - Groups feature Rally:US708102 - 1610\r
+ if (description.length() > 1000)\r
+ throw new InvalidObjectException("technical description field is too long");\r
+\r
+ if (business_description.length() > 1000) // New field is added - Groups feature Rally:US708102 - 1610\r
+ throw new InvalidObjectException("business description field is too long");\r
+\r
+ this.authorization = new FeedAuthorization();\r
+ JSONObject jauth = jo.getJSONObject("authorization");\r
+ this.authorization.setClassification(jauth.getString("classification"));\r
+ if (this.authorization.getClassification().length() > 32)\r
+ throw new InvalidObjectException("classification field is too long");\r
+ JSONArray ja = jauth.getJSONArray("endpoint_ids");\r
+ for (int i = 0; i < ja.length(); i++) {\r
+ JSONObject id = ja.getJSONObject(i);\r
+ FeedEndpointID fid = new FeedEndpointID(id.getString("id"), id.getString("password"));\r
+ if (fid.getId().length() > 20)\r
+ throw new InvalidObjectException("id field is too long (" + fid.getId() + ")");\r
+ if (fid.getPassword().length() > 32)\r
+ throw new InvalidObjectException("password field is too long (" + fid.getPassword() + ")");\r
+ this.authorization.getEndpoint_ids().add(fid);\r
+ }\r
+ if (this.authorization.getEndpoint_ids().size() < 1)\r
+ throw new InvalidObjectException("need to specify at least one endpoint_id");\r
+ ja = jauth.getJSONArray("endpoint_addrs");\r
+ for (int i = 0; i < ja.length(); i++) {\r
+ String addr = ja.getString(i);\r
+ if (!JSONUtilities.validIPAddrOrSubnet(addr))\r
+ throw new InvalidObjectException("bad IP addr or subnet mask: " + addr);\r
+ this.authorization.getEndpoint_addrs().add(addr);\r
+ }\r
+\r
+ this.publisher = jo.optString("publisher", "");\r
+ this.deleted = jo.optBoolean("deleted", false);\r
+ this.suspended = jo.optBoolean("suspend", false);\r
+ JSONObject jol = jo.optJSONObject("links");\r
+ this.links = (jol == null) ? (new FeedLinks()) : (new FeedLinks(jol));\r
+ } catch (InvalidObjectException e) {\r
+ throw e;\r
+ } catch (Exception e) {\r
+ throw new InvalidObjectException("invalid JSON: " + e.getMessage());\r
+ }\r
+ }\r
+\r
+ public int getFeedid() {\r
+ return feedid;\r
+ }\r
+\r
+ public void setFeedid(int feedid) {\r
+ this.feedid = feedid;\r
+\r
+ // Create link URLs\r
+ FeedLinks fl = getLinks();\r
+ fl.setSelf(URLUtilities.generateFeedURL(feedid));\r
+ fl.setPublish(URLUtilities.generatePublishURL(feedid));\r
+ fl.setSubscribe(URLUtilities.generateSubscribeURL(feedid));\r
+ fl.setLog(URLUtilities.generateFeedLogURL(feedid));\r
+ }\r
+\r
+ //new getter setters for groups- Rally:US708115 - 1610\r
+ public int getGroupid() {\r
+ return groupid;\r
+ }\r
+\r
+ public void setGroupid(int groupid) {\r
+ this.groupid = groupid;\r
+ }\r
+\r
+ public String getName() {\r
+ return name;\r
+ }\r
+\r
+ public void setName(String name) {\r
+ this.name = name;\r
+ }\r
+\r
+ public String getVersion() {\r
+ return version;\r
+ }\r
+\r
+ public void setVersion(String version) {\r
+ this.version = version;\r
+ }\r
+\r
+ public String getDescription() {\r
+ return description;\r
+ }\r
+\r
+ public void setDescription(String description) {\r
+ this.description = description;\r
+ }\r
+\r
// New field is added - Groups feature Rally:US708102 - 1610\r
- public String getBusiness_description() {\r
- return business_description;\r
- }\r
-\r
- public void setBusiness_description(String business_description) {\r
- this.business_description = business_description;\r
- }\r
-\r
- public FeedAuthorization getAuthorization() {\r
- return authorization;\r
- }\r
- public void setAuthorization(FeedAuthorization authorization) {\r
- this.authorization = authorization;\r
- }\r
- public String getPublisher() {\r
- return publisher;\r
- }\r
- public void setPublisher(String publisher) {\r
- if (publisher != null) {\r
- if (publisher.length() > 8)\r
- publisher = publisher.substring(0, 8);\r
- this.publisher = publisher;\r
- }\r
- }\r
- public FeedLinks getLinks() {\r
- return links;\r
- }\r
- public void setLinks(FeedLinks links) {\r
- this.links = links;\r
- }\r
-\r
- public boolean isDeleted() {\r
- return deleted;\r
- }\r
-\r
- public void setDeleted(boolean deleted) {\r
- this.deleted = deleted;\r
- }\r
-\r
- public boolean isSuspended() {\r
- return suspended;\r
- }\r
-\r
- public void setSuspended(boolean suspended) {\r
- this.suspended = suspended;\r
- }\r
-\r
- public Date getLast_mod() {\r
- return last_mod;\r
- }\r
-\r
- public Date getCreated_date() {\r
- return created_date;\r
- }\r
-\r
- @Override\r
- public JSONObject asJSONObject() {\r
- JSONObject jo = new JSONObject();\r
- jo.put("feedid", feedid);\r
- jo.put("groupid", groupid); //New field is added - Groups feature Rally:US708115 - 1610\r
- jo.put("name", name);\r
- jo.put("version", version);\r
- jo.put("description", description);\r
- jo.put("business_description", business_description); // New field is added - Groups feature Rally:US708102 - 1610\r
- jo.put("authorization", authorization.asJSONObject());\r
- jo.put("publisher", publisher);\r
- jo.put("links", links.asJSONObject());\r
- jo.put("deleted", deleted);\r
- jo.put("suspend", suspended);\r
- jo.put("last_mod", last_mod.getTime());\r
- jo.put("created_date", created_date.getTime());\r
- return jo;\r
- }\r
- public JSONObject asLimitedJSONObject() {\r
- JSONObject jo = asJSONObject();\r
- jo.remove("deleted");\r
- jo.remove("feedid");\r
- jo.remove("last_mod");\r
- jo.remove("created_date");\r
- return jo;\r
- }\r
- public JSONObject asJSONObject(boolean hidepasswords) {\r
- JSONObject jo = asJSONObject();\r
- if (hidepasswords) {\r
- jo.remove("feedid"); // we no longer hide passwords, however we do hide these\r
- jo.remove("deleted");\r
- jo.remove("last_mod");\r
- jo.remove("created_date");\r
- }\r
- return jo;\r
- }\r
- @Override\r
- public boolean doDelete(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- String sql = "delete from FEEDS where FEEDID = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setInt(1, feedid);\r
- ps.execute();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0007 doDelete: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
- @Override\r
- public synchronized boolean doInsert(Connection c) {\r
- boolean rv = true;\r
-// PreparedStatement ps = null;\r
- try {\r
- if (feedid == -1) {\r
-// // Get the next feedid\r
-// String sql = "insert into FEEDS_UNIQUEID (FEEDID) values (0)";\r
-// ps = c.prepareStatement(sql, new String[] { "FEEDID" });\r
-// ps.execute();\r
-// ResultSet rs = ps.getGeneratedKeys();\r
-// rs.first();\r
-// setFeedid(rs.getInt(1));\r
- // No feed ID assigned yet, so assign the next available one\r
- setFeedid(next_feedid++);\r
- }\r
- // In case we insert a feed from synchronization\r
- if (feedid > next_feedid)\r
- next_feedid = feedid+1;\r
-\r
- // Create FEED_ENDPOINT_IDS rows\r
- FeedAuthorization auth = getAuthorization();\r
- String sql = "insert into FEED_ENDPOINT_IDS values (?, ?, ?)";\r
- PreparedStatement ps2 = c.prepareStatement(sql);\r
- for (FeedEndpointID fid : auth.getEndpoint_ids()) {\r
- ps2.setInt(1, feedid);\r
- ps2.setString(2, fid.getId());\r
- ps2.setString(3, fid.getPassword());\r
- ps2.executeUpdate();\r
- }\r
- ps2.close();\r
-\r
- // Create FEED_ENDPOINT_ADDRS rows\r
- sql = "insert into FEED_ENDPOINT_ADDRS values (?, ?)";\r
- ps2 = c.prepareStatement(sql);\r
- for (String t : auth.getEndpoint_addrs()) {\r
- ps2.setInt(1, feedid);\r
- ps2.setString(2, t);\r
- ps2.executeUpdate();\r
- }\r
- ps2.close();\r
-\r
- // Finally, create the FEEDS row\r
- sql = "insert into FEEDS (FEEDID, NAME, VERSION, DESCRIPTION, AUTH_CLASS, PUBLISHER, SELF_LINK, PUBLISH_LINK, SUBSCRIBE_LINK, LOG_LINK, DELETED, SUSPENDED,BUSINESS_DESCRIPTION, GROUPID) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?, ?)";\r
- ps2 = c.prepareStatement(sql);\r
- ps2.setInt(1, feedid);\r
- ps2.setString(2, getName());\r
- ps2.setString(3, getVersion());\r
- ps2.setString(4, getDescription());\r
- ps2.setString(5, getAuthorization().getClassification());\r
- ps2.setString(6, getPublisher());\r
- ps2.setString(7, getLinks().getSelf());\r
- ps2.setString(8, getLinks().getPublish());\r
- ps2.setString(9, getLinks().getSubscribe());\r
- ps2.setString(10, getLinks().getLog());\r
- ps2.setBoolean(11, isDeleted());\r
- ps2.setBoolean(12, isSuspended());\r
- ps2.setString(13,getBusiness_description()); // New field is added - Groups feature Rally:US708102 - 1610\r
- ps2.setInt(14,groupid); //New field is added - Groups feature Rally:US708115 - 1610\r
- ps2.executeUpdate();\r
- ps2.close();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0005 doInsert: "+e.getMessage());\r
- e.printStackTrace();\r
-// } finally {\r
-// try {\r
-// ps.close();\r
-// } catch (SQLException e) {\r
-// e.printStackTrace();\r
-// }\r
- }\r
- return rv;\r
- }\r
- @Override\r
- public boolean doUpdate(Connection c) {\r
- boolean rv = true;\r
- Feed oldobj = getFeedById(feedid);\r
- PreparedStatement ps = null;\r
- try {\r
- Set<FeedEndpointID> newset = getAuthorization().getEndpoint_ids();\r
- Set<FeedEndpointID> oldset = oldobj.getAuthorization().getEndpoint_ids();\r
-\r
- // Insert new FEED_ENDPOINT_IDS rows\r
- String sql = "insert into FEED_ENDPOINT_IDS values (?, ?, ?)";\r
- ps = c.prepareStatement(sql);\r
- for (FeedEndpointID fid : newset) {\r
- if (!oldset.contains(fid)) {\r
- ps.setInt(1, feedid);\r
- ps.setString(2, fid.getId());\r
- ps.setString(3, fid.getPassword());\r
- ps.executeUpdate();\r
- }\r
- }\r
- ps.close();\r
-\r
- // Delete old FEED_ENDPOINT_IDS rows\r
- sql = "delete from FEED_ENDPOINT_IDS where FEEDID = ? AND USERID = ? AND PASSWORD = ?";\r
- ps = c.prepareStatement(sql);\r
- for (FeedEndpointID fid : oldset) {\r
- if (!newset.contains(fid)) {\r
- ps.setInt(1, feedid);\r
- ps.setString(2, fid.getId());\r
- ps.setString(3, fid.getPassword());\r
- ps.executeUpdate();\r
- }\r
- }\r
- ps.close();\r
-\r
- // Insert new FEED_ENDPOINT_ADDRS rows\r
- Set<String> newset2 = getAuthorization().getEndpoint_addrs();\r
- Set<String> oldset2 = oldobj.getAuthorization().getEndpoint_addrs();\r
- sql = "insert into FEED_ENDPOINT_ADDRS values (?, ?)";\r
- ps = c.prepareStatement(sql);\r
- for (String t : newset2) {\r
- if (!oldset2.contains(t)) {\r
- ps.setInt(1, feedid);\r
- ps.setString(2, t);\r
- ps.executeUpdate();\r
- }\r
- }\r
- ps.close();\r
-\r
- // Delete old FEED_ENDPOINT_ADDRS rows\r
- sql = "delete from FEED_ENDPOINT_ADDRS where FEEDID = ? AND ADDR = ?";\r
- ps = c.prepareStatement(sql);\r
- for (String t : oldset2) {\r
- if (!newset2.contains(t)) {\r
- ps.setInt(1, feedid);\r
- ps.setString(2, t);\r
- ps.executeUpdate();\r
- }\r
- }\r
- ps.close();\r
-\r
- // Finally, update the FEEDS row\r
- sql = "update FEEDS set DESCRIPTION = ?, AUTH_CLASS = ?, DELETED = ?, SUSPENDED = ?, BUSINESS_DESCRIPTION=?, GROUPID=? where FEEDID = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setString(1, getDescription());\r
- ps.setString(2, getAuthorization().getClassification());\r
- ps.setInt(3, deleted ? 1 : 0);\r
- ps.setInt(4, suspended ? 1 : 0);\r
- ps.setString(5, getBusiness_description()); // New field is added - Groups feature Rally:US708102 - 1610\r
- ps.setInt(6, groupid); //New field is added - Groups feature Rally:US708115 - 1610\r
- ps.setInt(7, feedid);\r
- ps.executeUpdate();\r
- ps.close();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0006 doUpdate: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- if (ps != null)\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
- \r
- /**Rally US708115\r
- * Change Ownership of FEED - 1610\r
- * */\r
- public boolean changeOwnerShip() {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- \r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection c = db.getConnection();\r
- String sql = "update FEEDS set PUBLISHER = ? where FEEDID = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setString(1, this.publisher);\r
- ps.setInt(2, feedid);\r
- ps.execute();\r
- ps.close();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0006 doUpdate: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
-\r
-\r
- @Override\r
- public String getKey() {\r
- return ""+getFeedid();\r
- }\r
-\r
- @Override\r
- public boolean equals(Object obj) {\r
- if (!(obj instanceof Feed))\r
- return false;\r
- Feed of = (Feed) obj;\r
- if (feedid != of.feedid)\r
- return false;\r
- if (groupid != of.groupid) //New field is added - Groups feature Rally:US708115 - 1610\r
- return false;\r
- if (!name.equals(of.name))\r
- return false;\r
- if (!version.equals(of.version))\r
- return false;\r
- if (!description.equals(of.description))\r
- return false;\r
- if (!business_description.equals(of.business_description)) // New field is added - Groups feature Rally:US708102 - 1610\r
- return false;\r
- if (!publisher.equals(of.publisher))\r
- return false;\r
- if (!authorization.equals(of.authorization))\r
- return false;\r
- if (!links.equals(of.links))\r
- return false;\r
- if (deleted != of.deleted)\r
- return false;\r
- if (suspended != of.suspended)\r
- return false;\r
- return true;\r
- }\r
-\r
- @Override\r
- public String toString() {\r
- return "FEED: feedid=" + feedid + ", name=" + name + ", version=" + version;\r
- }\r
+ public String getBusiness_description() {\r
+ return business_description;\r
+ }\r
+\r
+ public void setBusiness_description(String business_description) {\r
+ this.business_description = business_description;\r
+ }\r
+\r
+ public FeedAuthorization getAuthorization() {\r
+ return authorization;\r
+ }\r
+\r
+ public void setAuthorization(FeedAuthorization authorization) {\r
+ this.authorization = authorization;\r
+ }\r
+\r
+ public String getPublisher() {\r
+ return publisher;\r
+ }\r
+\r
+ public void setPublisher(String publisher) {\r
+ if (publisher != null) {\r
+ if (publisher.length() > 8)\r
+ publisher = publisher.substring(0, 8);\r
+ this.publisher = publisher;\r
+ }\r
+ }\r
+\r
+ public FeedLinks getLinks() {\r
+ return links;\r
+ }\r
+\r
+ public void setLinks(FeedLinks links) {\r
+ this.links = links;\r
+ }\r
+\r
+ public boolean isDeleted() {\r
+ return deleted;\r
+ }\r
+\r
+ public void setDeleted(boolean deleted) {\r
+ this.deleted = deleted;\r
+ }\r
+\r
+ public boolean isSuspended() {\r
+ return suspended;\r
+ }\r
+\r
+ public void setSuspended(boolean suspended) {\r
+ this.suspended = suspended;\r
+ }\r
+\r
+ public Date getLast_mod() {\r
+ return last_mod;\r
+ }\r
+\r
+ public Date getCreated_date() {\r
+ return created_date;\r
+ }\r
+\r
+ @Override\r
+ public JSONObject asJSONObject() {\r
+ JSONObject jo = new JSONObject();\r
+ jo.put("feedid", feedid);\r
+ jo.put("groupid", groupid); //New field is added - Groups feature Rally:US708115 - 1610\r
+ jo.put("name", name);\r
+ jo.put("version", version);\r
+ jo.put("description", description);\r
+ jo.put("business_description", business_description); // New field is added - Groups feature Rally:US708102 - 1610\r
+ jo.put("authorization", authorization.asJSONObject());\r
+ jo.put("publisher", publisher);\r
+ jo.put("links", links.asJSONObject());\r
+ jo.put("deleted", deleted);\r
+ jo.put("suspend", suspended);\r
+ jo.put("last_mod", last_mod.getTime());\r
+ jo.put("created_date", created_date.getTime());\r
+ return jo;\r
+ }\r
+\r
+ public JSONObject asLimitedJSONObject() {\r
+ JSONObject jo = asJSONObject();\r
+ jo.remove("deleted");\r
+ jo.remove("feedid");\r
+ jo.remove("last_mod");\r
+ jo.remove("created_date");\r
+ return jo;\r
+ }\r
+\r
+ public JSONObject asJSONObject(boolean hidepasswords) {\r
+ JSONObject jo = asJSONObject();\r
+ if (hidepasswords) {\r
+ jo.remove("feedid"); // we no longer hide passwords, however we do hide these\r
+ jo.remove("deleted");\r
+ jo.remove("last_mod");\r
+ jo.remove("created_date");\r
+ }\r
+ return jo;\r
+ }\r
+\r
+ @Override\r
+ public boolean doDelete(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ String sql = "delete from FEEDS where FEEDID = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setInt(1, feedid);\r
+ ps.execute();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0007 doDelete: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @Override\r
+ public synchronized boolean doInsert(Connection c) {\r
+ boolean rv = true;\r
+// PreparedStatement ps = null;\r
+ try {\r
+ if (feedid == -1) {\r
+// // Get the next feedid\r
+// String sql = "insert into FEEDS_UNIQUEID (FEEDID) values (0)";\r
+// ps = c.prepareStatement(sql, new String[] { "FEEDID" });\r
+// ps.execute();\r
+// ResultSet rs = ps.getGeneratedKeys();\r
+// rs.first();\r
+// setFeedid(rs.getInt(1));\r
+ // No feed ID assigned yet, so assign the next available one\r
+ setFeedid(next_feedid++);\r
+ }\r
+ // In case we insert a feed from synchronization\r
+ if (feedid > next_feedid)\r
+ next_feedid = feedid + 1;\r
+\r
+ // Create FEED_ENDPOINT_IDS rows\r
+ FeedAuthorization auth = getAuthorization();\r
+ String sql = "insert into FEED_ENDPOINT_IDS values (?, ?, ?)";\r
+ PreparedStatement ps2 = c.prepareStatement(sql);\r
+ for (FeedEndpointID fid : auth.getEndpoint_ids()) {\r
+ ps2.setInt(1, feedid);\r
+ ps2.setString(2, fid.getId());\r
+ ps2.setString(3, fid.getPassword());\r
+ ps2.executeUpdate();\r
+ }\r
+ ps2.close();\r
+\r
+ // Create FEED_ENDPOINT_ADDRS rows\r
+ sql = "insert into FEED_ENDPOINT_ADDRS values (?, ?)";\r
+ ps2 = c.prepareStatement(sql);\r
+ for (String t : auth.getEndpoint_addrs()) {\r
+ ps2.setInt(1, feedid);\r
+ ps2.setString(2, t);\r
+ ps2.executeUpdate();\r
+ }\r
+ ps2.close();\r
+\r
+ // Finally, create the FEEDS row\r
+ sql = "insert into FEEDS (FEEDID, NAME, VERSION, DESCRIPTION, AUTH_CLASS, PUBLISHER, SELF_LINK, PUBLISH_LINK, SUBSCRIBE_LINK, LOG_LINK, DELETED, SUSPENDED,BUSINESS_DESCRIPTION, GROUPID) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?, ?)";\r
+ ps2 = c.prepareStatement(sql);\r
+ ps2.setInt(1, feedid);\r
+ ps2.setString(2, getName());\r
+ ps2.setString(3, getVersion());\r
+ ps2.setString(4, getDescription());\r
+ ps2.setString(5, getAuthorization().getClassification());\r
+ ps2.setString(6, getPublisher());\r
+ ps2.setString(7, getLinks().getSelf());\r
+ ps2.setString(8, getLinks().getPublish());\r
+ ps2.setString(9, getLinks().getSubscribe());\r
+ ps2.setString(10, getLinks().getLog());\r
+ ps2.setBoolean(11, isDeleted());\r
+ ps2.setBoolean(12, isSuspended());\r
+ ps2.setString(13, getBusiness_description()); // New field is added - Groups feature Rally:US708102 - 1610\r
+ ps2.setInt(14, groupid); //New field is added - Groups feature Rally:US708115 - 1610\r
+ ps2.executeUpdate();\r
+ ps2.close();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0005 doInsert: " + e.getMessage());\r
+ e.printStackTrace();\r
+// } finally {\r
+// try {\r
+// ps.close();\r
+// } catch (SQLException e) {\r
+// e.printStackTrace();\r
+// }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @Override\r
+ public boolean doUpdate(Connection c) {\r
+ boolean rv = true;\r
+ Feed oldobj = getFeedById(feedid);\r
+ PreparedStatement ps = null;\r
+ try {\r
+ Set<FeedEndpointID> newset = getAuthorization().getEndpoint_ids();\r
+ Set<FeedEndpointID> oldset = oldobj.getAuthorization().getEndpoint_ids();\r
+\r
+ // Insert new FEED_ENDPOINT_IDS rows\r
+ String sql = "insert into FEED_ENDPOINT_IDS values (?, ?, ?)";\r
+ ps = c.prepareStatement(sql);\r
+ for (FeedEndpointID fid : newset) {\r
+ if (!oldset.contains(fid)) {\r
+ ps.setInt(1, feedid);\r
+ ps.setString(2, fid.getId());\r
+ ps.setString(3, fid.getPassword());\r
+ ps.executeUpdate();\r
+ }\r
+ }\r
+ ps.close();\r
+\r
+ // Delete old FEED_ENDPOINT_IDS rows\r
+ sql = "delete from FEED_ENDPOINT_IDS where FEEDID = ? AND USERID = ? AND PASSWORD = ?";\r
+ ps = c.prepareStatement(sql);\r
+ for (FeedEndpointID fid : oldset) {\r
+ if (!newset.contains(fid)) {\r
+ ps.setInt(1, feedid);\r
+ ps.setString(2, fid.getId());\r
+ ps.setString(3, fid.getPassword());\r
+ ps.executeUpdate();\r
+ }\r
+ }\r
+ ps.close();\r
+\r
+ // Insert new FEED_ENDPOINT_ADDRS rows\r
+ Set<String> newset2 = getAuthorization().getEndpoint_addrs();\r
+ Set<String> oldset2 = oldobj.getAuthorization().getEndpoint_addrs();\r
+ sql = "insert into FEED_ENDPOINT_ADDRS values (?, ?)";\r
+ ps = c.prepareStatement(sql);\r
+ for (String t : newset2) {\r
+ if (!oldset2.contains(t)) {\r
+ ps.setInt(1, feedid);\r
+ ps.setString(2, t);\r
+ ps.executeUpdate();\r
+ }\r
+ }\r
+ ps.close();\r
+\r
+ // Delete old FEED_ENDPOINT_ADDRS rows\r
+ sql = "delete from FEED_ENDPOINT_ADDRS where FEEDID = ? AND ADDR = ?";\r
+ ps = c.prepareStatement(sql);\r
+ for (String t : oldset2) {\r
+ if (!newset2.contains(t)) {\r
+ ps.setInt(1, feedid);\r
+ ps.setString(2, t);\r
+ ps.executeUpdate();\r
+ }\r
+ }\r
+ ps.close();\r
+\r
+ // Finally, update the FEEDS row\r
+ sql = "update FEEDS set DESCRIPTION = ?, AUTH_CLASS = ?, DELETED = ?, SUSPENDED = ?, BUSINESS_DESCRIPTION=?, GROUPID=? where FEEDID = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setString(1, getDescription());\r
+ ps.setString(2, getAuthorization().getClassification());\r
+ ps.setInt(3, deleted ? 1 : 0);\r
+ ps.setInt(4, suspended ? 1 : 0);\r
+ ps.setString(5, getBusiness_description()); // New field is added - Groups feature Rally:US708102 - 1610\r
+ ps.setInt(6, groupid); //New field is added - Groups feature Rally:US708115 - 1610\r
+ ps.setInt(7, feedid);\r
+ ps.executeUpdate();\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0006 doUpdate: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ if (ps != null)\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ /**\r
+ * Rally US708115\r
+ * Change Ownership of FEED - 1610\r
+ */\r
+ public boolean changeOwnerShip() {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection c = db.getConnection();\r
+ String sql = "update FEEDS set PUBLISHER = ? where FEEDID = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setString(1, this.publisher);\r
+ ps.setInt(2, feedid);\r
+ ps.execute();\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0006 doUpdate: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+\r
+ @Override\r
+ public String getKey() {\r
+ return "" + getFeedid();\r
+ }\r
+\r
+ @Override\r
+ public boolean equals(Object obj) {\r
+ if (!(obj instanceof Feed))\r
+ return false;\r
+ Feed of = (Feed) obj;\r
+ if (feedid != of.feedid)\r
+ return false;\r
+ if (groupid != of.groupid) //New field is added - Groups feature Rally:US708115 - 1610\r
+ return false;\r
+ if (!name.equals(of.name))\r
+ return false;\r
+ if (!version.equals(of.version))\r
+ return false;\r
+ if (!description.equals(of.description))\r
+ return false;\r
+ if (!business_description.equals(of.business_description)) // New field is added - Groups feature Rally:US708102 - 1610\r
+ return false;\r
+ if (!publisher.equals(of.publisher))\r
+ return false;\r
+ if (!authorization.equals(of.authorization))\r
+ return false;\r
+ if (!links.equals(of.links))\r
+ return false;\r
+ if (deleted != of.deleted)\r
+ return false;\r
+ if (suspended != of.suspended)\r
+ return false;\r
+ return true;\r
+ }\r
+\r
+ @Override\r
+ public String toString() {\r
+ return "FEED: feedid=" + feedid + ", name=" + name + ", version=" + version;\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* The representation of a Feed authorization. This encapsulates the authorization information about a feed.\r
+ *\r
* @author Robert Eby\r
* @version $Id: FeedAuthorization.java,v 1.2 2013/06/20 14:11:05 eby Exp $\r
*/\r
public class FeedAuthorization implements JSONable {\r
- private String classification;\r
- private Set<FeedEndpointID> endpoint_ids;\r
- private Set<String> endpoint_addrs;\r
+ private String classification;\r
+ private Set<FeedEndpointID> endpoint_ids;\r
+ private Set<String> endpoint_addrs;\r
+\r
+ public FeedAuthorization() {\r
+ this.classification = "";\r
+ this.endpoint_ids = new HashSet<FeedEndpointID>();\r
+ this.endpoint_addrs = new HashSet<String>();\r
+ }\r
+\r
+ public String getClassification() {\r
+ return classification;\r
+ }\r
+\r
+ public void setClassification(String classification) {\r
+ this.classification = classification;\r
+ }\r
+\r
+ public Set<FeedEndpointID> getEndpoint_ids() {\r
+ return endpoint_ids;\r
+ }\r
+\r
+ public void setEndpoint_ids(Set<FeedEndpointID> endpoint_ids) {\r
+ this.endpoint_ids = endpoint_ids;\r
+ }\r
+\r
+ public Set<String> getEndpoint_addrs() {\r
+ return endpoint_addrs;\r
+ }\r
+\r
+ public void setEndpoint_addrs(Set<String> endpoint_addrs) {\r
+ this.endpoint_addrs = endpoint_addrs;\r
+ }\r
\r
- public FeedAuthorization() {\r
- this.classification = "";\r
- this.endpoint_ids = new HashSet<FeedEndpointID>();\r
- this.endpoint_addrs = new HashSet<String>();\r
- }\r
- public String getClassification() {\r
- return classification;\r
- }\r
- public void setClassification(String classification) {\r
- this.classification = classification;\r
- }\r
- public Set<FeedEndpointID> getEndpoint_ids() {\r
- return endpoint_ids;\r
- }\r
- public void setEndpoint_ids(Set<FeedEndpointID> endpoint_ids) {\r
- this.endpoint_ids = endpoint_ids;\r
- }\r
- public Set<String> getEndpoint_addrs() {\r
- return endpoint_addrs;\r
- }\r
- public void setEndpoint_addrs(Set<String> endpoint_addrs) {\r
- this.endpoint_addrs = endpoint_addrs;\r
- }\r
+ @Override\r
+ public JSONObject asJSONObject() {\r
+ JSONObject jo = new JSONObject();\r
+ jo.put("classification", classification);\r
+ JSONArray ja = new JSONArray();\r
+ for (FeedEndpointID eid : endpoint_ids) {\r
+ ja.put(eid.asJSONObject());\r
+ }\r
+ jo.put("endpoint_ids", ja);\r
+ ja = new JSONArray();\r
+ for (String t : endpoint_addrs) {\r
+ ja.put(t);\r
+ }\r
+ jo.put("endpoint_addrs", ja);\r
+ return jo;\r
+ }\r
\r
- @Override\r
- public JSONObject asJSONObject() {\r
- JSONObject jo = new JSONObject();\r
- jo.put("classification", classification);\r
- JSONArray ja = new JSONArray();\r
- for (FeedEndpointID eid : endpoint_ids) {\r
- ja.put(eid.asJSONObject());\r
- }\r
- jo.put("endpoint_ids", ja);\r
- ja = new JSONArray();\r
- for (String t : endpoint_addrs) {\r
- ja.put(t);\r
- }\r
- jo.put("endpoint_addrs", ja);\r
- return jo;\r
- }\r
- @Override\r
- public boolean equals(Object obj) {\r
- if (!(obj instanceof FeedAuthorization))\r
- return false;\r
- FeedAuthorization of = (FeedAuthorization) obj;\r
- if (!classification.equals(of.classification))\r
- return false;\r
- if (!endpoint_ids.equals(of.endpoint_ids))\r
- return false;\r
- if (!endpoint_addrs.equals(of.endpoint_addrs))\r
- return false;\r
- return true;\r
- }\r
+ @Override\r
+ public boolean equals(Object obj) {\r
+ if (!(obj instanceof FeedAuthorization))\r
+ return false;\r
+ FeedAuthorization of = (FeedAuthorization) obj;\r
+ if (!classification.equals(of.classification))\r
+ return false;\r
+ if (!endpoint_ids.equals(of.endpoint_ids))\r
+ return false;\r
+ if (!endpoint_addrs.equals(of.endpoint_addrs))\r
+ return false;\r
+ return true;\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* The representation of a Feed endpoint. This contains a login/password pair.\r
+ *\r
* @author Robert Eby\r
* @version $Id: FeedEndpointID.java,v 1.1 2013/04/26 21:00:26 eby Exp $\r
*/\r
public class FeedEndpointID implements JSONable {\r
- private String id;\r
- private String password;\r
+ private String id;\r
+ private String password;\r
+\r
+ public FeedEndpointID() {\r
+ this("", "");\r
+ }\r
+\r
+ public FeedEndpointID(String id, String password) {\r
+ this.id = id;\r
+ this.password = password;\r
+ }\r
+\r
+ public FeedEndpointID(ResultSet rs) throws SQLException {\r
+ this.id = rs.getString("USERID");\r
+ this.password = rs.getString("PASSWORD");\r
+ }\r
+\r
+ public String getId() {\r
+ return id;\r
+ }\r
\r
- public FeedEndpointID() {\r
- this("", "");\r
- }\r
- public FeedEndpointID(String id, String password) {\r
- this.id = id;\r
- this.password = password;\r
- }\r
- public FeedEndpointID(ResultSet rs) throws SQLException {\r
- this.id = rs.getString("USERID");\r
- this.password = rs.getString("PASSWORD");\r
- }\r
+ public void setId(String id) {\r
+ this.id = id;\r
+ }\r
\r
- public String getId() {\r
- return id;\r
- }\r
+ public String getPassword() {\r
+ return password;\r
+ }\r
\r
- public void setId(String id) {\r
- this.id = id;\r
- }\r
+ public void setPassword(String password) {\r
+ this.password = password;\r
+ }\r
\r
- public String getPassword() {\r
- return password;\r
- }\r
+ @Override\r
+ public JSONObject asJSONObject() {\r
+ JSONObject jo = new JSONObject();\r
+ jo.put("id", id);\r
+ jo.put("password", password);\r
+ return jo;\r
+ }\r
\r
- public void setPassword(String password) {\r
- this.password = password;\r
- }\r
+ @Override\r
+ public boolean equals(Object obj) {\r
+ if (!(obj instanceof FeedEndpointID))\r
+ return false;\r
+ FeedEndpointID f2 = (FeedEndpointID) obj;\r
+ return id.equals(f2.id) && password.equals(f2.password);\r
+ }\r
\r
- @Override\r
- public JSONObject asJSONObject() {\r
- JSONObject jo = new JSONObject();\r
- jo.put("id", id);\r
- jo.put("password", password);\r
- return jo;\r
- }\r
- @Override\r
- public boolean equals(Object obj) {\r
- if (!(obj instanceof FeedEndpointID))\r
- return false;\r
- FeedEndpointID f2 = (FeedEndpointID) obj;\r
- return id.equals(f2.id) && password.equals(f2.password);\r
- }\r
- @Override\r
- public int hashCode() {\r
- return (id + ":" + password).hashCode();\r
- }\r
+ @Override\r
+ public int hashCode() {\r
+ return (id + ":" + password).hashCode();\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* The URLs associated with a Feed.\r
+ *\r
* @author Robert Eby\r
* @version $Id: FeedLinks.java,v 1.3 2013/07/05 13:48:05 eby Exp $\r
*/\r
public class FeedLinks implements JSONable {\r
- private String self;\r
- private String publish;\r
- private String subscribe;\r
- private String log;\r
-\r
- public FeedLinks() {\r
- self = publish = subscribe = log = null;\r
- }\r
-\r
- public FeedLinks(JSONObject jo) throws InvalidObjectException {\r
- this();\r
- self = jo.getString("self");\r
- publish = jo.getString("publish");\r
- subscribe = jo.getString("subscribe");\r
- log = jo.getString("log");\r
- }\r
-\r
- public String getSelf() {\r
- return self;\r
- }\r
- public void setSelf(String self) {\r
- this.self = self;\r
- }\r
- public String getPublish() {\r
- return publish;\r
- }\r
- public void setPublish(String publish) {\r
- this.publish = publish;\r
- }\r
- public String getSubscribe() {\r
- return subscribe;\r
- }\r
- public void setSubscribe(String subscribe) {\r
- this.subscribe = subscribe;\r
- }\r
- public String getLog() {\r
- return log;\r
- }\r
- public void setLog(String log) {\r
- this.log = log;\r
- }\r
-\r
- @Override\r
- public JSONObject asJSONObject() {\r
- JSONObject jo = new JSONObject();\r
- jo.put("self", self);\r
- jo.put("publish", publish);\r
- jo.put("subscribe", subscribe);\r
- jo.put("log", log);\r
- return jo;\r
- }\r
- @Override\r
- public boolean equals(Object obj) {\r
- if (!(obj instanceof FeedLinks))\r
- return false;\r
- FeedLinks of = (FeedLinks) obj;\r
- if (!self.equals(of.self))\r
- return false;\r
- if (!publish.equals(of.publish))\r
- return false;\r
- if (!subscribe.equals(of.subscribe))\r
- return false;\r
- if (!log.equals(of.log))\r
- return false;\r
- return true;\r
- }\r
+ private String self;\r
+ private String publish;\r
+ private String subscribe;\r
+ private String log;\r
+\r
+ public FeedLinks() {\r
+ self = publish = subscribe = log = null;\r
+ }\r
+\r
+ public FeedLinks(JSONObject jo) throws InvalidObjectException {\r
+ this();\r
+ self = jo.getString("self");\r
+ publish = jo.getString("publish");\r
+ subscribe = jo.getString("subscribe");\r
+ log = jo.getString("log");\r
+ }\r
+\r
+ public String getSelf() {\r
+ return self;\r
+ }\r
+\r
+ public void setSelf(String self) {\r
+ this.self = self;\r
+ }\r
+\r
+ public String getPublish() {\r
+ return publish;\r
+ }\r
+\r
+ public void setPublish(String publish) {\r
+ this.publish = publish;\r
+ }\r
+\r
+ public String getSubscribe() {\r
+ return subscribe;\r
+ }\r
+\r
+ public void setSubscribe(String subscribe) {\r
+ this.subscribe = subscribe;\r
+ }\r
+\r
+ public String getLog() {\r
+ return log;\r
+ }\r
+\r
+ public void setLog(String log) {\r
+ this.log = log;\r
+ }\r
+\r
+ @Override\r
+ public JSONObject asJSONObject() {\r
+ JSONObject jo = new JSONObject();\r
+ jo.put("self", self);\r
+ jo.put("publish", publish);\r
+ jo.put("subscribe", subscribe);\r
+ jo.put("log", log);\r
+ return jo;\r
+ }\r
+\r
+ @Override\r
+ public boolean equals(Object obj) {\r
+ if (!(obj instanceof FeedLinks))\r
+ return false;\r
+ FeedLinks of = (FeedLinks) obj;\r
+ if (!self.equals(of.self))\r
+ return false;\r
+ if (!publish.equals(of.publish))\r
+ return false;\r
+ if (!subscribe.equals(of.subscribe))\r
+ return false;\r
+ if (!log.equals(of.log))\r
+ return false;\r
+ return true;\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* The representation of a Subscription. Subscriptions can be retrieved from the DB, or stored/updated in the DB.\r
+ *\r
* @author vikram\r
- * @version $Id: Group.java,v 1.0 2016/07/19 \r
+ * @version $Id: Group.java,v 1.0 2016/07/19\r
*/\r
public class Group extends Syncable {\r
- private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
- private static int next_groupid = getMaxGroupID() + 1;\r
-\r
- private int groupid;\r
- private String authid;\r
- private String name;\r
- private String description;\r
- private String classification;\r
- private String members;\r
- private Date last_mod;\r
- \r
- \r
- public static Group getGroupMatching(Group gup) {\r
- String sql = String.format(\r
- "select * from GROUPS where NAME = \"%s\"",\r
- gup.getName()\r
- );\r
- List<Group> list = getGroupsForSQL(sql);\r
- return list.size() > 0 ? list.get(0) : null;\r
- }\r
- \r
- public static Group getGroupMatching(Group gup, int groupid) {\r
- String sql = String.format(\r
- "select * from GROUPS where NAME = \"%s\" and GROUPID != %d ",\r
- gup.getName(),\r
- gup.getGroupid()\r
- );\r
- List<Group> list = getGroupsForSQL(sql);\r
- return list.size() > 0 ? list.get(0) : null;\r
- }\r
- \r
- public static Group getGroupById(int id) {\r
- String sql = "select * from GROUPS where GROUPID = " + id;\r
- List<Group> list = getGroupsForSQL(sql);\r
- return list.size() > 0 ? list.get(0) : null;\r
- }\r
- \r
- public static Group getGroupByAuthId(String id) {\r
- String sql = "select * from GROUPS where AUTHID = '" + id +"'";\r
- List<Group> list = getGroupsForSQL(sql);\r
- return list.size() > 0 ? list.get(0) : null;\r
- }\r
- \r
- public static Collection<Group> getAllgroups() {\r
- return getGroupsForSQL("select * from GROUPS");\r
- }\r
- private static List<Group> getGroupsForSQL(String sql) {\r
- List<Group> list = new ArrayList<Group>();\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery(sql);\r
- while (rs.next()) {\r
- Group group = new Group(rs);\r
- list.add(group);\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- return list;\r
- }\r
- public static int getMaxGroupID() {\r
- int max = 0;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery("select MAX(groupid) from GROUPS");\r
- if (rs.next()) {\r
- max = rs.getInt(1);\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- intlogger.info("getMaxSubID: "+e.getMessage());\r
- e.printStackTrace();\r
- }\r
- return max;\r
- }\r
- public static Collection<String> getGroupsByClassfication(String classfication) {\r
- List<String> list = new ArrayList<String>();\r
- String sql = "select * from GROUPS where classification = '"+classfication+"'";\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery(sql);\r
- while (rs.next()) {\r
- int groupid = rs.getInt("groupid");\r
- //list.add(URLUtilities.generateSubscriptionURL(groupid));\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- return list;\r
- }\r
- /**\r
- * Return a count of the number of active subscriptions in the DB.\r
- * @return the count\r
- */\r
- public static int countActiveSubscriptions() {\r
- int count = 0;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery("select count(*) from SUBSCRIPTIONS");\r
- if (rs.next()) {\r
- count = rs.getInt(1);\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- intlogger.warn("PROV0008 countActiveSubscriptions: "+e.getMessage());\r
- e.printStackTrace();\r
- }\r
- return count;\r
- }\r
-\r
- public Group() {\r
- this("", "", "");\r
- }\r
- public Group(String name, String desc, String members) {\r
- this.groupid = -1;\r
- this.authid = "";\r
- this.name = name;\r
- this.description = desc;\r
- this.members = members;\r
- this.classification = "";\r
- this.last_mod = new Date();\r
- }\r
- \r
- \r
- public Group(ResultSet rs) throws SQLException {\r
- this.groupid = rs.getInt("GROUPID");\r
- this.authid = rs.getString("AUTHID");\r
- this.name = rs.getString("NAME");\r
- this.description = rs.getString("DESCRIPTION");\r
- this.classification = rs.getString("CLASSIFICATION");\r
- this.members = rs.getString("MEMBERS");\r
- this.last_mod = rs.getDate("LAST_MOD");\r
- }\r
- \r
-\r
- \r
- public Group(JSONObject jo) throws InvalidObjectException {\r
- this("", "", "");\r
- try {\r
- // The JSONObject is assumed to contain a vnd.att-dr.group representation\r
- this.groupid = jo.optInt("groupid", -1);\r
- String gname = jo.getString("name");\r
- String gdescription = jo.getString("description");\r
- \r
- this.authid = jo.getString("authid");\r
- this.name = gname;\r
- this.description = gdescription;\r
- this.classification = jo.getString("classification");\r
- this.members = jo.getString("members");\r
- \r
- if (gname.length() > 50)\r
- throw new InvalidObjectException("Group name is too long");\r
- if (gdescription.length() > 256)\r
- throw new InvalidObjectException("Group Description is too long");\r
- } catch (InvalidObjectException e) {\r
- throw e;\r
- } catch (Exception e) {\r
- throw new InvalidObjectException("invalid JSON: "+e.getMessage());\r
- }\r
- }\r
- public int getGroupid() {\r
- return groupid;\r
- }\r
- \r
- public static Logger getIntlogger() {\r
- return intlogger;\r
- }\r
- public void setGroupid(int groupid) {\r
- this.groupid = groupid;\r
- }\r
- \r
- public static void setIntlogger(Logger intlogger) {\r
- Group.intlogger = intlogger;\r
- }\r
- public static int getNext_groupid() {\r
- return next_groupid;\r
- }\r
- public static void setNext_groupid(int next_groupid) {\r
- Group.next_groupid = next_groupid;\r
- }\r
- public String getAuthid() {\r
- return authid;\r
- }\r
- public void setAuthid(String authid) {\r
- this.authid = authid;\r
- }\r
- public String getName() {\r
- return name;\r
- }\r
- public void setName(String name) {\r
- this.name = name;\r
- }\r
- public String getDescription() {\r
- return description;\r
- }\r
- public void setDescription(String description) {\r
- this.description = description;\r
- }\r
- public String getClassification() {\r
- return classification;\r
- }\r
- public void setClassification(String classification) {\r
- this.classification = classification;\r
- }\r
- public String getMembers() {\r
- return members;\r
- }\r
- public void setMembers(String members) {\r
- this.members = members;\r
- }\r
- public Date getLast_mod() {\r
- return last_mod;\r
- }\r
- public void setLast_mod(Date last_mod) {\r
- this.last_mod = last_mod;\r
- }\r
- \r
-\r
- @Override\r
- public JSONObject asJSONObject() {\r
- JSONObject jo = new JSONObject();\r
- jo.put("groupid", groupid);\r
- jo.put("authid", authid);\r
- jo.put("name", name);\r
- jo.put("description", description);\r
- jo.put("classification", classification);\r
- jo.put("members", members);\r
- jo.put("last_mod", last_mod.getTime());\r
- return jo;\r
- }\r
- @Override\r
- public boolean doInsert(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- if (groupid == -1) {\r
- // No feed ID assigned yet, so assign the next available one\r
- setGroupid(next_groupid++);\r
- }\r
- // In case we insert a gropup from synchronization\r
- if (groupid > next_groupid)\r
- next_groupid = groupid+1;\r
-\r
- \r
- // Create the GROUPS row\r
- String sql = "insert into GROUPS (GROUPID, AUTHID, NAME, DESCRIPTION, CLASSIFICATION, MEMBERS) values (?, ?, ?, ?, ?, ?)";\r
- ps = c.prepareStatement(sql, new String[] { "GROUPID" });\r
- ps.setInt(1, groupid);\r
- ps.setString(2, authid);\r
- ps.setString(3, name);\r
- ps.setString(4, description);\r
- ps.setString(5, classification);\r
- ps.setString(6, members);\r
- ps.execute();\r
- ps.close();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0005 doInsert: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
- @Override\r
- public boolean doUpdate(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- String sql = "update GROUPS set AUTHID = ?, NAME = ?, DESCRIPTION = ?, CLASSIFICATION = ? , MEMBERS = ? where GROUPID = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setString(1, authid);\r
- ps.setString(2, name);\r
- ps.setString(3, description);\r
- ps.setString(4, classification);\r
- ps.setString(5, members);\r
- ps.setInt(6, groupid);\r
- ps.executeUpdate();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0006 doUpdate: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
- @Override\r
- public boolean doDelete(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- String sql = "delete from GROUPS where GROUPID = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setInt(1, groupid);\r
- ps.execute();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0007 doDelete: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
- @Override\r
- public String getKey() {\r
- return ""+getGroupid();\r
- }\r
- @Override\r
- public boolean equals(Object obj) {\r
- if (!(obj instanceof Group))\r
- return false;\r
- Group os = (Group) obj;\r
- if (groupid != os.groupid)\r
- return false;\r
- if (authid != os.authid)\r
- return false;\r
- if (!name.equals(os.name))\r
- return false;\r
- if (description != os.description)\r
- return false;\r
- if (!classification.equals(os.classification))\r
- return false;\r
- if (!members.equals(os.members))\r
- return false;\r
- \r
- return true;\r
- }\r
-\r
- @Override\r
- public String toString() {\r
- return "GROUP: groupid=" + groupid;\r
- }\r
+ private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
+ private static int next_groupid = getMaxGroupID() + 1;\r
+\r
+ private int groupid;\r
+ private String authid;\r
+ private String name;\r
+ private String description;\r
+ private String classification;\r
+ private String members;\r
+ private Date last_mod;\r
+\r
+\r
+ public static Group getGroupMatching(Group gup) {\r
+ String sql = String.format(\r
+ "select * from GROUPS where NAME = \"%s\"",\r
+ gup.getName()\r
+ );\r
+ List<Group> list = getGroupsForSQL(sql);\r
+ return list.size() > 0 ? list.get(0) : null;\r
+ }\r
+\r
+ public static Group getGroupMatching(Group gup, int groupid) {\r
+ String sql = String.format(\r
+ "select * from GROUPS where NAME = \"%s\" and GROUPID != %d ",\r
+ gup.getName(),\r
+ gup.getGroupid()\r
+ );\r
+ List<Group> list = getGroupsForSQL(sql);\r
+ return list.size() > 0 ? list.get(0) : null;\r
+ }\r
+\r
+ public static Group getGroupById(int id) {\r
+ String sql = "select * from GROUPS where GROUPID = " + id;\r
+ List<Group> list = getGroupsForSQL(sql);\r
+ return list.size() > 0 ? list.get(0) : null;\r
+ }\r
+\r
+ public static Group getGroupByAuthId(String id) {\r
+ String sql = "select * from GROUPS where AUTHID = '" + id + "'";\r
+ List<Group> list = getGroupsForSQL(sql);\r
+ return list.size() > 0 ? list.get(0) : null;\r
+ }\r
+\r
+ public static Collection<Group> getAllgroups() {\r
+ return getGroupsForSQL("select * from GROUPS");\r
+ }\r
+\r
+ private static List<Group> getGroupsForSQL(String sql) {\r
+ List<Group> list = new ArrayList<Group>();\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery(sql);\r
+ while (rs.next()) {\r
+ Group group = new Group(rs);\r
+ list.add(group);\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ return list;\r
+ }\r
+\r
+ public static int getMaxGroupID() {\r
+ int max = 0;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery("select MAX(groupid) from GROUPS");\r
+ if (rs.next()) {\r
+ max = rs.getInt(1);\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ intlogger.info("getMaxSubID: " + e.getMessage());\r
+ e.printStackTrace();\r
+ }\r
+ return max;\r
+ }\r
+\r
+ public static Collection<String> getGroupsByClassfication(String classfication) {\r
+ List<String> list = new ArrayList<String>();\r
+ String sql = "select * from GROUPS where classification = '" + classfication + "'";\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery(sql);\r
+ while (rs.next()) {\r
+ int groupid = rs.getInt("groupid");\r
+ //list.add(URLUtilities.generateSubscriptionURL(groupid));\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ return list;\r
+ }\r
+\r
+ /**\r
+ * Return a count of the number of active subscriptions in the DB.\r
+ *\r
+ * @return the count\r
+ */\r
+ public static int countActiveSubscriptions() {\r
+ int count = 0;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery("select count(*) from SUBSCRIPTIONS");\r
+ if (rs.next()) {\r
+ count = rs.getInt(1);\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ intlogger.warn("PROV0008 countActiveSubscriptions: " + e.getMessage());\r
+ e.printStackTrace();\r
+ }\r
+ return count;\r
+ }\r
+\r
+ public Group() {\r
+ this("", "", "");\r
+ }\r
+\r
+ public Group(String name, String desc, String members) {\r
+ this.groupid = -1;\r
+ this.authid = "";\r
+ this.name = name;\r
+ this.description = desc;\r
+ this.members = members;\r
+ this.classification = "";\r
+ this.last_mod = new Date();\r
+ }\r
+\r
+\r
+ public Group(ResultSet rs) throws SQLException {\r
+ this.groupid = rs.getInt("GROUPID");\r
+ this.authid = rs.getString("AUTHID");\r
+ this.name = rs.getString("NAME");\r
+ this.description = rs.getString("DESCRIPTION");\r
+ this.classification = rs.getString("CLASSIFICATION");\r
+ this.members = rs.getString("MEMBERS");\r
+ this.last_mod = rs.getDate("LAST_MOD");\r
+ }\r
+\r
+\r
+ public Group(JSONObject jo) throws InvalidObjectException {\r
+ this("", "", "");\r
+ try {\r
+ // The JSONObject is assumed to contain a vnd.att-dr.group representation\r
+ this.groupid = jo.optInt("groupid", -1);\r
+ String gname = jo.getString("name");\r
+ String gdescription = jo.getString("description");\r
+\r
+ this.authid = jo.getString("authid");\r
+ this.name = gname;\r
+ this.description = gdescription;\r
+ this.classification = jo.getString("classification");\r
+ this.members = jo.getString("members");\r
+\r
+ if (gname.length() > 50)\r
+ throw new InvalidObjectException("Group name is too long");\r
+ if (gdescription.length() > 256)\r
+ throw new InvalidObjectException("Group Description is too long");\r
+ } catch (InvalidObjectException e) {\r
+ throw e;\r
+ } catch (Exception e) {\r
+ throw new InvalidObjectException("invalid JSON: " + e.getMessage());\r
+ }\r
+ }\r
+\r
+ public int getGroupid() {\r
+ return groupid;\r
+ }\r
+\r
+ public static Logger getIntlogger() {\r
+ return intlogger;\r
+ }\r
+\r
+ public void setGroupid(int groupid) {\r
+ this.groupid = groupid;\r
+ }\r
+\r
+ public static void setIntlogger(Logger intlogger) {\r
+ Group.intlogger = intlogger;\r
+ }\r
+\r
+ public static int getNext_groupid() {\r
+ return next_groupid;\r
+ }\r
+\r
+ public static void setNext_groupid(int next_groupid) {\r
+ Group.next_groupid = next_groupid;\r
+ }\r
+\r
+ public String getAuthid() {\r
+ return authid;\r
+ }\r
+\r
+ public void setAuthid(String authid) {\r
+ this.authid = authid;\r
+ }\r
+\r
+ public String getName() {\r
+ return name;\r
+ }\r
+\r
+ public void setName(String name) {\r
+ this.name = name;\r
+ }\r
+\r
+ public String getDescription() {\r
+ return description;\r
+ }\r
+\r
+ public void setDescription(String description) {\r
+ this.description = description;\r
+ }\r
+\r
+ public String getClassification() {\r
+ return classification;\r
+ }\r
+\r
+ public void setClassification(String classification) {\r
+ this.classification = classification;\r
+ }\r
+\r
+ public String getMembers() {\r
+ return members;\r
+ }\r
+\r
+ public void setMembers(String members) {\r
+ this.members = members;\r
+ }\r
+\r
+ public Date getLast_mod() {\r
+ return last_mod;\r
+ }\r
+\r
+ public void setLast_mod(Date last_mod) {\r
+ this.last_mod = last_mod;\r
+ }\r
+\r
+\r
+ @Override\r
+ public JSONObject asJSONObject() {\r
+ JSONObject jo = new JSONObject();\r
+ jo.put("groupid", groupid);\r
+ jo.put("authid", authid);\r
+ jo.put("name", name);\r
+ jo.put("description", description);\r
+ jo.put("classification", classification);\r
+ jo.put("members", members);\r
+ jo.put("last_mod", last_mod.getTime());\r
+ return jo;\r
+ }\r
+\r
+ @Override\r
+ public boolean doInsert(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ if (groupid == -1) {\r
+ // No group ID assigned yet, so assign the next available one\r
+ setGroupid(next_groupid++);\r
+ }\r
+ // In case we insert a group from synchronization\r
+ if (groupid > next_groupid)\r
+ next_groupid = groupid + 1;\r
+\r
+\r
+ // Create the GROUPS row\r
+ String sql = "insert into GROUPS (GROUPID, AUTHID, NAME, DESCRIPTION, CLASSIFICATION, MEMBERS) values (?, ?, ?, ?, ?, ?)";\r
+ ps = c.prepareStatement(sql, new String[]{"GROUPID"});\r
+ ps.setInt(1, groupid);\r
+ ps.setString(2, authid);\r
+ ps.setString(3, name);\r
+ ps.setString(4, description);\r
+ ps.setString(5, classification);\r
+ ps.setString(6, members);\r
+ ps.execute();\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0005 doInsert: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @Override\r
+ public boolean doUpdate(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ String sql = "update GROUPS set AUTHID = ?, NAME = ?, DESCRIPTION = ?, CLASSIFICATION = ? , MEMBERS = ? where GROUPID = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setString(1, authid);\r
+ ps.setString(2, name);\r
+ ps.setString(3, description);\r
+ ps.setString(4, classification);\r
+ ps.setString(5, members);\r
+ ps.setInt(6, groupid);\r
+ ps.executeUpdate();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0006 doUpdate: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @Override\r
+ public boolean doDelete(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ String sql = "delete from GROUPS where GROUPID = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setInt(1, groupid);\r
+ ps.execute();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0007 doDelete: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @Override\r
+ public String getKey() {\r
+ return "" + getGroupid();\r
+ }\r
+\r
+ @Override\r
+ public boolean equals(Object obj) {\r
+ if (!(obj instanceof Group))\r
+ return false;\r
+ Group os = (Group) obj;\r
+ if (groupid != os.groupid)\r
+ return false;\r
+ if (authid != os.authid)\r
+ return false;\r
+ if (!name.equals(os.name))\r
+ return false;\r
+ if (description != os.description)\r
+ return false;\r
+ if (!classification.equals(os.classification))\r
+ return false;\r
+ if (!members.equals(os.members))\r
+ return false;\r
+\r
+ return true;\r
+ }\r
+\r
+ @Override\r
+ public String toString() {\r
+ return "GROUP: groupid=" + groupid;\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: IngressRoute.java,v 1.3 2013/12/16 20:30:23 eby Exp $\r
*/\r
public class IngressRoute extends NodeClass implements Comparable<IngressRoute> {\r
- private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
- private final int seq;\r
- private final int feedid;\r
- private final String userid;\r
- private final String subnet;\r
- private int nodelist;\r
- private SortedSet<String> nodes;\r
-\r
- /**\r
- * Get all IngressRoutes in the database, sorted in order according to their sequence field.\r
- * @return a sorted set of IngressRoutes\r
- */\r
- public static SortedSet<IngressRoute> getAllIngressRoutes() {\r
- return getAllIngressRoutesForSQL("select SEQUENCE, FEEDID, USERID, SUBNET, NODESET from INGRESS_ROUTES");\r
- }\r
- /**\r
- * Get all IngressRoutes in the database with a particular sequence number.\r
- * @param seq the sequence number\r
- * @return a set of IngressRoutes\r
- */\r
- public static Set<IngressRoute> getIngressRoutesForSeq(int seq) {\r
- return getAllIngressRoutesForSQL("select SEQUENCE, FEEDID, USERID, SUBNET, NODESET from INGRESS_ROUTES where SEQUENCE = "+seq);\r
- }\r
- private static SortedSet<IngressRoute> getAllIngressRoutesForSQL(String sql) {\r
- SortedSet<IngressRoute> set = new TreeSet<IngressRoute>();\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery(sql);\r
- while (rs.next()) {\r
- int seq = rs.getInt("SEQUENCE");\r
- int feedid = rs.getInt("FEEDID");\r
- String user = rs.getString("USERID");\r
- String subnet = rs.getString("SUBNET");\r
- int nodeset = rs.getInt("NODESET");\r
- set.add(new IngressRoute(seq, feedid, user, subnet, nodeset));\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- return set;\r
- }\r
-\r
- /**\r
- * Get the maximum node set ID in use in the DB.\r
- * @return the integer value of the maximum\r
- */\r
- public static int getMaxNodeSetID() {\r
- return getMax("select max(SETID) as MAX from NODESETS");\r
- }\r
- /**\r
- * Get the maximum node sequence number in use in the DB.\r
- * @return the integer value of the maximum\r
- */\r
- public static int getMaxSequence() {\r
- return getMax("select max(SEQUENCE) as MAX from INGRESS_ROUTES");\r
- }\r
- private static int getMax(String sql) {\r
- int rv = 0;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery(sql);\r
- if (rs.next()) {\r
- rv = rs.getInt("MAX");\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- return rv;\r
- }\r
-\r
- /**\r
- * Get an Ingress Route for a particular feed ID, user, and subnet\r
- * @param feedid the Feed ID to look for\r
- * @param user the user name to look for\r
- * @param subnet the subnet to look for\r
- * @return the Ingress Route, or null of there is none\r
- */\r
- public static IngressRoute getIngressRoute(int feedid, String user, String subnet) {\r
- IngressRoute v = null;\r
- PreparedStatement ps = null;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- String sql = "select SEQUENCE, NODESET from INGRESS_ROUTES where FEEDID = ? AND USERID = ? and SUBNET = ?";\r
- ps = conn.prepareStatement(sql);\r
- ps.setInt(1, feedid);\r
- ps.setString(2, user);\r
- ps.setString(3, subnet);\r
- ResultSet rs = ps.executeQuery();\r
- if (rs.next()) {\r
- int seq = rs.getInt("SEQUENCE");\r
- int nodeset = rs.getInt("NODESET");\r
- v = new IngressRoute(seq, feedid, user, subnet, nodeset);\r
- }\r
- rs.close();\r
- ps.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return v;\r
- }\r
-\r
- /**\r
- * Get a collection of all Ingress Routes with a particular sequence number.\r
- * @param seq the sequence number to look for\r
- * @return the collection (may be empty).\r
- */\r
- public static Collection<IngressRoute> getIngressRoute(int seq) {\r
- Collection<IngressRoute> rv = new ArrayList<IngressRoute>();\r
- PreparedStatement ps = null;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- String sql = "select FEEDID, USERID, SUBNET, NODESET from INGRESS_ROUTES where SEQUENCE = ?";\r
- ps = conn.prepareStatement(sql);\r
- ps.setInt(1, seq);\r
- ResultSet rs = ps.executeQuery();\r
- while (rs.next()) {\r
- int feedid = rs.getInt("FEEDID");\r
- String user = rs.getString("USERID");\r
- String subnet = rs.getString("SUBNET");\r
- int nodeset = rs.getInt("NODESET");\r
- rv.add(new IngressRoute(seq, feedid, user, subnet, nodeset));\r
- }\r
- rs.close();\r
- ps.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
-\r
- public IngressRoute(int seq, int feedid, String user, String subnet, Collection<String> nodes)\r
- throws IllegalArgumentException\r
- {\r
- this(seq, feedid, user, subnet);\r
- this.nodelist = -1;\r
- this.nodes = new TreeSet<String>(nodes);\r
- }\r
-\r
- public IngressRoute(int seq, int feedid, String user, String subnet, int nodeset)\r
- throws IllegalArgumentException\r
- {\r
- this(seq, feedid, user, subnet);\r
- this.nodelist = nodeset;\r
- this.nodes = new TreeSet<String>(readNodes());\r
- }\r
-\r
- private IngressRoute(int seq, int feedid, String user, String subnet)\r
- throws IllegalArgumentException\r
- {\r
- this.seq = seq;\r
- this.feedid = feedid;\r
- this.userid = (user == null) ? "-" : user;\r
- this.subnet = (subnet == null) ? "-" : subnet;\r
- this.nodelist = -1;\r
- this.nodes = null;\r
- if (Feed.getFeedById(feedid) == null)\r
- throw new IllegalArgumentException("No such feed: "+feedid);\r
- if (!this.subnet.equals("-")) {\r
- SubnetMatcher sm = new SubnetMatcher(subnet);\r
- if (!sm.isValid())\r
- throw new IllegalArgumentException("Invalid subnet: "+subnet);\r
- }\r
- }\r
-\r
- public IngressRoute(JSONObject jo) {\r
- this.seq = jo.optInt("seq");\r
- this.feedid = jo.optInt("feedid");\r
- String t = jo.optString("user");\r
- this.userid = t.equals("") ? "-" : t;\r
- t = jo.optString("subnet");\r
- this.subnet = t.equals("") ? "-" : t;\r
- this.nodelist = -1;\r
- this.nodes = new TreeSet<String>();\r
- JSONArray ja = jo.getJSONArray("node");\r
- for (int i = 0; i < ja.length(); i++)\r
- this.nodes.add(ja.getString(i));\r
- }\r
- /**\r
- * Does this particular IngressRoute match a request, represented by feedid and req?\r
- * To match, <i>feedid</i> must match the feed ID in the route, the user in the route\r
- * (if specified) must match the user in the request, and the subnet in the route (if specified)\r
- * must match the subnet from the request.\r
- * @param feedid the feedid for this request\r
- * @param req the remainder of the request\r
- * @return true if a match, false otherwise\r
- */\r
- public boolean matches(int feedid, HttpServletRequest req) {\r
- // Check feedid\r
- if (this.feedid != feedid)\r
- return false;\r
-\r
- // Get user from request and compare\r
- // Note: we don't check the password; the node will do that\r
- if (userid.length() > 0 && !userid.equals("-")) {\r
- String credentials = req.getHeader("Authorization");\r
- if (credentials == null || !credentials.startsWith("Basic "))\r
- return false;\r
- String t = new String(Base64.decodeBase64(credentials.substring(6)));\r
- int ix = t.indexOf(':');\r
- if (ix >= 0)\r
- t = t.substring(0, ix);\r
- if (!t.equals(this.userid))\r
- return false;\r
- }\r
-\r
- // If this route has a subnet, match it against the requester's IP addr\r
- if (subnet.length() > 0 && !subnet.equals("-")) {\r
- try {\r
- InetAddress inet = InetAddress.getByName(req.getRemoteAddr());\r
- SubnetMatcher sm = new SubnetMatcher(subnet);\r
- return sm.matches(inet.getAddress());\r
- } catch (UnknownHostException e) {\r
- return false;\r
- }\r
- }\r
- return true;\r
- }\r
-\r
- /**\r
- * Compare IP addresses as byte arrays to a subnet specified as a CIDR.\r
- * Taken from org.onap.dmaap.datarouter.node.SubnetMatcher and modified somewhat.\r
- */\r
- public class SubnetMatcher {\r
- private byte[] sn;\r
- private int len;\r
- private int mask;\r
- private boolean valid;\r
-\r
- /**\r
- * Construct a subnet matcher given a CIDR\r
- * @param subnet The CIDR to match\r
- */\r
- public SubnetMatcher(String subnet) {\r
- int i = subnet.lastIndexOf('/');\r
- if (i == -1) {\r
- try {\r
- sn = InetAddress.getByName(subnet).getAddress();\r
- len = sn.length;\r
- valid = true;\r
- } catch (UnknownHostException e) {\r
- len = 0;\r
- valid = false;\r
- }\r
- mask = 0;\r
- } else {\r
- int n = Integer.parseInt(subnet.substring(i + 1));\r
- try {\r
- sn = InetAddress.getByName(subnet.substring(0, i)).getAddress();\r
- valid = true;\r
- } catch (UnknownHostException e) {\r
- valid = false;\r
- }\r
- len = n / 8;\r
- mask = ((0xff00) >> (n % 8)) & 0xff;\r
- }\r
- }\r
- public boolean isValid() {\r
- return valid;\r
- }\r
- /**\r
- * Is the IP address in the CIDR?\r
- * @param addr the IP address as bytes in network byte order\r
- * @return true if the IP address matches.\r
- */\r
- public boolean matches(byte[] addr) {\r
- if (!valid || addr.length != sn.length) {\r
- return false;\r
- }\r
- for (int i = 0; i < len; i++) {\r
- if (addr[i] != sn[i]) {\r
- return false;\r
- }\r
- }\r
- if (mask != 0 && ((addr[len] ^ sn[len]) & mask) != 0) {\r
- return false;\r
- }\r
- return true;\r
- }\r
- }\r
-\r
- /**\r
- * Get the list of node names for this route.\r
- * @return the list\r
- */\r
- public SortedSet<String> getNodes() {\r
- return this.nodes;\r
- }\r
-\r
- private Collection<String> readNodes() {\r
- Collection<String> set = new TreeSet<String>();\r
- PreparedStatement ps = null;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- String sql = "select NODEID from NODESETS where SETID = ?";\r
- ps = conn.prepareStatement(sql);\r
- ps.setInt(1, nodelist);\r
- ResultSet rs = ps.executeQuery();\r
- while (rs.next()) {\r
- int id = rs.getInt("NODEID");\r
- set.add(lookupNodeID(id));\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return set;\r
- }\r
-\r
- /**\r
- * Delete the IRT route having this IngressRoutes feed ID, user ID, and subnet from the database.\r
- * @return true if the delete succeeded\r
- */\r
- @Override\r
- public boolean doDelete(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- ps = c.prepareStatement("delete from INGRESS_ROUTES where FEEDID = ? and USERID = ? and SUBNET = ?");\r
- ps.setInt(1, feedid);\r
- ps.setString(2, userid);\r
- ps.setString(3, subnet);\r
- ps.execute();\r
- ps.close();\r
-\r
- ps = c.prepareStatement("delete from NODESETS where SETID = ?");\r
- ps.setInt(1, nodelist);\r
- ps.execute();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0007 doDelete: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
-\r
- @SuppressWarnings("resource")\r
- @Override\r
- public boolean doInsert(Connection c) {\r
- boolean rv = false;\r
- PreparedStatement ps = null;\r
- try {\r
- // Create the NODESETS rows & set nodelist\r
- int set = getMaxNodeSetID() + 1;\r
- this.nodelist = set;\r
- for (String node : nodes) {\r
- int id = lookupNodeName(node);\r
- ps = c.prepareStatement("insert into NODESETS (SETID, NODEID) values (?,?)");\r
- ps.setInt(1, this.nodelist);\r
- ps.setInt(2, id);\r
- ps.execute();\r
- ps.close();\r
- }\r
-\r
- // Create the INGRESS_ROUTES row\r
- ps = c.prepareStatement("insert into INGRESS_ROUTES (SEQUENCE, FEEDID, USERID, SUBNET, NODESET) values (?, ?, ?, ?, ?)");\r
- ps.setInt(1, this.seq);\r
- ps.setInt(2, this.feedid);\r
- ps.setString(3, this.userid);\r
- ps.setString(4, this.subnet);\r
- ps.setInt(5, this.nodelist);\r
- ps.execute();\r
- ps.close();\r
- rv = true;\r
- } catch (SQLException e) {\r
- intlogger.warn("PROV0005 doInsert: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
-\r
- @Override\r
- public boolean doUpdate(Connection c) {\r
- return doDelete(c) && doInsert(c);\r
- }\r
-\r
- @Override\r
- public JSONObject asJSONObject() {\r
- JSONObject jo = new JSONObject();\r
- jo.put("feedid", feedid);\r
- // Note: for user and subnet, null, "", and "-" are equivalent\r
- if (userid != null && !userid.equals("-") && !userid.equals(""))\r
- jo.put("user", userid);\r
- if (subnet != null && !subnet.equals("-") && !subnet.equals(""))\r
- jo.put("subnet", subnet);\r
- jo.put("seq", seq);\r
- jo.put("node", nodes);\r
- return jo;\r
- }\r
-\r
- @Override\r
- public String getKey() {\r
- return String.format("%d/%s/%s/%d", feedid, (userid == null)?"":userid, (subnet == null)?"":subnet, seq);\r
- }\r
-\r
- @Override\r
- public int hashCode() {\r
- return toString().hashCode();\r
- }\r
-\r
- @Override\r
- public boolean equals(Object obj) {\r
- try {\r
- if (!(obj instanceof IngressRoute))\r
- return false;\r
- return this.compareTo((IngressRoute) obj) == 0;\r
- } catch (NullPointerException e) {\r
- return false;\r
- }\r
- }\r
-\r
- @Override\r
- public int compareTo(IngressRoute in) {\r
- if (in == null)\r
- throw new NullPointerException();\r
- int n = this.feedid - in.feedid;\r
- if (n != 0)\r
- return n;\r
- n = this.seq - in.seq;\r
- if (n != 0)\r
- return n;\r
- n = this.userid.compareTo(in.userid);\r
- if (n != 0)\r
- return n;\r
- n = this.subnet.compareTo(in.subnet);\r
- if (n != 0)\r
- return n;\r
- return this.nodes.equals(in.nodes) ? 0 : 1;\r
- }\r
-\r
- @Override\r
- public String toString() {\r
- return String.format("INGRESS: feed=%d, userid=%s, subnet=%s, seq=%d", feedid, (userid == null)?"":userid, (subnet == null)?"":subnet, seq);\r
- }\r
+ private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
+ private final int seq;\r
+ private final int feedid;\r
+ private final String userid;\r
+ private final String subnet;\r
+ private int nodelist;\r
+ private SortedSet<String> nodes;\r
+\r
+ /**\r
+ * Get all IngressRoutes in the database, sorted in order according to their sequence field.\r
+ *\r
+ * @return a sorted set of IngressRoutes\r
+ */\r
+ public static SortedSet<IngressRoute> getAllIngressRoutes() {\r
+ return getAllIngressRoutesForSQL("select SEQUENCE, FEEDID, USERID, SUBNET, NODESET from INGRESS_ROUTES");\r
+ }\r
+\r
+ /**\r
+ * Get all IngressRoutes in the database with a particular sequence number.\r
+ *\r
+ * @param seq the sequence number\r
+ * @return a set of IngressRoutes\r
+ */\r
+ public static Set<IngressRoute> getIngressRoutesForSeq(int seq) {\r
+ return getAllIngressRoutesForSQL("select SEQUENCE, FEEDID, USERID, SUBNET, NODESET from INGRESS_ROUTES where SEQUENCE = " + seq);\r
+ }\r
+\r
+ private static SortedSet<IngressRoute> getAllIngressRoutesForSQL(String sql) {\r
+ SortedSet<IngressRoute> set = new TreeSet<IngressRoute>();\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery(sql);\r
+ while (rs.next()) {\r
+ int seq = rs.getInt("SEQUENCE");\r
+ int feedid = rs.getInt("FEEDID");\r
+ String user = rs.getString("USERID");\r
+ String subnet = rs.getString("SUBNET");\r
+ int nodeset = rs.getInt("NODESET");\r
+ set.add(new IngressRoute(seq, feedid, user, subnet, nodeset));\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ return set;\r
+ }\r
+\r
+ /**\r
+ * Get the maximum node set ID in use in the DB.\r
+ *\r
+ * @return the integer value of the maximum\r
+ */\r
+ public static int getMaxNodeSetID() {\r
+ return getMax("select max(SETID) as MAX from NODESETS");\r
+ }\r
+\r
+ /**\r
+ * Get the maximum node sequence number in use in the DB.\r
+ *\r
+ * @return the integer value of the maximum\r
+ */\r
+ public static int getMaxSequence() {\r
+ return getMax("select max(SEQUENCE) as MAX from INGRESS_ROUTES");\r
+ }\r
+\r
+ private static int getMax(String sql) {\r
+ int rv = 0;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery(sql);\r
+ if (rs.next()) {\r
+ rv = rs.getInt("MAX");\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ /**\r
+ * Get an Ingress Route for a particular feed ID, user, and subnet\r
+ *\r
+ * @param feedid the Feed ID to look for\r
+ * @param user the user name to look for\r
+ * @param subnet the subnet to look for\r
+ * @return the Ingress Route, or null of there is none\r
+ */\r
+ public static IngressRoute getIngressRoute(int feedid, String user, String subnet) {\r
+ IngressRoute v = null;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ String sql = "select SEQUENCE, NODESET from INGRESS_ROUTES where FEEDID = ? AND USERID = ? and SUBNET = ?";\r
+ ps = conn.prepareStatement(sql);\r
+ ps.setInt(1, feedid);\r
+ ps.setString(2, user);\r
+ ps.setString(3, subnet);\r
+ ResultSet rs = ps.executeQuery();\r
+ if (rs.next()) {\r
+ int seq = rs.getInt("SEQUENCE");\r
+ int nodeset = rs.getInt("NODESET");\r
+ v = new IngressRoute(seq, feedid, user, subnet, nodeset);\r
+ }\r
+ rs.close();\r
+ ps.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return v;\r
+ }\r
+\r
+ /**\r
+ * Get a collection of all Ingress Routes with a particular sequence number.\r
+ *\r
+ * @param seq the sequence number to look for\r
+ * @return the collection (may be empty).\r
+ */\r
+ public static Collection<IngressRoute> getIngressRoute(int seq) {\r
+ Collection<IngressRoute> rv = new ArrayList<IngressRoute>();\r
+ PreparedStatement ps = null;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ String sql = "select FEEDID, USERID, SUBNET, NODESET from INGRESS_ROUTES where SEQUENCE = ?";\r
+ ps = conn.prepareStatement(sql);\r
+ ps.setInt(1, seq);\r
+ ResultSet rs = ps.executeQuery();\r
+ while (rs.next()) {\r
+ int feedid = rs.getInt("FEEDID");\r
+ String user = rs.getString("USERID");\r
+ String subnet = rs.getString("SUBNET");\r
+ int nodeset = rs.getInt("NODESET");\r
+ rv.add(new IngressRoute(seq, feedid, user, subnet, nodeset));\r
+ }\r
+ rs.close();\r
+ ps.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ public IngressRoute(int seq, int feedid, String user, String subnet, Collection<String> nodes)\r
+ throws IllegalArgumentException {\r
+ this(seq, feedid, user, subnet);\r
+ this.nodelist = -1;\r
+ this.nodes = new TreeSet<String>(nodes);\r
+ }\r
+\r
+ public IngressRoute(int seq, int feedid, String user, String subnet, int nodeset)\r
+ throws IllegalArgumentException {\r
+ this(seq, feedid, user, subnet);\r
+ this.nodelist = nodeset;\r
+ this.nodes = new TreeSet<String>(readNodes());\r
+ }\r
+\r
+ private IngressRoute(int seq, int feedid, String user, String subnet)\r
+ throws IllegalArgumentException {\r
+ this.seq = seq;\r
+ this.feedid = feedid;\r
+ this.userid = (user == null) ? "-" : user;\r
+ this.subnet = (subnet == null) ? "-" : subnet;\r
+ this.nodelist = -1;\r
+ this.nodes = null;\r
+ if (Feed.getFeedById(feedid) == null)\r
+ throw new IllegalArgumentException("No such feed: " + feedid);\r
+ if (!this.subnet.equals("-")) {\r
+ SubnetMatcher sm = new SubnetMatcher(subnet);\r
+ if (!sm.isValid())\r
+ throw new IllegalArgumentException("Invalid subnet: " + subnet);\r
+ }\r
+ }\r
+\r
+ public IngressRoute(JSONObject jo) {\r
+ this.seq = jo.optInt("seq");\r
+ this.feedid = jo.optInt("feedid");\r
+ String t = jo.optString("user");\r
+ this.userid = t.equals("") ? "-" : t;\r
+ t = jo.optString("subnet");\r
+ this.subnet = t.equals("") ? "-" : t;\r
+ this.nodelist = -1;\r
+ this.nodes = new TreeSet<String>();\r
+ JSONArray ja = jo.getJSONArray("node");\r
+ for (int i = 0; i < ja.length(); i++)\r
+ this.nodes.add(ja.getString(i));\r
+ }\r
+\r
+ /**\r
+ * Does this particular IngressRoute match a request, represented by feedid and req?\r
+ * To match, <i>feedid</i> must match the feed ID in the route, the user in the route\r
+ * (if specified) must match the user in the request, and the subnet in the route (if specified)\r
+ * must match the subnet from the request.\r
+ *\r
+ * @param feedid the feedid for this request\r
+ * @param req the remainder of the request\r
+ * @return true if a match, false otherwise\r
+ */\r
+ public boolean matches(int feedid, HttpServletRequest req) {\r
+ // Check feedid\r
+ if (this.feedid != feedid)\r
+ return false;\r
+\r
+ // Get user from request and compare\r
+ // Note: we don't check the password; the node will do that\r
+ if (userid.length() > 0 && !userid.equals("-")) {\r
+ String credentials = req.getHeader("Authorization");\r
+ if (credentials == null || !credentials.startsWith("Basic "))\r
+ return false;\r
+ String t = new String(Base64.decodeBase64(credentials.substring(6)));\r
+ int ix = t.indexOf(':');\r
+ if (ix >= 0)\r
+ t = t.substring(0, ix);\r
+ if (!t.equals(this.userid))\r
+ return false;\r
+ }\r
+\r
+ // If this route has a subnet, match it against the requester's IP addr\r
+ if (subnet.length() > 0 && !subnet.equals("-")) {\r
+ try {\r
+ InetAddress inet = InetAddress.getByName(req.getRemoteAddr());\r
+ SubnetMatcher sm = new SubnetMatcher(subnet);\r
+ return sm.matches(inet.getAddress());\r
+ } catch (UnknownHostException e) {\r
+ return false;\r
+ }\r
+ }\r
+ return true;\r
+ }\r
+\r
+ /**\r
+ * Compare IP addresses as byte arrays to a subnet specified as a CIDR.\r
+ * Taken from org.onap.dmaap.datarouter.node.SubnetMatcher and modified somewhat.\r
+ */\r
+ public class SubnetMatcher {\r
+ private byte[] sn;\r
+ private int len;\r
+ private int mask;\r
+ private boolean valid;\r
+\r
+ /**\r
+ * Construct a subnet matcher given a CIDR\r
+ *\r
+ * @param subnet The CIDR to match\r
+ */\r
+ public SubnetMatcher(String subnet) {\r
+ int i = subnet.lastIndexOf('/');\r
+ if (i == -1) {\r
+ try {\r
+ sn = InetAddress.getByName(subnet).getAddress();\r
+ len = sn.length;\r
+ valid = true;\r
+ } catch (UnknownHostException e) {\r
+ len = 0;\r
+ valid = false;\r
+ }\r
+ mask = 0;\r
+ } else {\r
+ int n = Integer.parseInt(subnet.substring(i + 1));\r
+ try {\r
+ sn = InetAddress.getByName(subnet.substring(0, i)).getAddress();\r
+ valid = true;\r
+ } catch (UnknownHostException e) {\r
+ valid = false;\r
+ }\r
+ len = n / 8;\r
+ mask = ((0xff00) >> (n % 8)) & 0xff;\r
+ }\r
+ }\r
+\r
+ public boolean isValid() {\r
+ return valid;\r
+ }\r
+\r
+ /**\r
+ * Is the IP address in the CIDR?\r
+ *\r
+ * @param addr the IP address as bytes in network byte order\r
+ * @return true if the IP address matches.\r
+ */\r
+ public boolean matches(byte[] addr) {\r
+ if (!valid || addr.length != sn.length) {\r
+ return false;\r
+ }\r
+ for (int i = 0; i < len; i++) {\r
+ if (addr[i] != sn[i]) {\r
+ return false;\r
+ }\r
+ }\r
+ if (mask != 0 && ((addr[len] ^ sn[len]) & mask) != 0) {\r
+ return false;\r
+ }\r
+ return true;\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Get the list of node names for this route.\r
+ *\r
+ * @return the list\r
+ */\r
+ public SortedSet<String> getNodes() {\r
+ return this.nodes;\r
+ }\r
+\r
+ private Collection<String> readNodes() {\r
+ Collection<String> set = new TreeSet<String>();\r
+ PreparedStatement ps = null;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ String sql = "select NODEID from NODESETS where SETID = ?";\r
+ ps = conn.prepareStatement(sql);\r
+ ps.setInt(1, nodelist);\r
+ ResultSet rs = ps.executeQuery();\r
+ while (rs.next()) {\r
+ int id = rs.getInt("NODEID");\r
+ set.add(lookupNodeID(id));\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return set;\r
+ }\r
+\r
+ /**\r
+ * Delete the IRT route having this IngressRoutes feed ID, user ID, and subnet from the database.\r
+ *\r
+ * @return true if the delete succeeded\r
+ */\r
+ @Override\r
+ public boolean doDelete(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ ps = c.prepareStatement("delete from INGRESS_ROUTES where FEEDID = ? and USERID = ? and SUBNET = ?");\r
+ ps.setInt(1, feedid);\r
+ ps.setString(2, userid);\r
+ ps.setString(3, subnet);\r
+ ps.execute();\r
+ ps.close();\r
+\r
+ ps = c.prepareStatement("delete from NODESETS where SETID = ?");\r
+ ps.setInt(1, nodelist);\r
+ ps.execute();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0007 doDelete: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @SuppressWarnings("resource")\r
+ @Override\r
+ public boolean doInsert(Connection c) {\r
+ boolean rv = false;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ // Create the NODESETS rows & set nodelist\r
+ int set = getMaxNodeSetID() + 1;\r
+ this.nodelist = set;\r
+ for (String node : nodes) {\r
+ int id = lookupNodeName(node);\r
+ ps = c.prepareStatement("insert into NODESETS (SETID, NODEID) values (?,?)");\r
+ ps.setInt(1, this.nodelist);\r
+ ps.setInt(2, id);\r
+ ps.execute();\r
+ ps.close();\r
+ }\r
+\r
+ // Create the INGRESS_ROUTES row\r
+ ps = c.prepareStatement("insert into INGRESS_ROUTES (SEQUENCE, FEEDID, USERID, SUBNET, NODESET) values (?, ?, ?, ?, ?)");\r
+ ps.setInt(1, this.seq);\r
+ ps.setInt(2, this.feedid);\r
+ ps.setString(3, this.userid);\r
+ ps.setString(4, this.subnet);\r
+ ps.setInt(5, this.nodelist);\r
+ ps.execute();\r
+ ps.close();\r
+ rv = true;\r
+ } catch (SQLException e) {\r
+ intlogger.warn("PROV0005 doInsert: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @Override\r
+ public boolean doUpdate(Connection c) {\r
+ return doDelete(c) && doInsert(c);\r
+ }\r
+\r
+ @Override\r
+ public JSONObject asJSONObject() {\r
+ JSONObject jo = new JSONObject();\r
+ jo.put("feedid", feedid);\r
+ // Note: for user and subnet, null, "", and "-" are equivalent\r
+ if (userid != null && !userid.equals("-") && !userid.equals(""))\r
+ jo.put("user", userid);\r
+ if (subnet != null && !subnet.equals("-") && !subnet.equals(""))\r
+ jo.put("subnet", subnet);\r
+ jo.put("seq", seq);\r
+ jo.put("node", nodes);\r
+ return jo;\r
+ }\r
+\r
+ @Override\r
+ public String getKey() {\r
+ return String.format("%d/%s/%s/%d", feedid, (userid == null) ? "" : userid, (subnet == null) ? "" : subnet, seq);\r
+ }\r
+\r
+ @Override\r
+ public int hashCode() {\r
+ return toString().hashCode();\r
+ }\r
+\r
+ @Override\r
+ public boolean equals(Object obj) {\r
+ try {\r
+ if (!(obj instanceof IngressRoute))\r
+ return false;\r
+ return this.compareTo((IngressRoute) obj) == 0;\r
+ } catch (NullPointerException e) {\r
+ return false;\r
+ }\r
+ }\r
+\r
+ @Override\r
+ public int compareTo(IngressRoute in) {\r
+ if (in == null)\r
+ throw new NullPointerException();\r
+ int n = this.feedid - in.feedid;\r
+ if (n != 0)\r
+ return n;\r
+ n = this.seq - in.seq;\r
+ if (n != 0)\r
+ return n;\r
+ n = this.userid.compareTo(in.userid);\r
+ if (n != 0)\r
+ return n;\r
+ n = this.subnet.compareTo(in.subnet);\r
+ if (n != 0)\r
+ return n;\r
+ return this.nodes.equals(in.nodes) ? 0 : 1;\r
+ }\r
+\r
+ @Override\r
+ public String toString() {\r
+ return String.format("INGRESS: feed=%d, userid=%s, subnet=%s, seq=%d", feedid, (userid == null) ? "" : userid, (subnet == null) ? "" : subnet, seq);\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* An object that can be INSERT-ed into the database.\r
+ *\r
* @author Robert Eby\r
* @version $Id: Insertable.java,v 1.2 2013/05/29 14:44:36 eby Exp $\r
*/\r
public interface Insertable {\r
- /**\r
- * Insert this object into the DB.\r
- * @param c the JDBC Connection to use\r
- * @return true if the INSERT succeeded, false otherwise\r
- */\r
- public boolean doInsert(Connection c);\r
+ /**\r
+ * Insert this object into the DB.\r
+ *\r
+ * @param c the JDBC Connection to use\r
+ * @return true if the INSERT succeeded, false otherwise\r
+ */\r
+ public boolean doInsert(Connection c);\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* An object that can be represented as a {@link JSONObject}.\r
+ *\r
* @author Robert Eby\r
* @version $Id: JSONable.java,v 1.1 2013/04/26 21:00:26 eby Exp $\r
*/\r
public interface JSONable {\r
- /**\r
- * Get a JSONObject representing this object.\r
- * @return the JSONObject\r
- */\r
- public JSONObject asJSONObject();\r
+ /**\r
+ * Get a JSONObject representing this object.\r
+ *\r
+ * @return the JSONObject\r
+ */\r
+ public JSONObject asJSONObject();\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* An object that can be represented as a {@link LOGJSONObject}.\r
+ *\r
* @author Robert Eby\r
* @version $Id: JSONable.java,v 1.1 2013/04/26 21:00:26 eby Exp $\r
*/\r
public interface LOGJSONable {\r
- /**\r
- * Get a JSONObject representing this object.\r
- * @return the JSONObject\r
- */\r
- public LOGJSONObject asJSONObject();\r
+ /**\r
+ * Get a JSONObject representing this object.\r
+ *\r
+ * @return the JSONObject\r
+ */\r
+ public LOGJSONObject asJSONObject();\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: Loadable.java,v 1.2 2013/08/06 13:28:33 eby Exp $\r
*/\r
public interface Loadable {\r
- /**\r
- * Load the 18 fields in the PreparedStatement <i>ps</i>. The fields are:\r
- * <ol>\r
- * <li>type (String)</li>\r
- * <li>event_time (long)</li>\r
- * <li>publish ID (String)</li>\r
- * <li>feed ID (int)</li>\r
- * <li>request URI (String)</li>\r
- * <li>method (String)</li>\r
- * <li>content type (String)</li>\r
- * <li>content length (long)</li>\r
- * <li>feed File ID (String)</li>\r
- * <li>remote address (String)</li>\r
- * <li>user (String)</li>\r
- * <li>status (int)</li>\r
- * <li>delivery subscriber id (int)</li>\r
- * <li>delivery File ID (String)</li>\r
- * <li>result (int)</li>\r
- * <li>attempts (int)</li>\r
- * <li>reason (String)</li>\r
- * <li>record ID (long)</li>\r
- * </ol>\r
- * @param ps the PreparedStatement to load\r
- */\r
- public void load(PreparedStatement ps) throws SQLException;\r
+ /**\r
+ * Load the 18 fields in the PreparedStatement <i>ps</i>. The fields are:\r
+ * <ol>\r
+ * <li>type (String)</li>\r
+ * <li>event_time (long)</li>\r
+ * <li>publish ID (String)</li>\r
+ * <li>feed ID (int)</li>\r
+ * <li>request URI (String)</li>\r
+ * <li>method (String)</li>\r
+ * <li>content type (String)</li>\r
+ * <li>content length (long)</li>\r
+ * <li>feed File ID (String)</li>\r
+ * <li>remote address (String)</li>\r
+ * <li>user (String)</li>\r
+ * <li>status (int)</li>\r
+ * <li>delivery subscriber id (int)</li>\r
+ * <li>delivery File ID (String)</li>\r
+ * <li>result (int)</li>\r
+ * <li>attempts (int)</li>\r
+ * <li>reason (String)</li>\r
+ * <li>record ID (long)</li>\r
+ * </ol>\r
+ *\r
+ * @param ps the PreparedStatement to load\r
+ */\r
+ public void load(PreparedStatement ps) throws SQLException;\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
/**\r
* The representation of a Log Record, as retrieved from the DB. Since this record format is only used\r
* to replicate between provisioning servers, it is very bare-bones; e.g. there are no field setters and only 1 getter.\r
+ *\r
* @author Robert Eby\r
* @version $Id: LogRecord.java,v 1.7 2014/03/12 19:45:41 eby Exp $\r
*/\r
public class LogRecord extends BaseLogRecord {\r
- /**\r
- * Print all log records whose RECORD_IDs are in the bit set provided.\r
- * @param os the {@link OutputStream} to print the records on\r
- * @param bs the {@link RLEBitSet} listing the record IDs to print\r
- * @throws IOException\r
- */\r
- public static void printLogRecords(OutputStream os, RLEBitSet bs) throws IOException {\r
- final String sql = "select * from LOG_RECORDS where RECORD_ID >= ? AND RECORD_ID <= ?";\r
- DB db = new DB();\r
- Connection conn = null;\r
- try {\r
- conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- Iterator<Long[]> iter = bs.getRangeIterator();\r
- PreparedStatement ps = conn.prepareStatement(sql);\r
- while (iter.hasNext()) {\r
- Long[] n = iter.next();\r
- ps.setLong(1, n[0]);\r
- ps.setLong(2, n[1]);\r
- ResultSet rs = ps.executeQuery();\r
- while (rs.next()) {\r
- LogRecord lr = new LogRecord(rs);\r
- os.write(lr.toString().getBytes());\r
- }\r
- rs.close();\r
- ps.clearParameters();\r
- }\r
- ps.close();\r
- stmt.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- } finally {\r
- if (conn != null)\r
- db.release(conn);\r
- }\r
- }\r
-\r
- private final String type;\r
- private final String feedFileid;\r
- private final String remoteAddr;\r
- private final String user;\r
- private final int status;\r
- private final int subid;\r
- private final String fileid;\r
- private final int result;\r
- private final int attempts;\r
- private final String reason;\r
- private final long record_id;\r
- private final long clength2;\r
-\r
- public LogRecord(ResultSet rs) throws SQLException {\r
- super(rs);\r
- this.type = rs.getString("TYPE");\r
- this.feedFileid = rs.getString("FEED_FILEID");\r
- this.remoteAddr = rs.getString("REMOTE_ADDR");\r
- this.user = rs.getString("USER");\r
- this.status = rs.getInt("STATUS");\r
-\r
- this.subid = rs.getInt("DELIVERY_SUBID");\r
- this.fileid = rs.getString("DELIVERY_FILEID");\r
- this.result = rs.getInt("RESULT");\r
-\r
- this.attempts = rs.getInt("ATTEMPTS");\r
- this.reason = rs.getString("REASON");\r
-\r
- this.record_id = rs.getLong("RECORD_ID");\r
- this.clength2 = rs.getLong("CONTENT_LENGTH_2");\r
- }\r
- public LogRecord(String[] pp) throws ParseException {\r
- super(pp);\r
- this.type = pp[8];\r
- this.feedFileid = pp[9];\r
- this.remoteAddr = pp[10];\r
- this.user = pp[11];\r
- this.status = Integer.parseInt(pp[12]);\r
-\r
- this.subid = Integer.parseInt(pp[13]);\r
- this.fileid = pp[14];\r
- this.result = Integer.parseInt(pp[15]);\r
-\r
- this.attempts = Integer.parseInt(pp[16]);\r
- this.reason = pp[17];\r
-\r
- this.record_id = Long.parseLong(pp[18]);\r
- this.clength2 = (pp.length == 20) ? Long.parseLong(pp[19]) : 0;\r
- }\r
-\r
- public long getRecordId() {\r
- return record_id;\r
- }\r
-\r
- @Override\r
- public String toString() {\r
- return\r
- sdf.format(getEventTime()) + "|"\r
- + "LOG|"\r
- + getPublishId() + "|"\r
- + getFeedid() + "|"\r
- + getRequestUri() + "|"\r
- + getMethod() + "|"\r
- + getContentType() + "|"\r
- + getContentLength() + "|"\r
- + type + "|"\r
- + feedFileid + "|"\r
- + remoteAddr + "|"\r
- + user + "|"\r
- + status + "|"\r
- + subid + "|"\r
- + fileid + "|"\r
- + result + "|"\r
- + attempts + "|"\r
- + reason + "|"\r
- + record_id + "|"\r
- + clength2\r
- + "\n";\r
- }\r
-\r
- @Override\r
- public void load(PreparedStatement ps) throws SQLException {\r
- ps.setString(1, type);\r
- super.load(ps); // loads fields 2-8\r
- if (type.equals("pub")) {\r
- ps.setString(9, feedFileid);\r
- ps.setString(10, remoteAddr);\r
- ps.setString(11, user);\r
- ps.setInt (12, status);\r
- ps.setNull (13, Types.INTEGER);\r
- ps.setNull (14, Types.VARCHAR);\r
- ps.setNull (15, Types.INTEGER);\r
- ps.setNull (16, Types.INTEGER);\r
- ps.setNull (17, Types.VARCHAR);\r
- ps.setLong (18, record_id);\r
- ps.setNull (19, Types.BIGINT);\r
- } else if (type.equals("del")) {\r
- ps.setNull (9, Types.VARCHAR);\r
- ps.setNull (10, Types.VARCHAR);\r
- ps.setString(11, user);\r
- ps.setNull (12, Types.INTEGER);\r
- ps.setInt (13, subid);\r
- ps.setString(14, fileid);\r
- ps.setInt (15, result);\r
- ps.setNull (16, Types.INTEGER);\r
- ps.setNull (17, Types.VARCHAR);\r
- ps.setLong (18, record_id);\r
- ps.setNull (19, Types.BIGINT);\r
- } else if (type.equals("exp")) {\r
- ps.setNull (9, Types.VARCHAR);\r
- ps.setNull (10, Types.VARCHAR);\r
- ps.setNull (11, Types.VARCHAR);\r
- ps.setNull (12, Types.INTEGER);\r
- ps.setInt (13, subid);\r
- ps.setString(14, fileid);\r
- ps.setNull (15, Types.INTEGER);\r
- ps.setInt (16, attempts);\r
- ps.setString(17, reason);\r
- ps.setLong (18, record_id);\r
- ps.setNull (19, Types.BIGINT);\r
- } else if (type.equals("pbf")) {\r
- ps.setString( 9, feedFileid);\r
- ps.setString(10, remoteAddr);\r
- ps.setString(11, user);\r
- ps.setNull (12, Types.INTEGER);\r
- ps.setNull (13, Types.INTEGER);\r
- ps.setNull (14, Types.VARCHAR);\r
- ps.setNull (15, Types.INTEGER);\r
- ps.setNull (16, Types.INTEGER);\r
- ps.setNull (17, Types.VARCHAR);\r
- ps.setLong (18, record_id);\r
- ps.setLong (19, clength2);\r
- } else if (type.equals("dlx")) {\r
- ps.setNull ( 9, Types.VARCHAR);\r
- ps.setNull (10, Types.VARCHAR);\r
- ps.setNull (11, Types.VARCHAR);\r
- ps.setNull (12, Types.INTEGER);\r
- ps.setInt (13, subid);\r
- ps.setNull (14, Types.VARCHAR);\r
- ps.setNull (15, Types.INTEGER);\r
- ps.setNull (16, Types.INTEGER);\r
- ps.setNull (17, Types.VARCHAR);\r
- ps.setLong (18, record_id);\r
- ps.setLong (19, clength2);\r
- }\r
- }\r
-\r
- public static void main(String[] a) throws IOException {\r
- LogRecord.printLogRecords(System.out, new RLEBitSet(a[0]));\r
- }\r
+ /**\r
+ * Print all log records whose RECORD_IDs are in the bit set provided.\r
+ *\r
+ * @param os the {@link OutputStream} to print the records on\r
+ * @param bs the {@link RLEBitSet} listing the record IDs to print\r
+ * @throws IOException\r
+ */\r
+ public static void printLogRecords(OutputStream os, RLEBitSet bs) throws IOException {\r
+ final String sql = "select * from LOG_RECORDS where RECORD_ID >= ? AND RECORD_ID <= ?";\r
+ DB db = new DB();\r
+ Connection conn = null;\r
+ try {\r
+ conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ Iterator<Long[]> iter = bs.getRangeIterator();\r
+ PreparedStatement ps = conn.prepareStatement(sql);\r
+ while (iter.hasNext()) {\r
+ Long[] n = iter.next();\r
+ ps.setLong(1, n[0]);\r
+ ps.setLong(2, n[1]);\r
+ ResultSet rs = ps.executeQuery();\r
+ while (rs.next()) {\r
+ LogRecord lr = new LogRecord(rs);\r
+ os.write(lr.toString().getBytes());\r
+ }\r
+ rs.close();\r
+ ps.clearParameters();\r
+ }\r
+ ps.close();\r
+ stmt.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ } finally {\r
+ if (conn != null)\r
+ db.release(conn);\r
+ }\r
+ }\r
+\r
+ private final String type;\r
+ private final String feedFileid;\r
+ private final String remoteAddr;\r
+ private final String user;\r
+ private final int status;\r
+ private final int subid;\r
+ private final String fileid;\r
+ private final int result;\r
+ private final int attempts;\r
+ private final String reason;\r
+ private final long record_id;\r
+ private final long clength2;\r
+\r
+ public LogRecord(ResultSet rs) throws SQLException {\r
+ super(rs);\r
+ this.type = rs.getString("TYPE");\r
+ this.feedFileid = rs.getString("FEED_FILEID");\r
+ this.remoteAddr = rs.getString("REMOTE_ADDR");\r
+ this.user = rs.getString("USER");\r
+ this.status = rs.getInt("STATUS");\r
+\r
+ this.subid = rs.getInt("DELIVERY_SUBID");\r
+ this.fileid = rs.getString("DELIVERY_FILEID");\r
+ this.result = rs.getInt("RESULT");\r
+\r
+ this.attempts = rs.getInt("ATTEMPTS");\r
+ this.reason = rs.getString("REASON");\r
+\r
+ this.record_id = rs.getLong("RECORD_ID");\r
+ this.clength2 = rs.getLong("CONTENT_LENGTH_2");\r
+ }\r
+\r
+ public LogRecord(String[] pp) throws ParseException {\r
+ super(pp);\r
+ this.type = pp[8];\r
+ this.feedFileid = pp[9];\r
+ this.remoteAddr = pp[10];\r
+ this.user = pp[11];\r
+ this.status = Integer.parseInt(pp[12]);\r
+\r
+ this.subid = Integer.parseInt(pp[13]);\r
+ this.fileid = pp[14];\r
+ this.result = Integer.parseInt(pp[15]);\r
+\r
+ this.attempts = Integer.parseInt(pp[16]);\r
+ this.reason = pp[17];\r
+\r
+ this.record_id = Long.parseLong(pp[18]);\r
+ this.clength2 = (pp.length == 20) ? Long.parseLong(pp[19]) : 0;\r
+ }\r
+\r
+ public long getRecordId() {\r
+ return record_id;\r
+ }\r
+\r
+ @Override\r
+ public String toString() {\r
+ return\r
+ sdf.format(getEventTime()) + "|"\r
+ + "LOG|"\r
+ + getPublishId() + "|"\r
+ + getFeedid() + "|"\r
+ + getRequestUri() + "|"\r
+ + getMethod() + "|"\r
+ + getContentType() + "|"\r
+ + getContentLength() + "|"\r
+ + type + "|"\r
+ + feedFileid + "|"\r
+ + remoteAddr + "|"\r
+ + user + "|"\r
+ + status + "|"\r
+ + subid + "|"\r
+ + fileid + "|"\r
+ + result + "|"\r
+ + attempts + "|"\r
+ + reason + "|"\r
+ + record_id + "|"\r
+ + clength2\r
+ + "\n";\r
+ }\r
+\r
+ @Override\r
+ public void load(PreparedStatement ps) throws SQLException {\r
+ ps.setString(1, type);\r
+ super.load(ps); // loads fields 2-8\r
+ if (type.equals("pub")) {\r
+ ps.setString(9, feedFileid);\r
+ ps.setString(10, remoteAddr);\r
+ ps.setString(11, user);\r
+ ps.setInt(12, status);\r
+ ps.setNull(13, Types.INTEGER);\r
+ ps.setNull(14, Types.VARCHAR);\r
+ ps.setNull(15, Types.INTEGER);\r
+ ps.setNull(16, Types.INTEGER);\r
+ ps.setNull(17, Types.VARCHAR);\r
+ ps.setLong(18, record_id);\r
+ ps.setNull(19, Types.BIGINT);\r
+ } else if (type.equals("del")) {\r
+ ps.setNull(9, Types.VARCHAR);\r
+ ps.setNull(10, Types.VARCHAR);\r
+ ps.setString(11, user);\r
+ ps.setNull(12, Types.INTEGER);\r
+ ps.setInt(13, subid);\r
+ ps.setString(14, fileid);\r
+ ps.setInt(15, result);\r
+ ps.setNull(16, Types.INTEGER);\r
+ ps.setNull(17, Types.VARCHAR);\r
+ ps.setLong(18, record_id);\r
+ ps.setNull(19, Types.BIGINT);\r
+ } else if (type.equals("exp")) {\r
+ ps.setNull(9, Types.VARCHAR);\r
+ ps.setNull(10, Types.VARCHAR);\r
+ ps.setNull(11, Types.VARCHAR);\r
+ ps.setNull(12, Types.INTEGER);\r
+ ps.setInt(13, subid);\r
+ ps.setString(14, fileid);\r
+ ps.setNull(15, Types.INTEGER);\r
+ ps.setInt(16, attempts);\r
+ ps.setString(17, reason);\r
+ ps.setLong(18, record_id);\r
+ ps.setNull(19, Types.BIGINT);\r
+ } else if (type.equals("pbf")) {\r
+ ps.setString(9, feedFileid);\r
+ ps.setString(10, remoteAddr);\r
+ ps.setString(11, user);\r
+ ps.setNull(12, Types.INTEGER);\r
+ ps.setNull(13, Types.INTEGER);\r
+ ps.setNull(14, Types.VARCHAR);\r
+ ps.setNull(15, Types.INTEGER);\r
+ ps.setNull(16, Types.INTEGER);\r
+ ps.setNull(17, Types.VARCHAR);\r
+ ps.setLong(18, record_id);\r
+ ps.setLong(19, clength2);\r
+ } else if (type.equals("dlx")) {\r
+ ps.setNull(9, Types.VARCHAR);\r
+ ps.setNull(10, Types.VARCHAR);\r
+ ps.setNull(11, Types.VARCHAR);\r
+ ps.setNull(12, Types.INTEGER);\r
+ ps.setInt(13, subid);\r
+ ps.setNull(14, Types.VARCHAR);\r
+ ps.setNull(15, Types.INTEGER);\r
+ ps.setNull(16, Types.INTEGER);\r
+ ps.setNull(17, Types.VARCHAR);\r
+ ps.setLong(18, record_id);\r
+ ps.setLong(19, clength2);\r
+ }\r
+ }\r
+\r
+ public static void main(String[] a) throws IOException {\r
+ LogRecord.printLogRecords(System.out, new RLEBitSet(a[0]));\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: NetworkRoute.java,v 1.2 2013/12/16 20:30:23 eby Exp $\r
*/\r
public class NetworkRoute extends NodeClass implements Comparable<NetworkRoute> {\r
- private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
- private final int fromnode;\r
- private final int tonode;\r
- private final int vianode;\r
-\r
- /**\r
- * Get a set of all Network Routes in the DB. The set is sorted according to the natural sorting order\r
- * of the routes (based on the from and to node names in each route).\r
- * @return the sorted set\r
- */\r
- public static SortedSet<NetworkRoute> getAllNetworkRoutes() {\r
- SortedSet<NetworkRoute> set = new TreeSet<NetworkRoute>();\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery("select FROMNODE, TONODE, VIANODE from NETWORK_ROUTES");\r
- while (rs.next()) {\r
- int fromnode = rs.getInt("FROMNODE");\r
- int tonode = rs.getInt("TONODE");\r
- int vianode = rs.getInt("VIANODE");\r
- set.add(new NetworkRoute(fromnode, tonode, vianode));\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- return set;\r
- }\r
-\r
- public NetworkRoute(String fromnode, String tonode) throws IllegalArgumentException {\r
- this.fromnode = lookupNodeName(fromnode);\r
- this.tonode = lookupNodeName(tonode);\r
- this.vianode = -1;\r
- }\r
-\r
- public NetworkRoute(String fromnode, String tonode, String vianode) throws IllegalArgumentException {\r
- this.fromnode = lookupNodeName(fromnode);\r
- this.tonode = lookupNodeName(tonode);\r
- this.vianode = lookupNodeName(vianode);\r
- }\r
-\r
- public NetworkRoute(JSONObject jo) throws IllegalArgumentException {\r
- this.fromnode = lookupNodeName(jo.getString("from"));\r
- this.tonode = lookupNodeName(jo.getString("to"));\r
- this.vianode = lookupNodeName(jo.getString("via"));\r
- }\r
-\r
- public NetworkRoute(int fromnode, int tonode, int vianode) throws IllegalArgumentException {\r
- this.fromnode = fromnode;\r
- this.tonode = tonode;\r
- this.vianode = vianode;\r
- }\r
-\r
- public int getFromnode() {\r
- return fromnode;\r
- }\r
-\r
- public int getTonode() {\r
- return tonode;\r
- }\r
-\r
- public int getVianode() {\r
- return vianode;\r
- }\r
-\r
- @Override\r
- public boolean doDelete(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- String sql = "delete from NETWORK_ROUTES where FROMNODE = ? AND TONODE = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setInt(1, fromnode);\r
- ps.setInt(2, tonode);\r
- ps.execute();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0007 doDelete: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
-\r
- @Override\r
- public boolean doInsert(Connection c) {\r
- boolean rv = false;\r
- if (this.vianode >= 0) {\r
- PreparedStatement ps = null;\r
- try {\r
- // Create the NETWORK_ROUTES row\r
- String sql = "insert into NETWORK_ROUTES (FROMNODE, TONODE, VIANODE) values (?, ?, ?)";\r
- ps = c.prepareStatement(sql);\r
- ps.setInt(1, this.fromnode);\r
- ps.setInt(2, this.tonode);\r
- ps.setInt(3, this.vianode);\r
- ps.execute();\r
- ps.close();\r
- rv = true;\r
- } catch (SQLException e) {\r
- intlogger.warn("PROV0005 doInsert: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- }\r
- return rv;\r
- }\r
-\r
- @Override\r
- public boolean doUpdate(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- String sql = "update NETWORK_ROUTES set VIANODE = ? where FROMNODE = ? and TONODE = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setInt(1, vianode);\r
- ps.setInt(2, fromnode);\r
- ps.setInt(3, tonode);\r
- ps.executeUpdate();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0006 doUpdate: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
-\r
- @Override\r
- public JSONObject asJSONObject() {\r
- JSONObject jo = new JSONObject();\r
- jo.put("from", lookupNodeID(fromnode));\r
- jo.put("to", lookupNodeID(tonode));\r
- jo.put("via", lookupNodeID(vianode));\r
- return jo;\r
- }\r
-\r
- @Override\r
- public String getKey() {\r
- return lookupNodeID(fromnode)+":"+lookupNodeID(tonode);\r
- }\r
-\r
- @Override\r
- public boolean equals(Object obj) {\r
- if (!(obj instanceof NetworkRoute))\r
- return false;\r
- NetworkRoute on = (NetworkRoute)obj;\r
- return (fromnode == on.fromnode) && (tonode == on.tonode) && (vianode == on.vianode);\r
- }\r
-\r
- @Override\r
- public int compareTo(NetworkRoute o) {\r
- if (this.fromnode == o.fromnode) {\r
- if (this.tonode == o.tonode)\r
- return this.vianode - o.vianode;\r
- return this.tonode - o.tonode;\r
- }\r
- return this.fromnode - o.fromnode;\r
- }\r
-\r
- @Override\r
- public String toString() {\r
- return String.format("NETWORK: from=%d, to=%d, via=%d", fromnode, tonode, vianode);\r
- }\r
+ private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
+ private final int fromnode;\r
+ private final int tonode;\r
+ private final int vianode;\r
+\r
+ /**\r
+ * Get a set of all Network Routes in the DB. The set is sorted according to the natural sorting order\r
+ * of the routes (based on the from and to node names in each route).\r
+ *\r
+ * @return the sorted set\r
+ */\r
+ public static SortedSet<NetworkRoute> getAllNetworkRoutes() {\r
+ SortedSet<NetworkRoute> set = new TreeSet<NetworkRoute>();\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery("select FROMNODE, TONODE, VIANODE from NETWORK_ROUTES");\r
+ while (rs.next()) {\r
+ int fromnode = rs.getInt("FROMNODE");\r
+ int tonode = rs.getInt("TONODE");\r
+ int vianode = rs.getInt("VIANODE");\r
+ set.add(new NetworkRoute(fromnode, tonode, vianode));\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ return set;\r
+ }\r
+\r
+ public NetworkRoute(String fromnode, String tonode) throws IllegalArgumentException {\r
+ this.fromnode = lookupNodeName(fromnode);\r
+ this.tonode = lookupNodeName(tonode);\r
+ this.vianode = -1;\r
+ }\r
+\r
+ public NetworkRoute(String fromnode, String tonode, String vianode) throws IllegalArgumentException {\r
+ this.fromnode = lookupNodeName(fromnode);\r
+ this.tonode = lookupNodeName(tonode);\r
+ this.vianode = lookupNodeName(vianode);\r
+ }\r
+\r
+ public NetworkRoute(JSONObject jo) throws IllegalArgumentException {\r
+ this.fromnode = lookupNodeName(jo.getString("from"));\r
+ this.tonode = lookupNodeName(jo.getString("to"));\r
+ this.vianode = lookupNodeName(jo.getString("via"));\r
+ }\r
+\r
+ public NetworkRoute(int fromnode, int tonode, int vianode) throws IllegalArgumentException {\r
+ this.fromnode = fromnode;\r
+ this.tonode = tonode;\r
+ this.vianode = vianode;\r
+ }\r
+\r
+ public int getFromnode() {\r
+ return fromnode;\r
+ }\r
+\r
+ public int getTonode() {\r
+ return tonode;\r
+ }\r
+\r
+ public int getVianode() {\r
+ return vianode;\r
+ }\r
+\r
+ @Override\r
+ public boolean doDelete(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ String sql = "delete from NETWORK_ROUTES where FROMNODE = ? AND TONODE = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setInt(1, fromnode);\r
+ ps.setInt(2, tonode);\r
+ ps.execute();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0007 doDelete: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @Override\r
+ public boolean doInsert(Connection c) {\r
+ boolean rv = false;\r
+ if (this.vianode >= 0) {\r
+ PreparedStatement ps = null;\r
+ try {\r
+ // Create the NETWORK_ROUTES row\r
+ String sql = "insert into NETWORK_ROUTES (FROMNODE, TONODE, VIANODE) values (?, ?, ?)";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setInt(1, this.fromnode);\r
+ ps.setInt(2, this.tonode);\r
+ ps.setInt(3, this.vianode);\r
+ ps.execute();\r
+ ps.close();\r
+ rv = true;\r
+ } catch (SQLException e) {\r
+ intlogger.warn("PROV0005 doInsert: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @Override\r
+ public boolean doUpdate(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ String sql = "update NETWORK_ROUTES set VIANODE = ? where FROMNODE = ? and TONODE = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setInt(1, vianode);\r
+ ps.setInt(2, fromnode);\r
+ ps.setInt(3, tonode);\r
+ ps.executeUpdate();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0006 doUpdate: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @Override\r
+ public JSONObject asJSONObject() {\r
+ JSONObject jo = new JSONObject();\r
+ jo.put("from", lookupNodeID(fromnode));\r
+ jo.put("to", lookupNodeID(tonode));\r
+ jo.put("via", lookupNodeID(vianode));\r
+ return jo;\r
+ }\r
+\r
+ @Override\r
+ public String getKey() {\r
+ return lookupNodeID(fromnode) + ":" + lookupNodeID(tonode);\r
+ }\r
+\r
+ @Override\r
+ public boolean equals(Object obj) {\r
+ if (!(obj instanceof NetworkRoute))\r
+ return false;\r
+ NetworkRoute on = (NetworkRoute) obj;\r
+ return (fromnode == on.fromnode) && (tonode == on.tonode) && (vianode == on.vianode);\r
+ }\r
+\r
+ @Override\r
+ public int compareTo(NetworkRoute o) {\r
+ if (this.fromnode == o.fromnode) {\r
+ if (this.tonode == o.tonode)\r
+ return this.vianode - o.vianode;\r
+ return this.tonode - o.tonode;\r
+ }\r
+ return this.fromnode - o.fromnode;\r
+ }\r
+\r
+ @Override\r
+ public String toString() {\r
+ return String.format("NETWORK: from=%d, to=%d, via=%d", fromnode, tonode, vianode);\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: NodeClass.java,v 1.2 2014/01/15 16:08:43 eby Exp $\r
*/\r
public abstract class NodeClass extends Syncable {\r
- private static Map<String, Integer> map;\r
+ private static Map<String, Integer> map;\r
\r
- public NodeClass() {\r
- // init on first use\r
- if (map == null) {\r
- reload();\r
- }\r
- }\r
+ public NodeClass() {\r
+ // init on first use\r
+ if (map == null) {\r
+ reload();\r
+ }\r
+ }\r
\r
- /**\r
- * Add nodes to the NODES table, when the NODES parameter value is changed.\r
- * Nodes are only added to the table, they are never deleted. The node name is normalized\r
- * to contain the domain (if missing).\r
- * @param nodes a pipe separated list of the current nodes\r
- */\r
- public static void setNodes(String[] nodes) {\r
- if (map == null)\r
- reload();\r
- int nextid = 0;\r
- for (Integer n : map.values()) {\r
- if (n >= nextid)\r
- nextid = n+1;\r
- }\r
- // take | separated list, add domain if needed.\r
- Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
- for (String node : nodes) {\r
- node = normalizeNodename(node);\r
- if (!map.containsKey(node)) {\r
- intlogger.info("..adding "+node+" to NODES with index "+nextid);\r
- map.put(node, nextid);\r
- PreparedStatement ps = null;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- ps = conn.prepareStatement("insert into NODES (NODEID, NAME, ACTIVE) values (?, ?, 1)");\r
- ps.setInt(1, nextid);\r
- ps.setString(2, node);\r
- ps.execute();\r
- ps.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- intlogger.warn("PROV0005 doInsert: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- nextid++;\r
- }\r
- }\r
- }\r
+ /**\r
+ * Add nodes to the NODES table, when the NODES parameter value is changed.\r
+ * Nodes are only added to the table, they are never deleted. The node name is normalized\r
+ * to contain the domain (if missing).\r
+ *\r
+ * @param nodes a pipe separated list of the current nodes\r
+ */\r
+ public static void setNodes(String[] nodes) {\r
+ if (map == null)\r
+ reload();\r
+ int nextid = 0;\r
+ for (Integer n : map.values()) {\r
+ if (n >= nextid)\r
+ nextid = n + 1;\r
+ }\r
+ // take | separated list, add domain if needed.\r
+ Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
+ for (String node : nodes) {\r
+ node = normalizeNodename(node);\r
+ if (!map.containsKey(node)) {\r
+ intlogger.info("..adding " + node + " to NODES with index " + nextid);\r
+ map.put(node, nextid);\r
+ PreparedStatement ps = null;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ ps = conn.prepareStatement("insert into NODES (NODEID, NAME, ACTIVE) values (?, ?, 1)");\r
+ ps.setInt(1, nextid);\r
+ ps.setString(2, node);\r
+ ps.execute();\r
+ ps.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ intlogger.warn("PROV0005 doInsert: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ nextid++;\r
+ }\r
+ }\r
+ }\r
\r
- public static void reload() {\r
- Map<String, Integer> m = new HashMap<String, Integer>();\r
- PreparedStatement ps = null;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- String sql = "select NODEID, NAME from NODES";\r
- ps = conn.prepareStatement(sql);\r
- ResultSet rs = ps.executeQuery();\r
- while (rs.next()) {\r
- int id = rs.getInt("NODEID");\r
- String name = rs.getString("NAME");\r
- m.put(name, id);\r
- }\r
- rs.close();\r
- ps.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- map = m;\r
- }\r
+ public static void reload() {\r
+ Map<String, Integer> m = new HashMap<String, Integer>();\r
+ PreparedStatement ps = null;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ String sql = "select NODEID, NAME from NODES";\r
+ ps = conn.prepareStatement(sql);\r
+ ResultSet rs = ps.executeQuery();\r
+ while (rs.next()) {\r
+ int id = rs.getInt("NODEID");\r
+ String name = rs.getString("NAME");\r
+ m.put(name, id);\r
+ }\r
+ rs.close();\r
+ ps.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ map = m;\r
+ }\r
\r
- public static Integer lookupNodeName(final String name) throws IllegalArgumentException {\r
- Integer n = map.get(name);\r
- if (n == null)\r
- throw new IllegalArgumentException("Invalid node name: "+name);\r
- return n;\r
- }\r
+ public static Integer lookupNodeName(final String name) throws IllegalArgumentException {\r
+ Integer n = map.get(name);\r
+ if (n == null)\r
+ throw new IllegalArgumentException("Invalid node name: " + name);\r
+ return n;\r
+ }\r
\r
- public static Collection<String> lookupNodeNames(String patt) throws IllegalArgumentException {\r
- Collection<String> coll = new TreeSet<String>();\r
- final Set<String> keyset = map.keySet();\r
- for (String s : patt.toLowerCase().split(",")) {\r
- if (s.endsWith("*")) {\r
- s = s.substring(0, s.length()-1);\r
- for (String s2 : keyset) {\r
- if (s2.startsWith(s))\r
- coll.add(s2);\r
- }\r
- } else if (keyset.contains(s)) {\r
- coll.add(s);\r
- } else if (keyset.contains(normalizeNodename(s))) {\r
- coll.add(normalizeNodename(s));\r
- } else {\r
- throw new IllegalArgumentException("Invalid node name: "+s);\r
- }\r
- }\r
- return coll;\r
- }\r
+ public static Collection<String> lookupNodeNames(String patt) throws IllegalArgumentException {\r
+ Collection<String> coll = new TreeSet<String>();\r
+ final Set<String> keyset = map.keySet();\r
+ for (String s : patt.toLowerCase().split(",")) {\r
+ if (s.endsWith("*")) {\r
+ s = s.substring(0, s.length() - 1);\r
+ for (String s2 : keyset) {\r
+ if (s2.startsWith(s))\r
+ coll.add(s2);\r
+ }\r
+ } else if (keyset.contains(s)) {\r
+ coll.add(s);\r
+ } else if (keyset.contains(normalizeNodename(s))) {\r
+ coll.add(normalizeNodename(s));\r
+ } else {\r
+ throw new IllegalArgumentException("Invalid node name: " + s);\r
+ }\r
+ }\r
+ return coll;\r
+ }\r
\r
- protected String lookupNodeID(int n) {\r
- for (String s : map.keySet()) {\r
- if (map.get(s) == n)\r
- return s;\r
- }\r
- return null;\r
- }\r
+ protected String lookupNodeID(int n) {\r
+ for (String s : map.keySet()) {\r
+ if (map.get(s) == n)\r
+ return s;\r
+ }\r
+ return null;\r
+ }\r
\r
- public static String normalizeNodename(String s) {\r
- if (s != null && s.indexOf('.') <= 0) {\r
- Parameters p = Parameters.getParameter(Parameters.PROV_DOMAIN);\r
- if (p != null) {\r
- String domain = p.getValue();\r
- s += "." + domain;\r
- }\r
- }\r
- return s.toLowerCase();\r
- }\r
+ public static String normalizeNodename(String s) {\r
+ if (s != null && s.indexOf('.') <= 0) {\r
+ Parameters p = Parameters.getParameter(Parameters.PROV_DOMAIN);\r
+ if (p != null) {\r
+ String domain = p.getValue();\r
+ s += "." + domain;\r
+ }\r
+ }\r
+ return s.toLowerCase();\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
/**\r
* Methods to provide access to Provisioning parameters in the DB.\r
* This class also provides constants of the standard parameters used by the Data Router.\r
+ *\r
* @author Robert Eby\r
* @version $Id: Parameters.java,v 1.11 2014/03/12 19:45:41 eby Exp $\r
*/\r
public class Parameters extends Syncable {\r
- public static final String PROV_REQUIRE_SECURE = "PROV_REQUIRE_SECURE";\r
- public static final String PROV_REQUIRE_CERT = "PROV_REQUIRE_CERT";\r
- public static final String PROV_AUTH_ADDRESSES = "PROV_AUTH_ADDRESSES";\r
- public static final String PROV_AUTH_SUBJECTS = "PROV_AUTH_SUBJECTS";\r
- public static final String PROV_NAME = "PROV_NAME";\r
- public static final String PROV_ACTIVE_NAME = "PROV_ACTIVE_NAME";\r
- public static final String PROV_DOMAIN = "PROV_DOMAIN";\r
- public static final String PROV_MAXFEED_COUNT = "PROV_MAXFEED_COUNT";\r
- public static final String PROV_MAXSUB_COUNT = "PROV_MAXSUB_COUNT";\r
- public static final String PROV_POKETIMER1 = "PROV_POKETIMER1";\r
- public static final String PROV_POKETIMER2 = "PROV_POKETIMER2";\r
- public static final String PROV_SPECIAL_SUBNET = "PROV_SPECIAL_SUBNET";\r
- public static final String PROV_LOG_RETENTION = "PROV_LOG_RETENTION";\r
- public static final String NODES = "NODES";\r
- public static final String ACTIVE_POD = "ACTIVE_POD";\r
- public static final String STANDBY_POD = "STANDBY_POD";\r
- public static final String LOGROLL_INTERVAL = "LOGROLL_INTERVAL";\r
- public static final String DELIVERY_INIT_RETRY_INTERVAL = "DELIVERY_INIT_RETRY_INTERVAL";\r
- public static final String DELIVERY_MAX_RETRY_INTERVAL = "DELIVERY_MAX_RETRY_INTERVAL";\r
- public static final String DELIVERY_RETRY_RATIO = "DELIVERY_RETRY_RATIO";\r
- public static final String DELIVERY_MAX_AGE = "DELIVERY_MAX_AGE";\r
- public static final String THROTTLE_FILTER = "THROTTLE_FILTER";\r
- public static final String STATIC_ROUTING_NODES = "STATIC_ROUTING_NODES"; //Adding new param for static Routing - Rally:US664862-1610\r
+ public static final String PROV_REQUIRE_SECURE = "PROV_REQUIRE_SECURE";\r
+ public static final String PROV_REQUIRE_CERT = "PROV_REQUIRE_CERT";\r
+ public static final String PROV_AUTH_ADDRESSES = "PROV_AUTH_ADDRESSES";\r
+ public static final String PROV_AUTH_SUBJECTS = "PROV_AUTH_SUBJECTS";\r
+ public static final String PROV_NAME = "PROV_NAME";\r
+ public static final String PROV_ACTIVE_NAME = "PROV_ACTIVE_NAME";\r
+ public static final String PROV_DOMAIN = "PROV_DOMAIN";\r
+ public static final String PROV_MAXFEED_COUNT = "PROV_MAXFEED_COUNT";\r
+ public static final String PROV_MAXSUB_COUNT = "PROV_MAXSUB_COUNT";\r
+ public static final String PROV_POKETIMER1 = "PROV_POKETIMER1";\r
+ public static final String PROV_POKETIMER2 = "PROV_POKETIMER2";\r
+ public static final String PROV_SPECIAL_SUBNET = "PROV_SPECIAL_SUBNET";\r
+ public static final String PROV_LOG_RETENTION = "PROV_LOG_RETENTION";\r
+ public static final String NODES = "NODES";\r
+ public static final String ACTIVE_POD = "ACTIVE_POD";\r
+ public static final String STANDBY_POD = "STANDBY_POD";\r
+ public static final String LOGROLL_INTERVAL = "LOGROLL_INTERVAL";\r
+ public static final String DELIVERY_INIT_RETRY_INTERVAL = "DELIVERY_INIT_RETRY_INTERVAL";\r
+ public static final String DELIVERY_MAX_RETRY_INTERVAL = "DELIVERY_MAX_RETRY_INTERVAL";\r
+ public static final String DELIVERY_RETRY_RATIO = "DELIVERY_RETRY_RATIO";\r
+ public static final String DELIVERY_MAX_AGE = "DELIVERY_MAX_AGE";\r
+ public static final String THROTTLE_FILTER = "THROTTLE_FILTER";\r
+ public static final String STATIC_ROUTING_NODES = "STATIC_ROUTING_NODES"; //Adding new param for static Routing - Rally:US664862-1610\r
+\r
+ private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
+\r
+ private String keyname;\r
+ private String value;\r
+\r
+ /**\r
+ * Get all parameters in the DB as a Map.\r
+ *\r
+ * @return the Map of keynames/values from the DB.\r
+ */\r
+ public static Map<String, String> getParameters() {\r
+ Map<String, String> props = new HashMap<String, String>();\r
+ for (Parameters p : getParameterCollection()) {\r
+ props.put(p.getKeyname(), p.getValue());\r
+ }\r
+ return props;\r
+ }\r
+\r
+ public static Collection<Parameters> getParameterCollection() {\r
+ Collection<Parameters> coll = new ArrayList<Parameters>();\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ String sql = "select * from PARAMETERS";\r
+ ResultSet rs = stmt.executeQuery(sql);\r
+ while (rs.next()) {\r
+ Parameters p = new Parameters(rs);\r
+ coll.add(p);\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ return coll;\r
+ }\r
+\r
+ /**\r
+ * Get a specific parameter value from the DB.\r
+ *\r
+ * @param k the key to lookup\r
+ * @return the value, or null if non-existant\r
+ */\r
+ public static Parameters getParameter(String k) {\r
+ Parameters v = null;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ String sql = "select KEYNAME, VALUE from PARAMETERS where KEYNAME = \"" + k + "\"";\r
+ ResultSet rs = stmt.executeQuery(sql);\r
+ if (rs.next()) {\r
+ v = new Parameters(rs);\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ return v;\r
+ }\r
+\r
+ public Parameters() {\r
+ this("", "");\r
+ }\r
+\r
+ public Parameters(String k, String v) {\r
+ this.keyname = k;\r
+ this.value = v;\r
+ }\r
+\r
+ public Parameters(ResultSet rs) throws SQLException {\r
+ this.keyname = rs.getString("KEYNAME");\r
+ this.value = rs.getString("VALUE");\r
+ }\r
+\r
+ public String getKeyname() {\r
+ return keyname;\r
+ }\r
+\r
+ public void setKeyname(String keyname) {\r
+ this.keyname = keyname;\r
+ }\r
+\r
+ public String getValue() {\r
+ return value;\r
+ }\r
+\r
+ public void setValue(String value) {\r
+ this.value = value;\r
+ }\r
+\r
+ @Override\r
+ public JSONObject asJSONObject() {\r
+ JSONObject jo = new JSONObject();\r
+ jo.put("keyname", keyname);\r
+ jo.put("value", value);\r
+ return jo;\r
+ }\r
+\r
+ @Override\r
+ public boolean doInsert(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ // Create the SUBSCRIPTIONS row\r
+ String sql = "insert into PARAMETERS values (?, ?)";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setString(1, getKeyname());\r
+ ps.setString(2, getValue());\r
+ ps.execute();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0005 doInsert: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
\r
- private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
+ @Override\r
+ public boolean doUpdate(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ // Update the PARAMETERS row\r
+ String sql = "update PARAMETERS set VALUE = ? where KEYNAME = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setString(1, getValue());\r
+ ps.setString(2, getKeyname());\r
+ ps.executeUpdate();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0006 doUpdate: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
\r
- private String keyname;\r
- private String value;\r
+ @Override\r
+ public boolean doDelete(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ // Create the SUBSCRIPTIONS row\r
+ String sql = "delete from PARAMETERS where KEYNAME = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setString(1, getKeyname());\r
+ ps.execute();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0007 doDelete: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
\r
- /**\r
- * Get all parameters in the DB as a Map.\r
- * @return the Map of keynames/values from the DB.\r
- */\r
- public static Map<String,String> getParameters() {\r
- Map<String,String> props = new HashMap<String,String>();\r
- for (Parameters p : getParameterCollection()) {\r
- props.put(p.getKeyname(), p.getValue());\r
- }\r
- return props;\r
- }\r
- public static Collection<Parameters> getParameterCollection() {\r
- Collection<Parameters> coll = new ArrayList<Parameters>();\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- String sql = "select * from PARAMETERS";\r
- ResultSet rs = stmt.executeQuery(sql);\r
- while (rs.next()) {\r
- Parameters p = new Parameters(rs);\r
- coll.add(p);\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- return coll;\r
- }\r
- /**\r
- * Get a specific parameter value from the DB.\r
- * @param k the key to lookup\r
- * @return the value, or null if non-existant\r
- */\r
- public static Parameters getParameter(String k) {\r
- Parameters v = null;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- String sql = "select KEYNAME, VALUE from PARAMETERS where KEYNAME = \"" + k + "\"";\r
- ResultSet rs = stmt.executeQuery(sql);\r
- if (rs.next()) {\r
- v = new Parameters(rs);\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- return v;\r
- }\r
+ @Override\r
+ public String getKey() {\r
+ return getKeyname();\r
+ }\r
\r
- public Parameters() {\r
- this("", "");\r
- }\r
- public Parameters(String k, String v) {\r
- this.keyname = k;\r
- this.value = v;\r
- }\r
- public Parameters(ResultSet rs) throws SQLException {\r
- this.keyname = rs.getString("KEYNAME");\r
- this.value = rs.getString("VALUE");\r
- }\r
- public String getKeyname() {\r
- return keyname;\r
- }\r
- public void setKeyname(String keyname) {\r
- this.keyname = keyname;\r
- }\r
- public String getValue() {\r
- return value;\r
- }\r
- public void setValue(String value) {\r
- this.value = value;\r
- }\r
- @Override\r
- public JSONObject asJSONObject() {\r
- JSONObject jo = new JSONObject();\r
- jo.put("keyname", keyname);\r
- jo.put("value", value);\r
- return jo;\r
- }\r
- @Override\r
- public boolean doInsert(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- // Create the SUBSCRIPTIONS row\r
- String sql = "insert into PARAMETERS values (?, ?)";\r
- ps = c.prepareStatement(sql);\r
- ps.setString(1, getKeyname());\r
- ps.setString(2, getValue());\r
- ps.execute();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0005 doInsert: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
- @Override\r
- public boolean doUpdate(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- // Update the PARAMETERS row\r
- String sql = "update PARAMETERS set VALUE = ? where KEYNAME = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setString(1, getValue());\r
- ps.setString(2, getKeyname());\r
- ps.executeUpdate();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0006 doUpdate: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
- @Override\r
- public boolean doDelete(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- // Create the SUBSCRIPTIONS row\r
- String sql = "delete from PARAMETERS where KEYNAME = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setString(1, getKeyname());\r
- ps.execute();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0007 doDelete: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
- @Override\r
- public String getKey() {\r
- return getKeyname();\r
- }\r
- @Override\r
- public boolean equals(Object obj) {\r
- if (!(obj instanceof Parameters))\r
- return false;\r
- Parameters of = (Parameters) obj;\r
- if (!keyname.equals(of.keyname))\r
- return false;\r
- if (!value.equals(of.value))\r
- return false;\r
- return true;\r
- }\r
+ @Override\r
+ public boolean equals(Object obj) {\r
+ if (!(obj instanceof Parameters))\r
+ return false;\r
+ Parameters of = (Parameters) obj;\r
+ if (!keyname.equals(of.keyname))\r
+ return false;\r
+ if (!value.equals(of.value))\r
+ return false;\r
+ return true;\r
+ }\r
\r
- @Override\r
- public String toString() {\r
- return "PARAM: keyname=" + keyname + ", value=" + value;\r
- }\r
+ @Override\r
+ public String toString() {\r
+ return "PARAM: keyname=" + keyname + ", value=" + value;\r
+ }\r
}\r
\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* The representation of a Publish Failure (PBF) Record, as retrieved from the DB.\r
+ *\r
* @author Robert Eby\r
* @version $Id: PubFailRecord.java,v 1.1 2013/10/28 18:06:53 eby Exp $\r
*/\r
public class PubFailRecord extends BaseLogRecord {\r
- private long contentLengthReceived;\r
- private String sourceIP;\r
- private String user;\r
- private String error;\r
+ private long contentLengthReceived;\r
+ private String sourceIP;\r
+ private String user;\r
+ private String error;\r
+\r
+ public PubFailRecord(String[] pp) throws ParseException {\r
+ super(pp);\r
+ this.contentLengthReceived = Long.parseLong(pp[8]);\r
+ this.sourceIP = pp[9];\r
+ this.user = pp[10];\r
+ this.error = pp[11];\r
+ }\r
+\r
+ public PubFailRecord(ResultSet rs) throws SQLException {\r
+ super(rs);\r
+ // Note: because this record should be "rare" these fields are mapped to unconventional fields in the DB\r
+ this.contentLengthReceived = rs.getLong("CONTENT_LENGTH_2");\r
+ this.sourceIP = rs.getString("REMOTE_ADDR");\r
+ this.user = rs.getString("USER");\r
+ this.error = rs.getString("FEED_FILEID");\r
+ }\r
+\r
+ public long getContentLengthReceived() {\r
+ return contentLengthReceived;\r
+ }\r
+\r
+ public String getSourceIP() {\r
+ return sourceIP;\r
+ }\r
+\r
+ public String getUser() {\r
+ return user;\r
+ }\r
+\r
+ public String getError() {\r
+ return error;\r
+ }\r
\r
- public PubFailRecord(String[] pp) throws ParseException {\r
- super(pp);\r
- this.contentLengthReceived = Long.parseLong(pp[8]);\r
- this.sourceIP = pp[9];\r
- this.user = pp[10];\r
- this.error = pp[11];\r
- }\r
- public PubFailRecord(ResultSet rs) throws SQLException {\r
- super(rs);\r
- // Note: because this record should be "rare" these fields are mapped to unconventional fields in the DB\r
- this.contentLengthReceived = rs.getLong("CONTENT_LENGTH_2");\r
- this.sourceIP = rs.getString("REMOTE_ADDR");\r
- this.user = rs.getString("USER");\r
- this.error = rs.getString("FEED_FILEID");\r
- }\r
- public long getContentLengthReceived() {\r
- return contentLengthReceived;\r
- }\r
- public String getSourceIP() {\r
- return sourceIP;\r
- }\r
- public String getUser() {\r
- return user;\r
- }\r
- public String getError() {\r
- return error;\r
- }\r
- @Override\r
- public void load(PreparedStatement ps) throws SQLException {\r
- ps.setString(1, "pbf"); // field 1: type\r
- super.load(ps); // loads fields 2-8\r
- ps.setString( 9, getError());\r
- ps.setString(10, getSourceIP());\r
- ps.setString(11, getUser());\r
- ps.setNull (12, Types.INTEGER);\r
- ps.setNull (13, Types.INTEGER);\r
- ps.setNull (14, Types.VARCHAR);\r
- ps.setNull (15, Types.INTEGER);\r
- ps.setNull (16, Types.INTEGER);\r
- ps.setNull (17, Types.VARCHAR);\r
- ps.setLong (19, getContentLengthReceived());\r
- }\r
+ @Override\r
+ public void load(PreparedStatement ps) throws SQLException {\r
+ ps.setString(1, "pbf"); // field 1: type\r
+ super.load(ps); // loads fields 2-8\r
+ ps.setString(9, getError());\r
+ ps.setString(10, getSourceIP());\r
+ ps.setString(11, getUser());\r
+ ps.setNull(12, Types.INTEGER);\r
+ ps.setNull(13, Types.INTEGER);\r
+ ps.setNull(14, Types.VARCHAR);\r
+ ps.setNull(15, Types.INTEGER);\r
+ ps.setNull(16, Types.INTEGER);\r
+ ps.setNull(17, Types.VARCHAR);\r
+ ps.setLong(19, getContentLengthReceived());\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* The representation of a Publish Record, as retrieved from the DB.\r
+ *\r
* @author Robert Eby\r
* @version $Id: PublishRecord.java,v 1.6 2013/10/28 18:06:53 eby Exp $\r
*/\r
public class PublishRecord extends BaseLogRecord {\r
- private String feedFileid;\r
- private String remoteAddr;\r
- private String user;\r
- private int status;\r
-\r
- public PublishRecord(String[] pp) throws ParseException {\r
- super(pp);\r
-// This is too slow!\r
-// Matcher m = Pattern.compile(".*/publish/(\\d+)/(.*)$").matcher(pp[4]);\r
-// if (!m.matches())\r
-// throw new ParseException("bad pattern", 0);\r
-// this.feedFileid = m.group(2);\r
- int ix = pp[4].indexOf("/publish/");\r
- if (ix < 0)\r
- throw new ParseException("bad pattern", 0);\r
- ix = pp[4].indexOf('/', ix+9);\r
- if (ix < 0)\r
- throw new ParseException("bad pattern", 0);\r
- this.feedFileid = pp[4].substring(ix+1);\r
- this.remoteAddr = pp[8];\r
- this.user = pp[9];\r
- this.status = Integer.parseInt(pp[10]);\r
- }\r
- public PublishRecord(ResultSet rs) throws SQLException {\r
- super(rs);\r
- this.feedFileid = rs.getString("FEED_FILEID");\r
- this.remoteAddr = rs.getString("REMOTE_ADDR");\r
- this.user = rs.getString("USER");\r
- this.status = rs.getInt("STATUS");\r
- }\r
- public String getFeedFileid() {\r
- return feedFileid;\r
- }\r
-\r
- public void setFeedFileid(String feedFileid) {\r
- this.feedFileid = feedFileid;\r
- }\r
-\r
- public String getRemoteAddr() {\r
- return remoteAddr;\r
- }\r
-\r
- public void setRemoteAddr(String remoteAddr) {\r
- this.remoteAddr = remoteAddr;\r
- }\r
-\r
- public String getUser() {\r
- return user;\r
- }\r
-\r
- public void setUser(String user) {\r
- this.user = user;\r
- }\r
-\r
- public int getStatus() {\r
- return status;\r
- }\r
-\r
- public void setStatus(int status) {\r
- this.status = status;\r
- }\r
- \r
- \r
- public LOGJSONObject reOrderObject(LOGJSONObject jo) {\r
- LinkedHashMap<String,Object> logrecordObj = new LinkedHashMap<String,Object>();\r
- \r
- \r
- logrecordObj.put("statusCode", jo.get("statusCode"));\r
- logrecordObj.put("publishId", jo.get("publishId"));\r
- logrecordObj.put("requestURI", jo.get("requestURI"));\r
- logrecordObj.put("sourceIP", jo.get("sourceIP"));\r
- logrecordObj.put("method", jo.get("method"));\r
- logrecordObj.put("contentType", jo.get("contentType"));\r
- logrecordObj.put("endpointId", jo.get("endpointId"));\r
- logrecordObj.put("type", jo.get("type"));\r
- logrecordObj.put("date", jo.get("date"));\r
- logrecordObj.put("contentLength", jo.get("contentLength"));\r
- \r
- LOGJSONObject newjo = new LOGJSONObject(logrecordObj);\r
- return newjo;\r
- }\r
- \r
- @Override\r
- public LOGJSONObject asJSONObject() {\r
- LOGJSONObject jo = super.asJSONObject();\r
- jo.put("type", "pub");\r
-// jo.put("feedFileid", feedFileid);\r
-// jo.put("remoteAddr", remoteAddr);\r
-// jo.put("user", user);\r
- jo.put("sourceIP", remoteAddr);\r
- jo.put("endpointId", user);\r
- jo.put("statusCode", status);\r
- \r
- LOGJSONObject newjo = this.reOrderObject(jo);\r
- \r
- return newjo;\r
- }\r
- @Override\r
- public void load(PreparedStatement ps) throws SQLException {\r
- ps.setString(1, "pub"); // field 1: type\r
- super.load(ps); // loads fields 2-8\r
- ps.setString( 9, getFeedFileid());\r
- ps.setString(10, getRemoteAddr());\r
- ps.setString(11, getUser());\r
- ps.setInt (12, getStatus());\r
- ps.setNull (13, Types.INTEGER);\r
- ps.setNull (14, Types.VARCHAR);\r
- ps.setNull (15, Types.INTEGER);\r
- ps.setNull (16, Types.INTEGER);\r
- ps.setNull (17, Types.VARCHAR);\r
- ps.setNull (19, Types.BIGINT);\r
- }\r
+ private String feedFileid;\r
+ private String remoteAddr;\r
+ private String user;\r
+ private int status;\r
+\r
+ public PublishRecord(String[] pp) throws ParseException {\r
+ super(pp);\r
+// This is too slow!\r
+// Matcher m = Pattern.compile(".*/publish/(\\d+)/(.*)$").matcher(pp[4]);\r
+// if (!m.matches())\r
+// throw new ParseException("bad pattern", 0);\r
+// this.feedFileid = m.group(2);\r
+ int ix = pp[4].indexOf("/publish/");\r
+ if (ix < 0)\r
+ throw new ParseException("bad pattern", 0);\r
+ ix = pp[4].indexOf('/', ix + 9);\r
+ if (ix < 0)\r
+ throw new ParseException("bad pattern", 0);\r
+ this.feedFileid = pp[4].substring(ix + 1);\r
+ this.remoteAddr = pp[8];\r
+ this.user = pp[9];\r
+ this.status = Integer.parseInt(pp[10]);\r
+ }\r
+\r
+ public PublishRecord(ResultSet rs) throws SQLException {\r
+ super(rs);\r
+ this.feedFileid = rs.getString("FEED_FILEID");\r
+ this.remoteAddr = rs.getString("REMOTE_ADDR");\r
+ this.user = rs.getString("USER");\r
+ this.status = rs.getInt("STATUS");\r
+ }\r
+\r
+ public String getFeedFileid() {\r
+ return feedFileid;\r
+ }\r
+\r
+ public void setFeedFileid(String feedFileid) {\r
+ this.feedFileid = feedFileid;\r
+ }\r
+\r
+ public String getRemoteAddr() {\r
+ return remoteAddr;\r
+ }\r
+\r
+ public void setRemoteAddr(String remoteAddr) {\r
+ this.remoteAddr = remoteAddr;\r
+ }\r
+\r
+ public String getUser() {\r
+ return user;\r
+ }\r
+\r
+ public void setUser(String user) {\r
+ this.user = user;\r
+ }\r
+\r
+ public int getStatus() {\r
+ return status;\r
+ }\r
+\r
+ public void setStatus(int status) {\r
+ this.status = status;\r
+ }\r
+\r
+\r
+ public LOGJSONObject reOrderObject(LOGJSONObject jo) {\r
+ LinkedHashMap<String, Object> logrecordObj = new LinkedHashMap<String, Object>();\r
+\r
+\r
+ logrecordObj.put("statusCode", jo.get("statusCode"));\r
+ logrecordObj.put("publishId", jo.get("publishId"));\r
+ logrecordObj.put("requestURI", jo.get("requestURI"));\r
+ logrecordObj.put("sourceIP", jo.get("sourceIP"));\r
+ logrecordObj.put("method", jo.get("method"));\r
+ logrecordObj.put("contentType", jo.get("contentType"));\r
+ logrecordObj.put("endpointId", jo.get("endpointId"));\r
+ logrecordObj.put("type", jo.get("type"));\r
+ logrecordObj.put("date", jo.get("date"));\r
+ logrecordObj.put("contentLength", jo.get("contentLength"));\r
+\r
+ LOGJSONObject newjo = new LOGJSONObject(logrecordObj);\r
+ return newjo;\r
+ }\r
+\r
+ @Override\r
+ public LOGJSONObject asJSONObject() {\r
+ LOGJSONObject jo = super.asJSONObject();\r
+ jo.put("type", "pub");\r
+// jo.put("feedFileid", feedFileid);\r
+// jo.put("remoteAddr", remoteAddr);\r
+// jo.put("user", user);\r
+ jo.put("sourceIP", remoteAddr);\r
+ jo.put("endpointId", user);\r
+ jo.put("statusCode", status);\r
+\r
+ LOGJSONObject newjo = this.reOrderObject(jo);\r
+\r
+ return newjo;\r
+ }\r
+\r
+ @Override\r
+ public void load(PreparedStatement ps) throws SQLException {\r
+ ps.setString(1, "pub"); // field 1: type\r
+ super.load(ps); // loads fields 2-8\r
+ ps.setString(9, getFeedFileid());\r
+ ps.setString(10, getRemoteAddr());\r
+ ps.setString(11, getUser());\r
+ ps.setInt(12, getStatus());\r
+ ps.setNull(13, Types.INTEGER);\r
+ ps.setNull(14, Types.VARCHAR);\r
+ ps.setNull(15, Types.INTEGER);\r
+ ps.setNull(16, Types.INTEGER);\r
+ ps.setNull(17, Types.VARCHAR);\r
+ ps.setNull(19, Types.BIGINT);\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
/**\r
* The representation of Subscription delivery information. This includes the URL to deliver to,\r
* login and password, and whether to use the "HTTP 100-continue" feature for this subscription.\r
+ *\r
* @author Robert Eby\r
* @version $Id: SubDelivery.java,v 1.2 2013/06/20 14:11:05 eby Exp $\r
*/\r
public class SubDelivery implements JSONable {\r
- private String url;\r
- private String user;\r
- private String password;\r
- private boolean use100;\r
-\r
- public SubDelivery() {\r
- this("", "", "", false);\r
- }\r
- public SubDelivery(String url, String user, String password, boolean use100) {\r
- this.url = url;\r
- this.user = user;\r
- this.password = password;\r
- this.use100 = use100;\r
- }\r
- public SubDelivery(ResultSet rs) throws SQLException {\r
- this.url = rs.getString("DELIVERY_URL");\r
- this.user = rs.getString("DELIVERY_USER");\r
- this.password = rs.getString("DELIVERY_PASSWORD");\r
- this.use100 = rs.getBoolean("DELIVERY_USE100");\r
-\r
- }\r
- public String getUrl() {\r
- return url;\r
- }\r
- public void setUrl(String url) {\r
- this.url = url;\r
- }\r
- public String getUser() {\r
- return user;\r
- }\r
- public void setUser(String user) {\r
- this.user = user;\r
- }\r
- public String getPassword() {\r
- return password;\r
- }\r
- public void setPassword(String password) {\r
- this.password = password;\r
- }\r
-\r
- public boolean isUse100() {\r
- return use100;\r
- }\r
- public void setUse100(boolean use100) {\r
- this.use100 = use100;\r
- }\r
- @Override\r
- public JSONObject asJSONObject() {\r
- JSONObject jo = new JSONObject();\r
- jo.put("url", url);\r
- jo.put("user", user);\r
- jo.put("password", password);\r
- jo.put("use100", use100);\r
- return jo;\r
- }\r
- @Override\r
- public boolean equals(Object obj) {\r
- if (!(obj instanceof SubDelivery))\r
- return false;\r
- SubDelivery os = (SubDelivery) obj;\r
- if (!url.equals(os.url))\r
- return false;\r
- if (!user.equals(os.user))\r
- return false;\r
- if (!password.equals(os.password))\r
- return false;\r
- if (use100 != os.use100)\r
- return false;\r
- return true;\r
- }\r
+ private String url;\r
+ private String user;\r
+ private String password;\r
+ private boolean use100;\r
+\r
+ public SubDelivery() {\r
+ this("", "", "", false);\r
+ }\r
+\r
+ public SubDelivery(String url, String user, String password, boolean use100) {\r
+ this.url = url;\r
+ this.user = user;\r
+ this.password = password;\r
+ this.use100 = use100;\r
+ }\r
+\r
+ public SubDelivery(ResultSet rs) throws SQLException {\r
+ this.url = rs.getString("DELIVERY_URL");\r
+ this.user = rs.getString("DELIVERY_USER");\r
+ this.password = rs.getString("DELIVERY_PASSWORD");\r
+ this.use100 = rs.getBoolean("DELIVERY_USE100");\r
+\r
+ }\r
+\r
+ public String getUrl() {\r
+ return url;\r
+ }\r
+\r
+ public void setUrl(String url) {\r
+ this.url = url;\r
+ }\r
+\r
+ public String getUser() {\r
+ return user;\r
+ }\r
+\r
+ public void setUser(String user) {\r
+ this.user = user;\r
+ }\r
+\r
+ public String getPassword() {\r
+ return password;\r
+ }\r
+\r
+ public void setPassword(String password) {\r
+ this.password = password;\r
+ }\r
+\r
+ public boolean isUse100() {\r
+ return use100;\r
+ }\r
+\r
+ public void setUse100(boolean use100) {\r
+ this.use100 = use100;\r
+ }\r
+\r
+ @Override\r
+ public JSONObject asJSONObject() {\r
+ JSONObject jo = new JSONObject();\r
+ jo.put("url", url);\r
+ jo.put("user", user);\r
+ jo.put("password", password);\r
+ jo.put("use100", use100);\r
+ return jo;\r
+ }\r
+\r
+ @Override\r
+ public boolean equals(Object obj) {\r
+ if (!(obj instanceof SubDelivery))\r
+ return false;\r
+ SubDelivery os = (SubDelivery) obj;\r
+ if (!url.equals(os.url))\r
+ return false;\r
+ if (!user.equals(os.user))\r
+ return false;\r
+ if (!password.equals(os.password))\r
+ return false;\r
+ if (use100 != os.use100)\r
+ return false;\r
+ return true;\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* The URLs associated with a Subscription.\r
+ *\r
* @author Robert Eby\r
* @version $Id: SubLinks.java,v 1.3 2013/07/05 13:48:05 eby Exp $\r
*/\r
public class SubLinks implements JSONable {\r
- private String self;\r
- private String feed;\r
- private String log;\r
-\r
- public SubLinks() {\r
- self = feed = log = null;\r
- }\r
- public SubLinks(JSONObject jo) throws InvalidObjectException {\r
- this();\r
- self = jo.getString("self");\r
- feed = jo.getString("feed");\r
- log = jo.getString("log");\r
- }\r
- public SubLinks(String self, String feed, String log) {\r
- this.self = self;\r
- this.feed = feed;\r
- this.log = log;\r
- }\r
- public String getSelf() {\r
- return self;\r
- }\r
- public void setSelf(String self) {\r
- this.self = self;\r
- }\r
- public String getFeed() {\r
- return feed;\r
- }\r
- public void setFeed(String feed) {\r
- this.feed = feed;\r
- }\r
- public String getLog() {\r
- return log;\r
- }\r
- public void setLog(String log) {\r
- this.log = log;\r
- }\r
-\r
- @Override\r
- public JSONObject asJSONObject() {\r
- JSONObject jo = new JSONObject();\r
- jo.put("self", self);\r
- jo.put("feed", feed);\r
- jo.put("log", log);\r
- return jo;\r
- }\r
- @Override\r
- public boolean equals(Object obj) {\r
- if (!(obj instanceof SubLinks))\r
- return false;\r
- SubLinks os = (SubLinks) obj;\r
- if (!self.equals(os.self))\r
- return false;\r
- if (!feed.equals(os.feed))\r
- return false;\r
- if (!log.equals(os.log))\r
- return false;\r
- return true;\r
- }\r
+ private String self;\r
+ private String feed;\r
+ private String log;\r
+\r
+ public SubLinks() {\r
+ self = feed = log = null;\r
+ }\r
+\r
+ public SubLinks(JSONObject jo) throws InvalidObjectException {\r
+ this();\r
+ self = jo.getString("self");\r
+ feed = jo.getString("feed");\r
+ log = jo.getString("log");\r
+ }\r
+\r
+ public SubLinks(String self, String feed, String log) {\r
+ this.self = self;\r
+ this.feed = feed;\r
+ this.log = log;\r
+ }\r
+\r
+ public String getSelf() {\r
+ return self;\r
+ }\r
+\r
+ public void setSelf(String self) {\r
+ this.self = self;\r
+ }\r
+\r
+ public String getFeed() {\r
+ return feed;\r
+ }\r
+\r
+ public void setFeed(String feed) {\r
+ this.feed = feed;\r
+ }\r
+\r
+ public String getLog() {\r
+ return log;\r
+ }\r
+\r
+ public void setLog(String log) {\r
+ this.log = log;\r
+ }\r
+\r
+ @Override\r
+ public JSONObject asJSONObject() {\r
+ JSONObject jo = new JSONObject();\r
+ jo.put("self", self);\r
+ jo.put("feed", feed);\r
+ jo.put("log", log);\r
+ return jo;\r
+ }\r
+\r
+ @Override\r
+ public boolean equals(Object obj) {\r
+ if (!(obj instanceof SubLinks))\r
+ return false;\r
+ SubLinks os = (SubLinks) obj;\r
+ if (!self.equals(os.self))\r
+ return false;\r
+ if (!feed.equals(os.feed))\r
+ return false;\r
+ if (!log.equals(os.log))\r
+ return false;\r
+ return true;\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* The representation of a Subscription. Subscriptions can be retrieved from the DB, or stored/updated in the DB.\r
+ *\r
* @author Robert Eby\r
* @version $Id: Subscription.java,v 1.9 2013/10/28 18:06:53 eby Exp $\r
*/\r
public class Subscription extends Syncable {\r
- private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
- private static int next_subid = getMaxSubID() + 1;\r
-\r
- private int subid;\r
- private int feedid;\r
- private int groupid; //New field is added - Groups feature Rally:US708115 - 1610\r
- private SubDelivery delivery;\r
- private boolean metadataOnly;\r
- private String subscriber;\r
- private SubLinks links;\r
- private boolean suspended;\r
- private Date last_mod;\r
- private Date created_date;\r
-\r
- public static Subscription getSubscriptionMatching(Subscription sub) {\r
- SubDelivery deli = sub.getDelivery();\r
- String sql = String.format(\r
- "select * from SUBSCRIPTIONS where FEEDID = %d and DELIVERY_URL = \"%s\" and DELIVERY_USER = \"%s\" and DELIVERY_PASSWORD = \"%s\" and DELIVERY_USE100 = %d and METADATA_ONLY = %d",\r
- sub.getFeedid(),\r
- deli.getUrl(),\r
- deli.getUser(),\r
- deli.getPassword(),\r
- deli.isUse100() ? 1 : 0,\r
- sub.isMetadataOnly() ? 1 : 0\r
- );\r
- List<Subscription> list = getSubscriptionsForSQL(sql);\r
- return list.size() > 0 ? list.get(0) : null;\r
- }\r
- public static Subscription getSubscriptionById(int id) {\r
- String sql = "select * from SUBSCRIPTIONS where SUBID = " + id;\r
- List<Subscription> list = getSubscriptionsForSQL(sql);\r
- return list.size() > 0 ? list.get(0) : null;\r
- }\r
- public static Collection<Subscription> getAllSubscriptions() {\r
- return getSubscriptionsForSQL("select * from SUBSCRIPTIONS");\r
- }\r
- private static List<Subscription> getSubscriptionsForSQL(String sql) {\r
- List<Subscription> list = new ArrayList<Subscription>();\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery(sql);\r
- while (rs.next()) {\r
- Subscription sub = new Subscription(rs);\r
- list.add(sub);\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- return list;\r
- }\r
- public static int getMaxSubID() {\r
- int max = 0;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery("select MAX(subid) from SUBSCRIPTIONS");\r
- if (rs.next()) {\r
- max = rs.getInt(1);\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- intlogger.info("getMaxSubID: "+e.getMessage());\r
- e.printStackTrace();\r
- }\r
- return max;\r
- }\r
- public static Collection<String> getSubscriptionUrlList(int feedid) {\r
- List<String> list = new ArrayList<String>();\r
- String sql = "select SUBID from SUBSCRIPTIONS where FEEDID = "+feedid;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery(sql);\r
- while (rs.next()) {\r
- int subid = rs.getInt("SUBID");\r
- list.add(URLUtilities.generateSubscriptionURL(subid));\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- return list;\r
- }\r
- /**\r
- * Return a count of the number of active subscriptions in the DB.\r
- * @return the count\r
- */\r
- public static int countActiveSubscriptions() {\r
- int count = 0;\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery("select count(*) from SUBSCRIPTIONS");\r
- if (rs.next()) {\r
- count = rs.getInt(1);\r
- }\r
- rs.close();\r
- stmt.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- intlogger.warn("PROV0008 countActiveSubscriptions: "+e.getMessage());\r
- e.printStackTrace();\r
- }\r
- return count;\r
- }\r
-\r
- public Subscription() {\r
- this("", "", "");\r
- }\r
- public Subscription(String url, String user, String password) {\r
- this.subid = -1;\r
- this.feedid = -1;\r
- this.groupid = -1; //New field is added - Groups feature Rally:US708115 - 1610\r
- this.delivery = new SubDelivery(url, user, password, false);\r
- this.metadataOnly = false;\r
- this.subscriber = "";\r
- this.links = new SubLinks();\r
- this.suspended = false;\r
- this.last_mod = new Date();\r
- this.created_date = new Date();\r
- }\r
- public Subscription(ResultSet rs) throws SQLException {\r
- this.subid = rs.getInt("SUBID");\r
- this.feedid = rs.getInt("FEEDID");\r
- this.groupid = rs.getInt("GROUPID"); //New field is added - Groups feature Rally:US708115 - 1610\r
- this.delivery = new SubDelivery(rs);\r
- this.metadataOnly = rs.getBoolean("METADATA_ONLY");\r
- this.subscriber = rs.getString("SUBSCRIBER");\r
- this.links = new SubLinks(rs.getString("SELF_LINK"), URLUtilities.generateFeedURL(feedid), rs.getString("LOG_LINK"));\r
- this.suspended = rs.getBoolean("SUSPENDED");\r
- this.last_mod = rs.getDate("LAST_MOD");\r
- this.created_date = rs.getDate("CREATED_DATE");\r
- }\r
- public Subscription(JSONObject jo) throws InvalidObjectException {\r
- this("", "", "");\r
- try {\r
- // The JSONObject is assumed to contain a vnd.att-dr.subscription representation\r
- this.subid = jo.optInt("subid", -1);\r
- this.feedid = jo.optInt("feedid", -1);\r
- this.groupid = jo.optInt("groupid", -1); //New field is added - Groups feature Rally:US708115 - 1610 \r
-\r
- JSONObject jdeli = jo.getJSONObject("delivery");\r
- String url = jdeli.getString("url");\r
- String user = jdeli.getString("user");\r
- String password = jdeli.getString("password");\r
- boolean use100 = jdeli.getBoolean("use100");\r
-\r
- \r
- //Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.\r
- Properties p = (new DB()).getProperties();\r
- if(p.get("org.onap.dmaap.datarouter.provserver.https.relaxation").toString().equals("false") && !jo.has("sync")) {\r
- if (!url.startsWith("https://"))\r
- throw new InvalidObjectException("delivery URL is not HTTPS");\r
- }\r
-\r
- if (url.length() > 256)\r
- throw new InvalidObjectException("delivery url field is too long");\r
- if (user.length() > 20)\r
- throw new InvalidObjectException("delivery user field is too long");\r
- if (password.length() > 32)\r
- throw new InvalidObjectException("delivery password field is too long");\r
- this.delivery = new SubDelivery(url, user, password, use100);\r
-\r
- this.metadataOnly = jo.getBoolean("metadataOnly");\r
- this.suspended = jo.optBoolean("suspend", false);\r
-\r
- this.subscriber = jo.optString("subscriber", "");\r
- JSONObject jol = jo.optJSONObject("links");\r
- this.links = (jol == null) ? (new SubLinks()) : (new SubLinks(jol));\r
- } catch (InvalidObjectException e) {\r
- throw e;\r
- } catch (Exception e) {\r
- throw new InvalidObjectException("invalid JSON: "+e.getMessage());\r
- }\r
- }\r
- public int getSubid() {\r
- return subid;\r
- }\r
- public void setSubid(int subid) {\r
- this.subid = subid;\r
-\r
- // Create link URLs\r
- SubLinks sl = getLinks();\r
- sl.setSelf(URLUtilities.generateSubscriptionURL(subid));\r
- sl.setLog(URLUtilities.generateSubLogURL(subid));\r
- }\r
- public int getFeedid() {\r
- return feedid;\r
- }\r
- public void setFeedid(int feedid) {\r
- this.feedid = feedid;\r
-\r
- // Create link URLs\r
- SubLinks sl = getLinks();\r
- sl.setFeed(URLUtilities.generateFeedURL(feedid));\r
- }\r
-\r
- //New getter setters for Groups feature Rally:US708115 - 1610\r
- public int getGroupid() { \r
- return groupid; \r
- } \r
- public void setGroupid(int groupid) { \r
- this.groupid = groupid; \r
- }\r
-\r
- public SubDelivery getDelivery() {\r
- return delivery;\r
- }\r
- public void setDelivery(SubDelivery delivery) {\r
- this.delivery = delivery;\r
- }\r
- public boolean isMetadataOnly() {\r
- return metadataOnly;\r
- }\r
- public void setMetadataOnly(boolean metadataOnly) {\r
- this.metadataOnly = metadataOnly;\r
- }\r
- public boolean isSuspended() {\r
- return suspended;\r
- }\r
- public void setSuspended(boolean suspended) {\r
- this.suspended = suspended;\r
- }\r
- public String getSubscriber() {\r
- return subscriber;\r
- }\r
- public void setSubscriber(String subscriber) {\r
- if (subscriber != null) {\r
- if (subscriber.length() > 8)\r
- subscriber = subscriber.substring(0, 8);\r
- this.subscriber = subscriber;\r
- }\r
- }\r
- public SubLinks getLinks() {\r
- return links;\r
- }\r
- public void setLinks(SubLinks links) {\r
- this.links = links;\r
- }\r
-\r
- @Override\r
- public JSONObject asJSONObject() {\r
- JSONObject jo = new JSONObject();\r
- jo.put("subid", subid);\r
- jo.put("feedid", feedid);\r
- jo.put("groupid", groupid); //New field is added - Groups feature Rally:US708115 - 1610\r
- jo.put("delivery", delivery.asJSONObject());\r
- jo.put("metadataOnly", metadataOnly);\r
- jo.put("subscriber", subscriber);\r
- jo.put("links", links.asJSONObject());\r
- jo.put("suspend", suspended);\r
- jo.put("last_mod", last_mod.getTime());\r
- jo.put("created_date", created_date.getTime());\r
- return jo;\r
- }\r
- public JSONObject asLimitedJSONObject() {\r
- JSONObject jo = asJSONObject();\r
- jo.remove("subid");\r
- jo.remove("feedid");\r
- jo.remove("last_mod");\r
- return jo;\r
- }\r
- public JSONObject asJSONObject(boolean hidepasswords) {\r
- JSONObject jo = asJSONObject();\r
- if (hidepasswords) {\r
- jo.remove("subid"); // we no longer hide passwords, however we do hide these\r
- jo.remove("feedid");\r
- jo.remove("last_mod");\r
- jo.remove("created_date");\r
- }\r
- return jo;\r
- }\r
- @Override\r
- public boolean doInsert(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- if (subid == -1) {\r
- // No feed ID assigned yet, so assign the next available one\r
- setSubid(next_subid++);\r
- }\r
- // In case we insert a feed from synchronization\r
- if (subid > next_subid)\r
- next_subid = subid+1;\r
-\r
- // Create the SUBSCRIPTIONS row\r
- String sql = "insert into SUBSCRIPTIONS (SUBID, FEEDID, DELIVERY_URL, DELIVERY_USER, DELIVERY_PASSWORD, DELIVERY_USE100, METADATA_ONLY, SUBSCRIBER, SUSPENDED, GROUPID) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";\r
- ps = c.prepareStatement(sql, new String[] { "SUBID" });\r
- ps.setInt(1, subid);\r
- ps.setInt(2, feedid);\r
- ps.setString(3, getDelivery().getUrl());\r
- ps.setString(4, getDelivery().getUser());\r
- ps.setString(5, getDelivery().getPassword());\r
- ps.setInt(6, getDelivery().isUse100()?1:0);\r
- ps.setInt(7, isMetadataOnly()?1:0);\r
- ps.setString(8, getSubscriber());\r
- ps.setBoolean(9, isSuspended());\r
- ps.setInt(10, groupid); //New field is added - Groups feature Rally:US708115 - 1610\r
- ps.execute();\r
- ps.close();\r
-// ResultSet rs = ps.getGeneratedKeys();\r
-// rs.first();\r
-// setSubid(rs.getInt(1)); // side effect - sets the link URLs\r
-// ps.close();\r
-\r
- // Update the row to set the URLs\r
- sql = "update SUBSCRIPTIONS set SELF_LINK = ?, LOG_LINK = ? where SUBID = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setString(1, getLinks().getSelf());\r
- ps.setString(2, getLinks().getLog());\r
- ps.setInt(3, subid);\r
- ps.execute();\r
- ps.close();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0005 doInsert: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
- @Override\r
- public boolean doUpdate(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- String sql = "update SUBSCRIPTIONS set DELIVERY_URL = ?, DELIVERY_USER = ?, DELIVERY_PASSWORD = ?, DELIVERY_USE100 = ?, METADATA_ONLY = ?, SUSPENDED = ?, GROUPID = ? where SUBID = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setString(1, delivery.getUrl());\r
- ps.setString(2, delivery.getUser());\r
- ps.setString(3, delivery.getPassword());\r
- ps.setInt(4, delivery.isUse100()?1:0);\r
- ps.setInt(5, isMetadataOnly()?1:0);\r
- ps.setInt(6, suspended ? 1 : 0);\r
- ps.setInt(7, groupid); //New field is added - Groups feature Rally:US708115 - 1610 \r
- ps.setInt(8, subid);\r
- ps.executeUpdate();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0006 doUpdate: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
-\r
-\r
- \r
- /**Rally US708115\r
- * Change Ownership of Subscription - 1610\r
- * */\r
- public boolean changeOwnerShip() {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- \r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection c = db.getConnection();\r
- String sql = "update SUBSCRIPTIONS set SUBSCRIBER = ? where SUBID = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setString(1, this.subscriber);\r
- ps.setInt(2, subid);\r
- ps.execute();\r
- ps.close();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0006 doUpdate: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
- \r
-\r
-\r
- @Override\r
- public boolean doDelete(Connection c) {\r
- boolean rv = true;\r
- PreparedStatement ps = null;\r
- try {\r
- String sql = "delete from SUBSCRIPTIONS where SUBID = ?";\r
- ps = c.prepareStatement(sql);\r
- ps.setInt(1, subid);\r
- ps.execute();\r
- } catch (SQLException e) {\r
- rv = false;\r
- intlogger.warn("PROV0007 doDelete: "+e.getMessage());\r
- e.printStackTrace();\r
- } finally {\r
- try {\r
- ps.close();\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- }\r
- return rv;\r
- }\r
- @Override\r
- public String getKey() {\r
- return ""+getSubid();\r
- }\r
- @Override\r
- public boolean equals(Object obj) {\r
- if (!(obj instanceof Subscription))\r
- return false;\r
- Subscription os = (Subscription) obj;\r
- if (subid != os.subid)\r
- return false;\r
- if (feedid != os.feedid)\r
- return false;\r
- if (groupid != os.groupid) //New field is added - Groups feature Rally:US708115 - 1610 \r
- return false;\r
- if (!delivery.equals(os.delivery))\r
- return false;\r
- if (metadataOnly != os.metadataOnly)\r
- return false;\r
- if (!subscriber.equals(os.subscriber))\r
- return false;\r
- if (!links.equals(os.links))\r
- return false;\r
- if (suspended != os.suspended)\r
- return false;\r
- return true;\r
- }\r
-\r
- @Override\r
- public String toString() {\r
- return "SUB: subid=" + subid + ", feedid=" + feedid;\r
- }\r
+ private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
+ private static int next_subid = getMaxSubID() + 1;\r
+\r
+ private int subid;\r
+ private int feedid;\r
+ private int groupid; //New field is added - Groups feature Rally:US708115 - 1610\r
+ private SubDelivery delivery;\r
+ private boolean metadataOnly;\r
+ private String subscriber;\r
+ private SubLinks links;\r
+ private boolean suspended;\r
+ private Date last_mod;\r
+ private Date created_date;\r
+\r
+ public static Subscription getSubscriptionMatching(Subscription sub) {\r
+ SubDelivery deli = sub.getDelivery();\r
+ String sql = String.format(\r
+ "select * from SUBSCRIPTIONS where FEEDID = %d and DELIVERY_URL = \"%s\" and DELIVERY_USER = \"%s\" and DELIVERY_PASSWORD = \"%s\" and DELIVERY_USE100 = %d and METADATA_ONLY = %d",\r
+ sub.getFeedid(),\r
+ deli.getUrl(),\r
+ deli.getUser(),\r
+ deli.getPassword(),\r
+ deli.isUse100() ? 1 : 0,\r
+ sub.isMetadataOnly() ? 1 : 0\r
+ );\r
+ List<Subscription> list = getSubscriptionsForSQL(sql);\r
+ return list.size() > 0 ? list.get(0) : null;\r
+ }\r
+\r
+ public static Subscription getSubscriptionById(int id) {\r
+ String sql = "select * from SUBSCRIPTIONS where SUBID = " + id;\r
+ List<Subscription> list = getSubscriptionsForSQL(sql);\r
+ return list.size() > 0 ? list.get(0) : null;\r
+ }\r
+\r
+ public static Collection<Subscription> getAllSubscriptions() {\r
+ return getSubscriptionsForSQL("select * from SUBSCRIPTIONS");\r
+ }\r
+\r
+ private static List<Subscription> getSubscriptionsForSQL(String sql) {\r
+ List<Subscription> list = new ArrayList<Subscription>();\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery(sql);\r
+ while (rs.next()) {\r
+ Subscription sub = new Subscription(rs);\r
+ list.add(sub);\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ return list;\r
+ }\r
+\r
+ public static int getMaxSubID() {\r
+ int max = 0;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery("select MAX(subid) from SUBSCRIPTIONS");\r
+ if (rs.next()) {\r
+ max = rs.getInt(1);\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ intlogger.info("getMaxSubID: " + e.getMessage());\r
+ e.printStackTrace();\r
+ }\r
+ return max;\r
+ }\r
+\r
+ public static Collection<String> getSubscriptionUrlList(int feedid) {\r
+ List<String> list = new ArrayList<String>();\r
+ String sql = "select SUBID from SUBSCRIPTIONS where FEEDID = " + feedid;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery(sql);\r
+ while (rs.next()) {\r
+ int subid = rs.getInt("SUBID");\r
+ list.add(URLUtilities.generateSubscriptionURL(subid));\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ return list;\r
+ }\r
+\r
+ /**\r
+ * Return a count of the number of active subscriptions in the DB.\r
+ *\r
+ * @return the count\r
+ */\r
+ public static int countActiveSubscriptions() {\r
+ int count = 0;\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery("select count(*) from SUBSCRIPTIONS");\r
+ if (rs.next()) {\r
+ count = rs.getInt(1);\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ intlogger.warn("PROV0008 countActiveSubscriptions: " + e.getMessage());\r
+ e.printStackTrace();\r
+ }\r
+ return count;\r
+ }\r
+\r
+ public Subscription() {\r
+ this("", "", "");\r
+ }\r
+\r
+ public Subscription(String url, String user, String password) {\r
+ this.subid = -1;\r
+ this.feedid = -1;\r
+ this.groupid = -1; //New field is added - Groups feature Rally:US708115 - 1610\r
+ this.delivery = new SubDelivery(url, user, password, false);\r
+ this.metadataOnly = false;\r
+ this.subscriber = "";\r
+ this.links = new SubLinks();\r
+ this.suspended = false;\r
+ this.last_mod = new Date();\r
+ this.created_date = new Date();\r
+ }\r
+\r
+ public Subscription(ResultSet rs) throws SQLException {\r
+ this.subid = rs.getInt("SUBID");\r
+ this.feedid = rs.getInt("FEEDID");\r
+ this.groupid = rs.getInt("GROUPID"); //New field is added - Groups feature Rally:US708115 - 1610\r
+ this.delivery = new SubDelivery(rs);\r
+ this.metadataOnly = rs.getBoolean("METADATA_ONLY");\r
+ this.subscriber = rs.getString("SUBSCRIBER");\r
+ this.links = new SubLinks(rs.getString("SELF_LINK"), URLUtilities.generateFeedURL(feedid), rs.getString("LOG_LINK"));\r
+ this.suspended = rs.getBoolean("SUSPENDED");\r
+ this.last_mod = rs.getDate("LAST_MOD");\r
+ this.created_date = rs.getDate("CREATED_DATE");\r
+ }\r
+\r
+ public Subscription(JSONObject jo) throws InvalidObjectException {\r
+ this("", "", "");\r
+ try {\r
+ // The JSONObject is assumed to contain a vnd.att-dr.subscription representation\r
+ this.subid = jo.optInt("subid", -1);\r
+ this.feedid = jo.optInt("feedid", -1);\r
+ this.groupid = jo.optInt("groupid", -1); //New field is added - Groups feature Rally:US708115 - 1610\r
+\r
+ JSONObject jdeli = jo.getJSONObject("delivery");\r
+ String url = jdeli.getString("url");\r
+ String user = jdeli.getString("user");\r
+ String password = jdeli.getString("password");\r
+ boolean use100 = jdeli.getBoolean("use100");\r
+\r
+\r
+ //Data Router Subscriber HTTPS Relaxation feature USERSTORYID:US674047.\r
+ Properties p = (new DB()).getProperties();\r
+ if (p.get("org.onap.dmaap.datarouter.provserver.https.relaxation").toString().equals("false") && !jo.has("sync")) {\r
+ if (!url.startsWith("https://"))\r
+ throw new InvalidObjectException("delivery URL is not HTTPS");\r
+ }\r
+\r
+ if (url.length() > 256)\r
+ throw new InvalidObjectException("delivery url field is too long");\r
+ if (user.length() > 20)\r
+ throw new InvalidObjectException("delivery user field is too long");\r
+ if (password.length() > 32)\r
+ throw new InvalidObjectException("delivery password field is too long");\r
+ this.delivery = new SubDelivery(url, user, password, use100);\r
+\r
+ this.metadataOnly = jo.getBoolean("metadataOnly");\r
+ this.suspended = jo.optBoolean("suspend", false);\r
+\r
+ this.subscriber = jo.optString("subscriber", "");\r
+ JSONObject jol = jo.optJSONObject("links");\r
+ this.links = (jol == null) ? (new SubLinks()) : (new SubLinks(jol));\r
+ } catch (InvalidObjectException e) {\r
+ throw e;\r
+ } catch (Exception e) {\r
+ throw new InvalidObjectException("invalid JSON: " + e.getMessage());\r
+ }\r
+ }\r
+\r
+ public int getSubid() {\r
+ return subid;\r
+ }\r
+\r
+ public void setSubid(int subid) {\r
+ this.subid = subid;\r
+\r
+ // Create link URLs\r
+ SubLinks sl = getLinks();\r
+ sl.setSelf(URLUtilities.generateSubscriptionURL(subid));\r
+ sl.setLog(URLUtilities.generateSubLogURL(subid));\r
+ }\r
+\r
+ public int getFeedid() {\r
+ return feedid;\r
+ }\r
+\r
+ public void setFeedid(int feedid) {\r
+ this.feedid = feedid;\r
+\r
+ // Create link URLs\r
+ SubLinks sl = getLinks();\r
+ sl.setFeed(URLUtilities.generateFeedURL(feedid));\r
+ }\r
+\r
+ //New getter setters for Groups feature Rally:US708115 - 1610\r
+ public int getGroupid() {\r
+ return groupid;\r
+ }\r
+\r
+ public void setGroupid(int groupid) {\r
+ this.groupid = groupid;\r
+ }\r
+\r
+ public SubDelivery getDelivery() {\r
+ return delivery;\r
+ }\r
+\r
+ public void setDelivery(SubDelivery delivery) {\r
+ this.delivery = delivery;\r
+ }\r
+\r
+ public boolean isMetadataOnly() {\r
+ return metadataOnly;\r
+ }\r
+\r
+ public void setMetadataOnly(boolean metadataOnly) {\r
+ this.metadataOnly = metadataOnly;\r
+ }\r
+\r
+ public boolean isSuspended() {\r
+ return suspended;\r
+ }\r
+\r
+ public void setSuspended(boolean suspended) {\r
+ this.suspended = suspended;\r
+ }\r
+\r
+ public String getSubscriber() {\r
+ return subscriber;\r
+ }\r
+\r
+ public void setSubscriber(String subscriber) {\r
+ if (subscriber != null) {\r
+ if (subscriber.length() > 8)\r
+ subscriber = subscriber.substring(0, 8);\r
+ this.subscriber = subscriber;\r
+ }\r
+ }\r
+\r
+ public SubLinks getLinks() {\r
+ return links;\r
+ }\r
+\r
+ public void setLinks(SubLinks links) {\r
+ this.links = links;\r
+ }\r
+\r
+ @Override\r
+ public JSONObject asJSONObject() {\r
+ JSONObject jo = new JSONObject();\r
+ jo.put("subid", subid);\r
+ jo.put("feedid", feedid);\r
+ jo.put("groupid", groupid); //New field is added - Groups feature Rally:US708115 - 1610\r
+ jo.put("delivery", delivery.asJSONObject());\r
+ jo.put("metadataOnly", metadataOnly);\r
+ jo.put("subscriber", subscriber);\r
+ jo.put("links", links.asJSONObject());\r
+ jo.put("suspend", suspended);\r
+ jo.put("last_mod", last_mod.getTime());\r
+ jo.put("created_date", created_date.getTime());\r
+ return jo;\r
+ }\r
+\r
+ public JSONObject asLimitedJSONObject() {\r
+ JSONObject jo = asJSONObject();\r
+ jo.remove("subid");\r
+ jo.remove("feedid");\r
+ jo.remove("last_mod");\r
+ return jo;\r
+ }\r
+\r
+ public JSONObject asJSONObject(boolean hidepasswords) {\r
+ JSONObject jo = asJSONObject();\r
+ if (hidepasswords) {\r
+ jo.remove("subid"); // we no longer hide passwords, however we do hide these\r
+ jo.remove("feedid");\r
+ jo.remove("last_mod");\r
+ jo.remove("created_date");\r
+ }\r
+ return jo;\r
+ }\r
+\r
+ @Override\r
+ public boolean doInsert(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ if (subid == -1) {\r
+ // No feed ID assigned yet, so assign the next available one\r
+ setSubid(next_subid++);\r
+ }\r
+ // In case we insert a feed from synchronization\r
+ if (subid > next_subid)\r
+ next_subid = subid + 1;\r
+\r
+ // Create the SUBSCRIPTIONS row\r
+ String sql = "insert into SUBSCRIPTIONS (SUBID, FEEDID, DELIVERY_URL, DELIVERY_USER, DELIVERY_PASSWORD, DELIVERY_USE100, METADATA_ONLY, SUBSCRIBER, SUSPENDED, GROUPID) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";\r
+ ps = c.prepareStatement(sql, new String[]{"SUBID"});\r
+ ps.setInt(1, subid);\r
+ ps.setInt(2, feedid);\r
+ ps.setString(3, getDelivery().getUrl());\r
+ ps.setString(4, getDelivery().getUser());\r
+ ps.setString(5, getDelivery().getPassword());\r
+ ps.setInt(6, getDelivery().isUse100() ? 1 : 0);\r
+ ps.setInt(7, isMetadataOnly() ? 1 : 0);\r
+ ps.setString(8, getSubscriber());\r
+ ps.setBoolean(9, isSuspended());\r
+ ps.setInt(10, groupid); //New field is added - Groups feature Rally:US708115 - 1610\r
+ ps.execute();\r
+ ps.close();\r
+// ResultSet rs = ps.getGeneratedKeys();\r
+// rs.first();\r
+// setSubid(rs.getInt(1)); // side effect - sets the link URLs\r
+// ps.close();\r
+\r
+ // Update the row to set the URLs\r
+ sql = "update SUBSCRIPTIONS set SELF_LINK = ?, LOG_LINK = ? where SUBID = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setString(1, getLinks().getSelf());\r
+ ps.setString(2, getLinks().getLog());\r
+ ps.setInt(3, subid);\r
+ ps.execute();\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0005 doInsert: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @Override\r
+ public boolean doUpdate(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ String sql = "update SUBSCRIPTIONS set DELIVERY_URL = ?, DELIVERY_USER = ?, DELIVERY_PASSWORD = ?, DELIVERY_USE100 = ?, METADATA_ONLY = ?, SUSPENDED = ?, GROUPID = ? where SUBID = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setString(1, delivery.getUrl());\r
+ ps.setString(2, delivery.getUser());\r
+ ps.setString(3, delivery.getPassword());\r
+ ps.setInt(4, delivery.isUse100() ? 1 : 0);\r
+ ps.setInt(5, isMetadataOnly() ? 1 : 0);\r
+ ps.setInt(6, suspended ? 1 : 0);\r
+ ps.setInt(7, groupid); //New field is added - Groups feature Rally:US708115 - 1610\r
+ ps.setInt(8, subid);\r
+ ps.executeUpdate();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0006 doUpdate: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+\r
+ /**\r
+ * Rally US708115\r
+ * Change Ownership of Subscription - 1610\r
+ */\r
+ public boolean changeOwnerShip() {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection c = db.getConnection();\r
+ String sql = "update SUBSCRIPTIONS set SUBSCRIBER = ? where SUBID = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setString(1, this.subscriber);\r
+ ps.setInt(2, subid);\r
+ ps.execute();\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0006 doUpdate: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+\r
+ @Override\r
+ public boolean doDelete(Connection c) {\r
+ boolean rv = true;\r
+ PreparedStatement ps = null;\r
+ try {\r
+ String sql = "delete from SUBSCRIPTIONS where SUBID = ?";\r
+ ps = c.prepareStatement(sql);\r
+ ps.setInt(1, subid);\r
+ ps.execute();\r
+ } catch (SQLException e) {\r
+ rv = false;\r
+ intlogger.warn("PROV0007 doDelete: " + e.getMessage());\r
+ e.printStackTrace();\r
+ } finally {\r
+ try {\r
+ ps.close();\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ @Override\r
+ public String getKey() {\r
+ return "" + getSubid();\r
+ }\r
+\r
+ @Override\r
+ public boolean equals(Object obj) {\r
+ if (!(obj instanceof Subscription))\r
+ return false;\r
+ Subscription os = (Subscription) obj;\r
+ if (subid != os.subid)\r
+ return false;\r
+ if (feedid != os.feedid)\r
+ return false;\r
+ if (groupid != os.groupid) //New field is added - Groups feature Rally:US708115 - 1610\r
+ return false;\r
+ if (!delivery.equals(os.delivery))\r
+ return false;\r
+ if (metadataOnly != os.metadataOnly)\r
+ return false;\r
+ if (!subscriber.equals(os.subscriber))\r
+ return false;\r
+ if (!links.equals(os.links))\r
+ return false;\r
+ if (suspended != os.suspended)\r
+ return false;\r
+ return true;\r
+ }\r
+\r
+ @Override\r
+ public String toString() {\r
+ return "SUB: subid=" + subid + ", feedid=" + feedid;\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: Syncable.java,v 1.1 2013/07/05 13:48:05 eby Exp $\r
*/\r
public abstract class Syncable implements Deleteable, Insertable, Updateable, JSONable {\r
- @Override\r
- abstract public JSONObject asJSONObject();\r
+ @Override\r
+ abstract public JSONObject asJSONObject();\r
\r
- @Override\r
- abstract public boolean doUpdate(Connection c);\r
+ @Override\r
+ abstract public boolean doUpdate(Connection c);\r
\r
- @Override\r
- abstract public boolean doInsert(Connection c);\r
+ @Override\r
+ abstract public boolean doInsert(Connection c);\r
\r
- @Override\r
- abstract public boolean doDelete(Connection c);\r
+ @Override\r
+ abstract public boolean doDelete(Connection c);\r
\r
- /**\r
- * Get the "natural key" for this object type, as a String.\r
- * @return the key\r
- */\r
- abstract public String getKey();\r
+ /**\r
+ * Get the "natural key" for this object type, as a String.\r
+ *\r
+ * @return the key\r
+ */\r
+ abstract public String getKey();\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
\r
/**\r
* An object that can be UPDATE-ed in the database.\r
+ *\r
* @author Robert Eby\r
* @version $Id: Updateable.java,v 1.2 2013/05/29 14:44:36 eby Exp $\r
*/\r
public interface Updateable {\r
- /**\r
- * Update this object in the DB.\r
- * @param c the JDBC Connection to use\r
- * @return true if the UPDATE succeeded, false otherwise\r
- */\r
- public boolean doUpdate(Connection c);\r
+ /**\r
+ * Update this object in the DB.\r
+ *\r
+ * @param c the JDBC Connection to use\r
+ * @return true if the UPDATE succeeded, false otherwise\r
+ */\r
+ public boolean doUpdate(Connection c);\r
}\r
# ============LICENSE_START==================================================\r
# * org.onap.dmaap\r
# * ===========================================================================\r
-# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# * ===========================================================================\r
# * Licensed under the Apache License, Version 2.0 (the "License");\r
# * you may not use this file except in compliance with the License.\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
import com.att.eelf.i18n.EELFResourceManager;\r
\r
public enum EelfMsgs implements EELFResolvableErrorEnum {\r
- \r
- /**\r
+\r
+ /**\r
* Application message prints user (accepts one argument)\r
*/\r
- MESSAGE_WITH_BEHALF,\r
+ MESSAGE_WITH_BEHALF,\r
\r
- /**\r
+ /**\r
* Application message prints user and FeedID (accepts two arguments)\r
*/\r
\r
- MESSAGE_WITH_BEHALF_AND_FEEDID,\r
+ MESSAGE_WITH_BEHALF_AND_FEEDID,\r
\r
- /**\r
+ /**\r
* Application message prints user and SUBID (accepts two arguments)\r
*/\r
\r
- MESSAGE_WITH_BEHALF_AND_SUBID;\r
+ MESSAGE_WITH_BEHALF_AND_SUBID;\r
+\r
\r
- \r
- \r
/**\r
* Static initializer to ensure the resource bundles for this class are loaded...\r
* Here this application loads messages from three bundles\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
import ch.qos.logback.core.filter.Filter;\r
import ch.qos.logback.core.spi.FilterReply;\r
\r
-public class JettyFilter extends Filter<ILoggingEvent>{\r
- @Override\r
- public FilterReply decide(ILoggingEvent event) { \r
- if (event.getLoggerName().contains("org.eclipse.jetty")) {\r
- return FilterReply.ACCEPT;\r
- } else {\r
- return FilterReply.DENY;\r
- }\r
- }\r
+public class JettyFilter extends Filter<ILoggingEvent> {\r
+ @Override\r
+ public FilterReply decide(ILoggingEvent event) {\r
+ if (event.getLoggerName().contains("org.eclipse.jetty")) {\r
+ return FilterReply.ACCEPT;\r
+ } else {\r
+ return FilterReply.DENY;\r
+ }\r
+ }\r
}\r
-#-------------------------------------------------------------------------------\r
-# ============LICENSE_START==================================================\r
-# * org.onap.dmaap\r
-# * ===========================================================================\r
-# * Copyright � 2017 AT&T Intellectual Property. All rights reserved.\r
-# * ===========================================================================\r
-# * Licensed under the Apache License, Version 2.0 (the "License");\r
-# * you may not use this file except in compliance with the License.\r
-# * You may obtain a copy of the License at\r
-# * \r
-# * http://www.apache.org/licenses/LICENSE-2.0\r
-# * \r
-# * Unless required by applicable law or agreed to in writing, software\r
-# * distributed under the License is distributed on an "AS IS" BASIS,\r
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
-# * See the License for the specific language governing permissions and\r
-# * limitations under the License.\r
-# * ============LICENSE_END====================================================\r
-# *\r
-# * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
-# *\r
-#-------------------------------------------------------------------------------\r
-\r
-<html>\r
-<body>\r
-<p>\r
-This package provides the servlets used by the provisioning server for the Data Router application.\r
-URLs are from the document <b>URLs for DR Release 1</b> <i>Version 1.2</i>.\r
-</p>\r
-<div class="contentContainer">\r
-<table class="packageSummary" border="0" cellpadding="3" cellspacing="0">\r
-<caption><span>URL Path Summary</span><span class="tabEnd"> </span></caption>\r
-<tr class="altColor">\r
- <th class="colFirst">URL Path</th>\r
- <th class="colOne">Symbolic Name</th>\r
- <th class="colLast">Servlet Name</th>\r
- <th class="colLast" colspan="4">Allowed Methods</th>\r
-</tr>\r
-<tr>\r
- <td class="colFirst" class="colOne">/</td>\r
- <td class="colOne" class="colOne"><drFeedsUrl></td>\r
- <td class="colLast" class="colOne">{@link org.onap.dmaap.datarouter.provisioning.DRFeedsServlet}</td>\r
- <td class="colLast" class="colOne" style="background-color: pink">DELETE</td>\r
- <td class="colLast" class="colOne" style="background-color: lightgreen">GET</td>\r
- <td class="colLast" class="colOne" style="background-color: lightgreen">POST</td>\r
- <td class="colLast" class="colOne" style="background-color: pink">PUT</td>\r
-</tr>\r
-<tr class="altColor">\r
- <td class="colFirst" class="colOne">/feed/feedid</td>\r
- <td class="colOne" class="colOne"><feedUrl></td>\r
- <td class="colLast" class="colOne">{@link org.onap.dmaap.datarouter.provisioning.FeedServlet}</td>\r
- <td class="colLast" style="background-color: lightgreen">DELETE</td>\r
- <td class="colLast" style="background-color: lightgreen">GET</td>\r
- <td class="colLast" style="background-color: pink">POST</td>\r
- <td class="colLast" style="background-color: lightgreen">PUT</td>\r
-</tr>\r
-<tr>\r
- <td class="colFirst">/publish/feedid</td>\r
- <td class="colOne"><publishUrl></td>\r
- <td class="colLast">{@link org.onap.dmaap.datarouter.provisioning.PublishServlet}</td>\r
- <td class="colLast" style="background-color: lightgreen">DELETE</td>\r
- <td class="colLast" style="background-color: lightgreen">GET</td>\r
- <td class="colLast" style="background-color: lightgreen">POST</td>\r
- <td class="colLast" style="background-color: lightgreen">PUT</td>\r
-</tr>\r
-<tr class="altColor">\r
- <td class="colFirst">/subscribe/feedid</td>\r
- <td class="colOne"><subscribeUrl></td>\r
- <td class="colLast">{@link org.onap.dmaap.datarouter.provisioning.SubscribeServlet}</td>\r
- <td class="colLast" style="background-color: pink">DELETE</td>\r
- <td class="colLast" style="background-color: lightgreen">GET</td>\r
- <td class="colLast" style="background-color: lightgreen">POST</td>\r
- <td class="colLast" style="background-color: pink">PUT</td>\r
-</tr>\r
-<tr>\r
- <td class="colFirst">/feedlog/feedid</td>\r
- <td class="colOne"><feedLogUrl></td>\r
- <td class="colLast">{@link org.onap.dmaap.datarouter.provisioning.FeedLogServlet}</td>\r
- <td class="colLast" style="background-color: pink">DELETE</td>\r
- <td class="colLast" style="background-color: lightgreen">GET</td>\r
- <td class="colLast" style="background-color: pink">POST</td>\r
- <td class="colLast" style="background-color: pink">PUT</td>\r
-</tr>\r
-<tr class="altColor">\r
- <td class="colFirst">/subs/subid</td>\r
- <td class="colOne"><subscriptionUrl></td>\r
- <td class="colLast">{@link org.onap.dmaap.datarouter.provisioning.SubscriptionServlet}</td>\r
- <td class="colLast" style="background-color: lightgreen">DELETE</td>\r
- <td class="colLast" style="background-color: lightgreen">GET</td>\r
- <td class="colLast" style="background-color: lightgreen">POST</td>\r
- <td class="colLast" style="background-color: lightgreen">PUT</td>\r
-</tr>\r
-<tr>\r
- <td class="colFirst">/sublog/subid</td>\r
- <td class="colOne"><subLogUrl></td>\r
- <td class="colLast">{@link org.onap.dmaap.datarouter.provisioning.SubLogServlet}</td>\r
- <td class="colLast" style="background-color: pink">DELETE</td>\r
- <td class="colLast" style="background-color: lightgreen">GET</td>\r
- <td class="colLast" style="background-color: pink">POST</td>\r
- <td class="colLast" style="background-color: pink">PUT</td>\r
-</tr>\r
-<tr class="altColor">\r
- <td class="colFirst">/internal/*</td>\r
- <td class="colOne"><internalUrl></td>\r
- <td class="colLast">{@link org.onap.dmaap.datarouter.provisioning.InternalServlet}</td>\r
- <td class="colLast" style="background-color: lightgreen">DELETE</td>\r
- <td class="colLast" style="background-color: lightgreen">GET</td>\r
- <td class="colLast" style="background-color: lightgreen">POST</td>\r
- <td class="colLast" style="background-color: lightgreen">PUT</td>\r
-</tr>\r
-<tr>\r
- <td class="colFirst">/internal/route/*</td>\r
- <td class="colOne"><routeUrl></td>\r
- <td class="colLast">{@link org.onap.dmaap.datarouter.provisioning.RouteServlet}</td>\r
- <td class="colLast" style="background-color: lightgreen">DELETE</td>\r
- <td class="colLast" style="background-color: lightgreen">GET</td>\r
- <td class="colLast" style="background-color: lightgreen">POST</td>\r
- <td class="colLast" style="background-color: pink">PUT</td>\r
-</tr>\r
-</table>\r
-</div>\r
-</body>\r
-</html>\r
+#-------------------------------------------------------------------------------
+# ============LICENSE_START==================================================
+# * org.onap.dmaap
+# * ===========================================================================
+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# * ===========================================================================
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# * ============LICENSE_END====================================================
+# *
+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+# *
+#-------------------------------------------------------------------------------
+
+<html>
+<body>
+<p>
+This package provides the servlets used by the provisioning server for the Data Router application.
+URLs are from the document <b>URLs for DR Release 1</b> <i>Version 1.2</i>.
+</p>
+<div class="contentContainer">
+<table class="packageSummary" border="0" cellpadding="3" cellspacing="0">
+<caption><span>URL Path Summary</span><span class="tabEnd"> </span></caption>
+<tr class="altColor">
+ <th class="colFirst">URL Path</th>
+ <th class="colOne">Symbolic Name</th>
+ <th class="colLast">Servlet Name</th>
+ <th class="colLast" colspan="4">Allowed Methods</th>
+</tr>
+<tr>
+ <td class="colFirst" class="colOne">/</td>
+ <td class="colOne" class="colOne"><drFeedsUrl></td>
+ <td class="colLast" class="colOne">{@link org.onap.dmaap.datarouter.provisioning.DRFeedsServlet}</td>
+ <td class="colLast" class="colOne" style="background-color: pink">DELETE</td>
+ <td class="colLast" class="colOne" style="background-color: lightgreen">GET</td>
+ <td class="colLast" class="colOne" style="background-color: lightgreen">POST</td>
+ <td class="colLast" class="colOne" style="background-color: pink">PUT</td>
+</tr>
+<tr class="altColor">
+ <td class="colFirst" class="colOne">/feed/feedid</td>
+ <td class="colOne" class="colOne"><feedUrl></td>
+ <td class="colLast" class="colOne">{@link org.onap.dmaap.datarouter.provisioning.FeedServlet}</td>
+ <td class="colLast" style="background-color: lightgreen">DELETE</td>
+ <td class="colLast" style="background-color: lightgreen">GET</td>
+ <td class="colLast" style="background-color: pink">POST</td>
+ <td class="colLast" style="background-color: lightgreen">PUT</td>
+</tr>
+<tr>
+ <td class="colFirst">/publish/feedid</td>
+ <td class="colOne"><publishUrl></td>
+ <td class="colLast">{@link org.onap.dmaap.datarouter.provisioning.PublishServlet}</td>
+ <td class="colLast" style="background-color: lightgreen">DELETE</td>
+ <td class="colLast" style="background-color: lightgreen">GET</td>
+ <td class="colLast" style="background-color: lightgreen">POST</td>
+ <td class="colLast" style="background-color: lightgreen">PUT</td>
+</tr>
+<tr class="altColor">
+ <td class="colFirst">/subscribe/feedid</td>
+ <td class="colOne"><subscribeUrl></td>
+ <td class="colLast">{@link org.onap.dmaap.datarouter.provisioning.SubscribeServlet}</td>
+ <td class="colLast" style="background-color: pink">DELETE</td>
+ <td class="colLast" style="background-color: lightgreen">GET</td>
+ <td class="colLast" style="background-color: lightgreen">POST</td>
+ <td class="colLast" style="background-color: pink">PUT</td>
+</tr>
+<tr>
+ <td class="colFirst">/feedlog/feedid</td>
+ <td class="colOne"><feedLogUrl></td>
+ <td class="colLast">{@link org.onap.dmaap.datarouter.provisioning.FeedLogServlet}</td>
+ <td class="colLast" style="background-color: pink">DELETE</td>
+ <td class="colLast" style="background-color: lightgreen">GET</td>
+ <td class="colLast" style="background-color: pink">POST</td>
+ <td class="colLast" style="background-color: pink">PUT</td>
+</tr>
+<tr class="altColor">
+ <td class="colFirst">/subs/subid</td>
+ <td class="colOne"><subscriptionUrl></td>
+ <td class="colLast">{@link org.onap.dmaap.datarouter.provisioning.SubscriptionServlet}</td>
+ <td class="colLast" style="background-color: lightgreen">DELETE</td>
+ <td class="colLast" style="background-color: lightgreen">GET</td>
+ <td class="colLast" style="background-color: lightgreen">POST</td>
+ <td class="colLast" style="background-color: lightgreen">PUT</td>
+</tr>
+<tr>
+ <td class="colFirst">/sublog/subid</td>
+ <td class="colOne"><subLogUrl></td>
+ <td class="colLast">{@link org.onap.dmaap.datarouter.provisioning.SubLogServlet}</td>
+ <td class="colLast" style="background-color: pink">DELETE</td>
+ <td class="colLast" style="background-color: lightgreen">GET</td>
+ <td class="colLast" style="background-color: pink">POST</td>
+ <td class="colLast" style="background-color: pink">PUT</td>
+</tr>
+<tr class="altColor">
+ <td class="colFirst">/internal/*</td>
+ <td class="colOne"><internalUrl></td>
+ <td class="colLast">{@link org.onap.dmaap.datarouter.provisioning.InternalServlet}</td>
+ <td class="colLast" style="background-color: lightgreen">DELETE</td>
+ <td class="colLast" style="background-color: lightgreen">GET</td>
+ <td class="colLast" style="background-color: lightgreen">POST</td>
+ <td class="colLast" style="background-color: lightgreen">PUT</td>
+</tr>
+<tr>
+ <td class="colFirst">/internal/route/*</td>
+ <td class="colOne"><routeUrl></td>
+ <td class="colLast">{@link org.onap.dmaap.datarouter.provisioning.RouteServlet}</td>
+ <td class="colLast" style="background-color: lightgreen">DELETE</td>
+ <td class="colLast" style="background-color: lightgreen">GET</td>
+ <td class="colLast" style="background-color: lightgreen">POST</td>
+ <td class="colLast" style="background-color: pink">PUT</td>
+</tr>
+</table>
+</div>
+</body>
+</html>
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id$\r
*/\r
public class DB {\r
- /** The name of the properties file (in CLASSPATH) */\r
- private static final String CONFIG_FILE = "provserver.properties";\r
-\r
- private static String DB_URL;\r
- private static String DB_LOGIN;\r
- private static String DB_PASSWORD;\r
- private static Properties props;\r
- private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
- private static final Queue<Connection> queue = new LinkedList<>();\r
-\r
- public static String HTTPS_PORT;\r
- public static String HTTP_PORT;\r
-\r
- /**\r
- * Construct a DB object. If this is the very first creation of this object, it will load a copy\r
- * of the properties for the server, and attempt to load the JDBC driver for the database. If a fatal\r
- * error occurs (e.g. either the properties file or the DB driver is missing), the JVM will exit.\r
- */\r
- public DB() {\r
- if (props == null) {\r
- props = new Properties();\r
- try (InputStream inStream = getClass().getClassLoader().getResourceAsStream(CONFIG_FILE)) {\r
- props.load(inStream);\r
- String DB_DRIVER = (String) props.get("org.onap.dmaap.datarouter.db.driver");\r
- DB_URL = (String) props.get("org.onap.dmaap.datarouter.db.url");\r
- DB_LOGIN = (String) props.get("org.onap.dmaap.datarouter.db.login");\r
- DB_PASSWORD = (String) props.get("org.onap.dmaap.datarouter.db.password");\r
- HTTPS_PORT = (String) props.get("org.onap.dmaap.datarouter.provserver.https.port");\r
- HTTP_PORT = (String) props.get("org.onap.dmaap.datarouter.provserver.http.port");\r
- Class.forName(DB_DRIVER);\r
- } catch (IOException e) {\r
- intlogger.fatal("PROV9003 Opening properties: " + e.getMessage());\r
- e.printStackTrace();\r
- System.exit(1);\r
- } catch (ClassNotFoundException e) {\r
- intlogger.fatal("PROV9004 cannot find the DB driver: " + e);\r
- e.printStackTrace();\r
- System.exit(1);\r
- }\r
- }\r
- }\r
- /**\r
- * Get the provisioning server properties (loaded from provserver.properties).\r
- * @return the Properties object\r
- */\r
- public Properties getProperties() {\r
- return props;\r
- }\r
- /**\r
- * Get a JDBC connection to the DB from the pool. Creates a new one if none are available.\r
- * @return the Connection\r
- * @throws SQLException\r
- */\r
- @SuppressWarnings("resource")\r
- public Connection getConnection() throws SQLException {\r
- Connection connection = null;\r
- while (connection == null) {\r
- synchronized (queue) {\r
- try {\r
- connection = queue.remove();\r
- } catch (NoSuchElementException nseEx) {\r
- int n = 0;\r
- do {\r
- // Try up to 3 times to get a connection\r
- try {\r
- connection = DriverManager.getConnection(DB_URL, DB_LOGIN, DB_PASSWORD);\r
- } catch (SQLException sqlEx) {\r
- if (++n >= 3)\r
- throw sqlEx;\r
- }\r
- } while (connection == null);\r
- }\r
- }\r
- if (connection != null && !connection.isValid(1)) {\r
- connection.close();\r
- connection = null;\r
- }\r
- }\r
- return connection;\r
- }\r
- /**\r
- * Returns a JDBC connection to the pool.\r
- * @param connection the Connection to return\r
- */\r
- public void release(Connection connection) {\r
- if (connection != null) {\r
- synchronized (queue) {\r
- if (!queue.contains(connection))\r
- queue.add(connection);\r
- }\r
- }\r
- }\r
-\r
- /**\r
- * Run all necessary retrofits required to bring the database up to the level required for this version\r
- * of the provisioning server. This should be run before the server itself is started.\r
- * @return true if all retrofits worked, false otherwise\r
- */\r
- public boolean runRetroFits() {\r
- return retroFit1();\r
- }\r
-\r
- /**\r
- * Retrofit 1 - Make sure the expected tables are in DB and are initialized.\r
- * Uses sql_init_01.sql to setup the DB.\r
- * @return true if the retrofit worked, false otherwise\r
- */\r
- private boolean retroFit1() {\r
- final String[] expectedTables = {\r
- "FEEDS", "FEED_ENDPOINT_ADDRS", "FEED_ENDPOINT_IDS", "PARAMETERS",\r
- "SUBSCRIPTIONS", "LOG_RECORDS", "INGRESS_ROUTES", "EGRESS_ROUTES",\r
- "NETWORK_ROUTES", "NODESETS", "NODES", "GROUPS"\r
- };\r
- Connection connection = null;\r
- try {\r
- connection = getConnection();\r
- Set<String> actualTables = getTableSet(connection);\r
- boolean initialize = false;\r
- for (String table : expectedTables) {\r
- initialize |= !actualTables.contains(table);\r
- }\r
- if (initialize) {\r
- intlogger.info("PROV9001: First time startup; The database is being initialized.");\r
- runInitScript(connection, 1);\r
- }\r
- } catch (SQLException e) {\r
- intlogger.fatal("PROV9000: The database credentials are not working: "+e.getMessage());\r
- return false;\r
- } finally {\r
- if (connection != null)\r
- release(connection);\r
- }\r
- return true;\r
- }\r
-\r
- /**\r
- * Get a set of all table names in the DB.\r
- * @param connection a DB connection\r
- * @return the set of table names\r
- */\r
- private Set<String> getTableSet(Connection connection) {\r
- Set<String> tables = new HashSet<String>();\r
- try {\r
- DatabaseMetaData md = connection.getMetaData();\r
- ResultSet rs = md.getTables("datarouter", "", "", null);\r
- if (rs != null) {\r
- while (rs.next()) {\r
- tables.add(rs.getString("TABLE_NAME"));\r
- }\r
- rs.close();\r
- }\r
- } catch (SQLException e) {\r
- }\r
- return tables;\r
- }\r
- /**\r
- * Initialize the tables by running the initialization scripts located in the directory specified\r
- * by the property <i>org.onap.dmaap.datarouter.provserver.dbscripts</i>. Scripts have names of\r
- * the form sql_init_NN.sql\r
- * @param connection a DB connection\r
- * @param scriptId the number of the sql_init_NN.sql script to run\r
- */\r
- private void runInitScript(Connection connection, int scriptId) {\r
- String scriptDir = (String) props.get("org.onap.dmaap.datarouter.provserver.dbscripts");\r
- StringBuilder sb = new StringBuilder();\r
- try {\r
- String scriptFile = String.format("%s/sql_init_%02d.sql", scriptDir, scriptId);\r
- if (!(new File(scriptFile)).exists())\r
- return;\r
-\r
- LineNumberReader in = new LineNumberReader(new FileReader(scriptFile));\r
- String line;\r
- while ((line = in.readLine()) != null) {\r
- if (!line.startsWith("--")) {\r
- line = line.trim();\r
- sb.append(line);\r
- if (line.endsWith(";")) {\r
- // Execute one DDL statement\r
- String sql = sb.toString();\r
- sb.setLength(0);\r
- Statement s = connection.createStatement();\r
- s.execute(sql);\r
- s.close();\r
- }\r
- }\r
- }\r
- in.close();\r
- sb.setLength(0);\r
- } catch (Exception e) {\r
- intlogger.fatal("PROV9002 Error when initializing table: "+e.getMessage());\r
- System.exit(1);\r
- }\r
- }\r
+ /**\r
+ * The name of the properties file (in CLASSPATH)\r
+ */\r
+ private static final String CONFIG_FILE = "provserver.properties";\r
+\r
+ private static String DB_URL;\r
+ private static String DB_LOGIN;\r
+ private static String DB_PASSWORD;\r
+ private static Properties props;\r
+ private static Logger intlogger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
+ private static final Queue<Connection> queue = new LinkedList<>();\r
+\r
+ public static String HTTPS_PORT;\r
+ public static String HTTP_PORT;\r
+\r
+ /**\r
+ * Construct a DB object. If this is the very first creation of this object, it will load a copy\r
+ * of the properties for the server, and attempt to load the JDBC driver for the database. If a fatal\r
+ * error occurs (e.g. either the properties file or the DB driver is missing), the JVM will exit.\r
+ */\r
+ public DB() {\r
+ if (props == null) {\r
+ props = new Properties();\r
+ try (InputStream inStream = getClass().getClassLoader().getResourceAsStream(CONFIG_FILE)) {\r
+ props.load(inStream);\r
+ String DB_DRIVER = (String) props.get("org.onap.dmaap.datarouter.db.driver");\r
+ DB_URL = (String) props.get("org.onap.dmaap.datarouter.db.url");\r
+ DB_LOGIN = (String) props.get("org.onap.dmaap.datarouter.db.login");\r
+ DB_PASSWORD = (String) props.get("org.onap.dmaap.datarouter.db.password");\r
+ HTTPS_PORT = (String) props.get("org.onap.dmaap.datarouter.provserver.https.port");\r
+ HTTP_PORT = (String) props.get("org.onap.dmaap.datarouter.provserver.http.port");\r
+ Class.forName(DB_DRIVER);\r
+ } catch (IOException e) {\r
+ intlogger.fatal("PROV9003 Opening properties: " + e.getMessage());\r
+ e.printStackTrace();\r
+ System.exit(1);\r
+ } catch (ClassNotFoundException e) {\r
+ intlogger.fatal("PROV9004 cannot find the DB driver: " + e);\r
+ e.printStackTrace();\r
+ System.exit(1);\r
+ }\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Get the provisioning server properties (loaded from provserver.properties).\r
+ *\r
+ * @return the Properties object\r
+ */\r
+ public Properties getProperties() {\r
+ return props;\r
+ }\r
+\r
+ /**\r
+ * Get a JDBC connection to the DB from the pool. Creates a new one if none are available.\r
+ *\r
+ * @return the Connection\r
+ * @throws SQLException\r
+ */\r
+ @SuppressWarnings("resource")\r
+ public Connection getConnection() throws SQLException {\r
+ Connection connection = null;\r
+ while (connection == null) {\r
+ synchronized (queue) {\r
+ try {\r
+ connection = queue.remove();\r
+ } catch (NoSuchElementException nseEx) {\r
+ int n = 0;\r
+ do {\r
+ // Try up to 3 times to get a connection\r
+ try {\r
+ connection = DriverManager.getConnection(DB_URL, DB_LOGIN, DB_PASSWORD);\r
+ } catch (SQLException sqlEx) {\r
+ if (++n >= 3)\r
+ throw sqlEx;\r
+ }\r
+ } while (connection == null);\r
+ }\r
+ }\r
+ if (connection != null && !connection.isValid(1)) {\r
+ connection.close();\r
+ connection = null;\r
+ }\r
+ }\r
+ return connection;\r
+ }\r
+\r
+ /**\r
+ * Returns a JDBC connection to the pool.\r
+ *\r
+ * @param connection the Connection to return\r
+ */\r
+ public void release(Connection connection) {\r
+ if (connection != null) {\r
+ synchronized (queue) {\r
+ if (!queue.contains(connection))\r
+ queue.add(connection);\r
+ }\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Run all necessary retrofits required to bring the database up to the level required for this version\r
+ * of the provisioning server. This should be run before the server itself is started.\r
+ *\r
+ * @return true if all retrofits worked, false otherwise\r
+ */\r
+ public boolean runRetroFits() {\r
+ return retroFit1();\r
+ }\r
+\r
+ /**\r
+ * Retrofit 1 - Make sure the expected tables are in DB and are initialized.\r
+ * Uses sql_init_01.sql to setup the DB.\r
+ *\r
+ * @return true if the retrofit worked, false otherwise\r
+ */\r
+ private boolean retroFit1() {\r
+ final String[] expectedTables = {\r
+ "FEEDS", "FEED_ENDPOINT_ADDRS", "FEED_ENDPOINT_IDS", "PARAMETERS",\r
+ "SUBSCRIPTIONS", "LOG_RECORDS", "INGRESS_ROUTES", "EGRESS_ROUTES",\r
+ "NETWORK_ROUTES", "NODESETS", "NODES", "GROUPS"\r
+ };\r
+ Connection connection = null;\r
+ try {\r
+ connection = getConnection();\r
+ Set<String> actualTables = getTableSet(connection);\r
+ boolean initialize = false;\r
+ for (String table : expectedTables) {\r
+ initialize |= !actualTables.contains(table);\r
+ }\r
+ if (initialize) {\r
+ intlogger.info("PROV9001: First time startup; The database is being initialized.");\r
+ runInitScript(connection, 1);\r
+ }\r
+ } catch (SQLException e) {\r
+ intlogger.fatal("PROV9000: The database credentials are not working: " + e.getMessage());\r
+ return false;\r
+ } finally {\r
+ if (connection != null)\r
+ release(connection);\r
+ }\r
+ return true;\r
+ }\r
+\r
+ /**\r
+ * Get a set of all table names in the DB.\r
+ *\r
+ * @param connection a DB connection\r
+ * @return the set of table names\r
+ */\r
+ private Set<String> getTableSet(Connection connection) {\r
+ Set<String> tables = new HashSet<String>();\r
+ try {\r
+ DatabaseMetaData md = connection.getMetaData();\r
+ ResultSet rs = md.getTables("datarouter", "", "", null);\r
+ if (rs != null) {\r
+ while (rs.next()) {\r
+ tables.add(rs.getString("TABLE_NAME"));\r
+ }\r
+ rs.close();\r
+ }\r
+ } catch (SQLException e) {\r
+ }\r
+ return tables;\r
+ }\r
+\r
+ /**\r
+ * Initialize the tables by running the initialization scripts located in the directory specified\r
+ * by the property <i>org.onap.dmaap.datarouter.provserver.dbscripts</i>. Scripts have names of\r
+ * the form sql_init_NN.sql\r
+ *\r
+ * @param connection a DB connection\r
+ * @param scriptId the number of the sql_init_NN.sql script to run\r
+ */\r
+ private void runInitScript(Connection connection, int scriptId) {\r
+ String scriptDir = (String) props.get("org.onap.dmaap.datarouter.provserver.dbscripts");\r
+ StringBuilder sb = new StringBuilder();\r
+ try {\r
+ String scriptFile = String.format("%s/sql_init_%02d.sql", scriptDir, scriptId);\r
+ if (!(new File(scriptFile)).exists())\r
+ return;\r
+\r
+ LineNumberReader in = new LineNumberReader(new FileReader(scriptFile));\r
+ String line;\r
+ while ((line = in.readLine()) != null) {\r
+ if (!line.startsWith("--")) {\r
+ line = line.trim();\r
+ sb.append(line);\r
+ if (line.endsWith(";")) {\r
+ // Execute one DDL statement\r
+ String sql = sb.toString();\r
+ sb.setLength(0);\r
+ Statement s = connection.createStatement();\r
+ s.execute(sql);\r
+ s.close();\r
+ }\r
+ }\r
+ }\r
+ in.close();\r
+ sb.setLength(0);\r
+ } catch (Exception e) {\r
+ intlogger.fatal("PROV9002 Error when initializing table: " + e.getMessage());\r
+ System.exit(1);\r
+ }\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: DRRouteCLI.java,v 1.2 2013/11/05 15:54:16 eby Exp $\r
*/\r
public class DRRouteCLI {\r
- /**\r
- * Invoke the CLI. The CLI can be run with a single command (given as command line arguments),\r
- * or in an interactive mode where the user types a sequence of commands to the program. The CLI is invoked via:\r
- * <pre>\r
- * java org.onap.dmaap.datarouter.provisioning.utils.DRRouteCLI [ -s <i>server</i> ] [ <i>command</i> ]\r
- * </pre>\r
- * A full description of the arguments to this command are\r
- * <a href="http://wiki.proto.research.att.com/doku.php?id=datarouter-route-cli">here</a>.\r
- *\r
- * @param args command line arguments\r
- * @throws Exception for any unrecoverable problem\r
- */\r
- public static void main(String[] args) throws Exception {\r
- String server = System.getenv(ENV_VAR);\r
- if (args.length >= 2 && args[0].equals("-s")) {\r
- server = args[1];\r
- String[] t = new String[args.length-2];\r
- if (t.length > 0)\r
- System.arraycopy(args, 2, t, 0, t.length);\r
- args = t;\r
- }\r
- if (server == null || server.equals("")) {\r
- System.err.println("dr-route: you need to specify a server, either via $PROVSRVR or the '-s' option.");\r
- System.exit(1);\r
- }\r
- DRRouteCLI cli = new DRRouteCLI(server);\r
- if (args.length > 0) {\r
- boolean b = cli.runCommand(args);\r
- System.exit(b ? 0 : 1);\r
- } else {\r
- cli.interactive();\r
- System.exit(0);\r
- }\r
- }\r
+ /**\r
+ * Invoke the CLI. The CLI can be run with a single command (given as command line arguments),\r
+ * or in an interactive mode where the user types a sequence of commands to the program. The CLI is invoked via:\r
+ * <pre>\r
+ * java org.onap.dmaap.datarouter.provisioning.utils.DRRouteCLI [ -s <i>server</i> ] [ <i>command</i> ]\r
+ * </pre>\r
+ * A full description of the arguments to this command are\r
+ * <a href="http://wiki.proto.research.att.com/doku.php?id=datarouter-route-cli">here</a>.\r
+ *\r
+ * @param args command line arguments\r
+ * @throws Exception for any unrecoverable problem\r
+ */\r
+ public static void main(String[] args) throws Exception {\r
+ String server = System.getenv(ENV_VAR);\r
+ if (args.length >= 2 && args[0].equals("-s")) {\r
+ server = args[1];\r
+ String[] t = new String[args.length - 2];\r
+ if (t.length > 0)\r
+ System.arraycopy(args, 2, t, 0, t.length);\r
+ args = t;\r
+ }\r
+ if (server == null || server.equals("")) {\r
+ System.err.println("dr-route: you need to specify a server, either via $PROVSRVR or the '-s' option.");\r
+ System.exit(1);\r
+ }\r
+ DRRouteCLI cli = new DRRouteCLI(server);\r
+ if (args.length > 0) {\r
+ boolean b = cli.runCommand(args);\r
+ System.exit(b ? 0 : 1);\r
+ } else {\r
+ cli.interactive();\r
+ System.exit(0);\r
+ }\r
+ }\r
+\r
+ public static final String ENV_VAR = "PROVSRVR";\r
+ public static final String PROMPT = "dr-route> ";\r
+ public static final String DEFAULT_TRUSTSTORE_PATH = /* $JAVA_HOME + */ "/jre/lib/security/cacerts";\r
\r
- public static final String ENV_VAR = "PROVSRVR";\r
- public static final String PROMPT = "dr-route> ";\r
- public static final String DEFAULT_TRUSTSTORE_PATH = /* $JAVA_HOME + */ "/jre/lib/security/cacerts";\r
+ private final String server;\r
+ private int width = 120; // screen width (for list)\r
+ private AbstractHttpClient httpclient;\r
\r
- private final String server;\r
- private int width = 120; // screen width (for list)\r
- private AbstractHttpClient httpclient;\r
+ /**\r
+ * Create a DRRouteCLI object connecting to the specified server.\r
+ *\r
+ * @param server the server to send command to\r
+ * @throws Exception\r
+ */\r
+ public DRRouteCLI(String server) throws Exception {\r
+ this.server = server;\r
+ this.width = 120;\r
+ this.httpclient = new DefaultHttpClient();\r
\r
- /**\r
- * Create a DRRouteCLI object connecting to the specified server.\r
- * @param server the server to send command to\r
- * @throws Exception\r
- */\r
- public DRRouteCLI(String server) throws Exception {\r
- this.server = server;\r
- this.width = 120;\r
- this.httpclient = new DefaultHttpClient();\r
+ Properties p = (new DB()).getProperties();\r
+ String truststore_file = p.getProperty("org.onap.dmaap.datarouter.provserver.truststore.path");\r
+ String truststore_pw = p.getProperty("org.onap.dmaap.datarouter.provserver.truststore.password");\r
\r
- Properties p = (new DB()).getProperties();\r
- String truststore_file = p.getProperty("org.onap.dmaap.datarouter.provserver.truststore.path");\r
- String truststore_pw = p.getProperty("org.onap.dmaap.datarouter.provserver.truststore.password");\r
+ KeyStore trustStore = KeyStore.getInstance(KeyStore.getDefaultType());\r
+ if (truststore_file == null || truststore_file.equals("")) {\r
+ String jhome = System.getenv("JAVA_HOME");\r
+ if (jhome == null || jhome.equals(""))\r
+ jhome = "/opt/java/jdk/jdk180";\r
+ truststore_file = jhome + DEFAULT_TRUSTSTORE_PATH;\r
+ }\r
+ File f = new File(truststore_file);\r
+ if (f.exists()) {\r
+ FileInputStream instream = new FileInputStream(f);\r
+ try {\r
+ trustStore.load(instream, truststore_pw.toCharArray());\r
+ } catch (Exception x) {\r
+ System.err.println("Problem reading truststore: " + x);\r
+ throw x;\r
+ } finally {\r
+ try {\r
+ instream.close();\r
+ } catch (Exception ignore) {\r
+ }\r
+ }\r
+ }\r
\r
- KeyStore trustStore = KeyStore.getInstance(KeyStore.getDefaultType());\r
- if (truststore_file == null || truststore_file.equals("")) {\r
- String jhome = System.getenv("JAVA_HOME");\r
- if (jhome == null || jhome.equals(""))\r
- jhome = "/opt/java/jdk/jdk180";\r
- truststore_file = jhome + DEFAULT_TRUSTSTORE_PATH;\r
- }\r
- File f = new File(truststore_file);\r
- if (f.exists()) {\r
- FileInputStream instream = new FileInputStream(f);\r
- try {\r
- trustStore.load(instream, truststore_pw.toCharArray());\r
- } catch (Exception x) {\r
- System.err.println("Problem reading truststore: "+x);\r
- throw x;\r
- } finally {\r
- try { instream.close(); } catch (Exception ignore) {}\r
- }\r
- }\r
+ SSLSocketFactory socketFactory = new SSLSocketFactory(trustStore);\r
+ Scheme sch = new Scheme("https", 443, socketFactory);\r
+ httpclient.getConnectionManager().getSchemeRegistry().register(sch);\r
+ }\r
\r
- SSLSocketFactory socketFactory = new SSLSocketFactory(trustStore);\r
- Scheme sch = new Scheme("https", 443, socketFactory);\r
- httpclient.getConnectionManager().getSchemeRegistry().register(sch);\r
- }\r
+ private void interactive() throws IOException {\r
+ LineNumberReader in = new LineNumberReader(new InputStreamReader(System.in));\r
+ while (true) {\r
+ System.out.print(PROMPT);\r
+ String line = in.readLine();\r
+ if (line == null)\r
+ return;\r
+ line = line.trim();\r
+ if (line.equalsIgnoreCase("exit")) // "exit" may only be used in interactive mode\r
+ return;\r
+ if (line.equalsIgnoreCase("quit")) // "quit" may only be used in interactive mode\r
+ return;\r
+ String[] args = line.split("[ \t]+");\r
+ if (args.length > 0)\r
+ runCommand(args);\r
+ }\r
+ }\r
\r
- private void interactive() throws IOException {\r
- LineNumberReader in = new LineNumberReader(new InputStreamReader(System.in));\r
- while (true) {\r
- System.out.print(PROMPT);\r
- String line = in.readLine();\r
- if (line == null)\r
- return;\r
- line = line.trim();\r
- if (line.equalsIgnoreCase("exit")) // "exit" may only be used in interactive mode\r
- return;\r
- if (line.equalsIgnoreCase("quit")) // "quit" may only be used in interactive mode\r
- return;\r
- String[] args = line.split("[ \t]+");\r
- if (args.length > 0)\r
- runCommand(args);\r
- }\r
- }\r
+ /**\r
+ * Run the command specified by the arguments.\r
+ *\r
+ * @param args The command line arguments.\r
+ * @return true if the command was valid and succeeded\r
+ */\r
+ public boolean runCommand(String[] args) {\r
+ String cmd = args[0].trim().toLowerCase();\r
+ if (cmd.equals("add")) {\r
+ if (args.length > 2) {\r
+ if (args[1].startsWith("in") && args.length >= 6) {\r
+ return addIngress(args);\r
+ }\r
+ if (args[1].startsWith("eg") && args.length == 4) {\r
+ return addEgress(args);\r
+ }\r
+ if (args[1].startsWith("ne") && args.length == 5) {\r
+ return addRoute(args);\r
+ }\r
+ }\r
+ System.err.println("Add command should be one of:");\r
+ System.err.println(" add in[gress] feedid user subnet nodepatt [ seq ]");\r
+ System.err.println(" add eg[ress] subid node");\r
+ System.err.println(" add ne[twork] fromnode tonode vianode");\r
+ } else if (cmd.startsWith("del")) {\r
+ if (args.length > 2) {\r
+ if (args[1].startsWith("in") && args.length == 5) {\r
+ return delIngress(args);\r
+ }\r
+ if (args[1].startsWith("in") && args.length == 3) {\r
+ return delIngress(args);\r
+ }\r
+ if (args[1].startsWith("eg") && args.length == 3) {\r
+ return delEgress(args);\r
+ }\r
+ if (args[1].startsWith("ne") && args.length == 4) {\r
+ return delRoute(args);\r
+ }\r
+ }\r
+ System.err.println("Delete command should be one of:");\r
+ System.err.println(" del in[gress] feedid user subnet");\r
+ System.err.println(" del in[gress] seq");\r
+ System.err.println(" del eg[ress] subid");\r
+ System.err.println(" del ne[twork] fromnode tonode");\r
+ } else if (cmd.startsWith("lis")) {\r
+ return list(args);\r
+ } else if (cmd.startsWith("wid") && args.length > 1) {\r
+ width = Integer.parseInt(args[1]);\r
+ return true;\r
+ } else if (cmd.startsWith("?") || cmd.startsWith("hel") || cmd.startsWith("usa")) {\r
+ usage();\r
+ } else if (cmd.startsWith("#")) {\r
+ // comment -- ignore\r
+ } else {\r
+ System.err.println("Command should be one of add, del, list, exit, quit");\r
+ }\r
+ return false;\r
+ }\r
\r
- /**\r
- * Run the command specified by the arguments.\r
- * @param args The command line arguments.\r
- * @return true if the command was valid and succeeded\r
- */\r
- public boolean runCommand(String[] args) {\r
- String cmd = args[0].trim().toLowerCase();\r
- if (cmd.equals("add")) {\r
- if (args.length > 2) {\r
- if (args[1].startsWith("in") && args.length >= 6) {\r
- return addIngress(args);\r
- }\r
- if (args[1].startsWith("eg") && args.length == 4) {\r
- return addEgress(args);\r
- }\r
- if (args[1].startsWith("ne") && args.length == 5) {\r
- return addRoute(args);\r
- }\r
- }\r
- System.err.println("Add command should be one of:");\r
- System.err.println(" add in[gress] feedid user subnet nodepatt [ seq ]");\r
- System.err.println(" add eg[ress] subid node");\r
- System.err.println(" add ne[twork] fromnode tonode vianode");\r
- } else if (cmd.startsWith("del")) {\r
- if (args.length > 2) {\r
- if (args[1].startsWith("in") && args.length == 5) {\r
- return delIngress(args);\r
- }\r
- if (args[1].startsWith("in") && args.length == 3) {\r
- return delIngress(args);\r
- }\r
- if (args[1].startsWith("eg") && args.length == 3) {\r
- return delEgress(args);\r
- }\r
- if (args[1].startsWith("ne") && args.length == 4) {\r
- return delRoute(args);\r
- }\r
- }\r
- System.err.println("Delete command should be one of:");\r
- System.err.println(" del in[gress] feedid user subnet");\r
- System.err.println(" del in[gress] seq");\r
- System.err.println(" del eg[ress] subid");\r
- System.err.println(" del ne[twork] fromnode tonode");\r
- } else if (cmd.startsWith("lis")) {\r
- return list(args);\r
- } else if (cmd.startsWith("wid") && args.length > 1) {\r
- width = Integer.parseInt(args[1]);\r
- return true;\r
- } else if (cmd.startsWith("?") || cmd.startsWith("hel") || cmd.startsWith("usa")) {\r
- usage();\r
- } else if (cmd.startsWith("#")) {\r
- // comment -- ignore\r
- } else {\r
- System.err.println("Command should be one of add, del, list, exit, quit");\r
- }\r
- return false;\r
- }\r
+ private void usage() {\r
+ System.out.println("Enter one of the following commands:");\r
+ System.out.println(" add in[gress] feedid user subnet nodepatt [ seq ]");\r
+ System.out.println(" add eg[ress] subid node");\r
+ System.out.println(" add ne[twork] fromnode tonode vianode");\r
+ System.out.println(" del in[gress] feedid user subnet");\r
+ System.out.println(" del in[gress] seq");\r
+ System.out.println(" del eg[ress] subid");\r
+ System.out.println(" del ne[twork] fromnode tonode");\r
+ System.out.println(" list [ all | ingress | egress | network ]");\r
+ System.out.println(" exit");\r
+ System.out.println(" quit");\r
+ }\r
\r
- private void usage() {\r
- System.out.println("Enter one of the following commands:");\r
- System.out.println(" add in[gress] feedid user subnet nodepatt [ seq ]");\r
- System.out.println(" add eg[ress] subid node");\r
- System.out.println(" add ne[twork] fromnode tonode vianode");\r
- System.out.println(" del in[gress] feedid user subnet");\r
- System.out.println(" del in[gress] seq");\r
- System.out.println(" del eg[ress] subid");\r
- System.out.println(" del ne[twork] fromnode tonode");\r
- System.out.println(" list [ all | ingress | egress | network ]");\r
- System.out.println(" exit");\r
- System.out.println(" quit");\r
- }\r
+ private boolean addIngress(String[] args) {\r
+ String url = String.format("https://%s/internal/route/ingress/?feed=%s&user=%s&subnet=%s&nodepatt=%s", server, args[2], args[3], args[4], args[5]);\r
+ if (args.length > 6)\r
+ url += "&seq=" + args[6];\r
+ return doPost(url);\r
+ }\r
\r
- private boolean addIngress(String[] args) {\r
- String url = String.format("https://%s/internal/route/ingress/?feed=%s&user=%s&subnet=%s&nodepatt=%s", server, args[2], args[3], args[4], args[5]);\r
- if (args.length > 6)\r
- url += "&seq=" + args[6];\r
- return doPost(url);\r
- }\r
+ private boolean addEgress(String[] args) {\r
+ String url = String.format("https://%s/internal/route/egress/?sub=%s&node=%s", server, args[2], args[3]);\r
+ return doPost(url);\r
+ }\r
\r
- private boolean addEgress(String[] args) {\r
- String url = String.format("https://%s/internal/route/egress/?sub=%s&node=%s", server, args[2], args[3]);\r
- return doPost(url);\r
- }\r
+ private boolean addRoute(String[] args) {\r
+ String url = String.format("https://%s/internal/route/network/?from=%s&to=%s&via=%s", server, args[2], args[3], args[4]);\r
+ return doPost(url);\r
+ }\r
\r
- private boolean addRoute(String[] args) {\r
- String url = String.format("https://%s/internal/route/network/?from=%s&to=%s&via=%s", server, args[2], args[3], args[4]);\r
- return doPost(url);\r
- }\r
+ private boolean delIngress(String[] args) {\r
+ String url;\r
+ if (args.length == 5) {\r
+ String subnet = args[4].replaceAll("/", "!"); // replace the / with a !\r
+ url = String.format("https://%s/internal/route/ingress/%s/%s/%s", server, args[2], args[3], subnet);\r
+ } else {\r
+ url = String.format("https://%s/internal/route/ingress/%s", server, args[2]);\r
+ }\r
+ return doDelete(url);\r
+ }\r
\r
- private boolean delIngress(String[] args) {\r
- String url;\r
- if (args.length == 5) {\r
- String subnet = args[4].replaceAll("/", "!"); // replace the / with a !\r
- url = String.format("https://%s/internal/route/ingress/%s/%s/%s", server, args[2], args[3], subnet);\r
- } else {\r
- url = String.format("https://%s/internal/route/ingress/%s", server, args[2]);\r
- }\r
- return doDelete(url);\r
- }\r
+ private boolean delEgress(String[] args) {\r
+ String url = String.format("https://%s/internal/route/egress/%s", server, args[2]);\r
+ return doDelete(url);\r
+ }\r
\r
- private boolean delEgress(String[] args) {\r
- String url = String.format("https://%s/internal/route/egress/%s", server, args[2]);\r
- return doDelete(url);\r
- }\r
+ private boolean delRoute(String[] args) {\r
+ String url = String.format("https://%s/internal/route/network/%s/%s", server, args[2], args[3]);\r
+ return doDelete(url);\r
+ }\r
\r
- private boolean delRoute(String[] args) {\r
- String url = String.format("https://%s/internal/route/network/%s/%s", server, args[2], args[3]);\r
- return doDelete(url);\r
- }\r
+ private boolean list(String[] args) {\r
+ String tbl = (args.length == 1) ? "all" : args[1].toLowerCase();\r
+ JSONObject jo = doGet("https://" + server + "/internal/route/"); // Returns all 3 tables\r
+ StringBuilder sb = new StringBuilder();\r
+ if (tbl.startsWith("al") || tbl.startsWith("in")) {\r
+ // Display the IRT\r
+ JSONArray irt = jo.optJSONArray("ingress");\r
+ int cw1 = 6, cw2 = 6, cw3 = 6, cw4 = 6; // determine column widths for first 4 cols\r
+ for (int i = 0; irt != null && i < irt.length(); i++) {\r
+ JSONObject e = irt.getJSONObject(i);\r
+ cw1 = Math.max(cw1, ("" + e.getInt("seq")).length());\r
+ cw2 = Math.max(cw2, ("" + e.getInt("feedid")).length());\r
+ String t = e.optString("user");\r
+ cw3 = Math.max(cw3, (t == null) ? 1 : t.length());\r
+ t = e.optString("subnet");\r
+ cw4 = Math.max(cw4, (t == null) ? 1 : t.length());\r
+ }\r
\r
- private boolean list(String[] args) {\r
- String tbl = (args.length == 1) ? "all" : args[1].toLowerCase();\r
- JSONObject jo = doGet("https://"+server+"/internal/route/"); // Returns all 3 tables\r
- StringBuilder sb = new StringBuilder();\r
- if (tbl.startsWith("al") || tbl.startsWith("in")) {\r
- // Display the IRT\r
- JSONArray irt = jo.optJSONArray("ingress");\r
- int cw1 = 6, cw2 = 6, cw3 = 6, cw4 = 6; // determine column widths for first 4 cols\r
- for (int i = 0; irt != null && i < irt.length(); i++) {\r
- JSONObject e = irt.getJSONObject(i);\r
- cw1 = Math.max(cw1, (""+ e.getInt("seq")).length());\r
- cw2 = Math.max(cw2, (""+e.getInt("feedid")).length());\r
- String t = e.optString("user");\r
- cw3 = Math.max(cw3, (t == null) ? 1 : t.length());\r
- t = e.optString("subnet");\r
- cw4 = Math.max(cw4, (t == null) ? 1 : t.length());\r
- }\r
+ int nblank = cw1 + cw2 + cw3 + cw4 + 8;\r
+ sb.append("Ingress Routing Table\n");\r
+ sb.append(String.format("%s %s %s %s Nodes\n", ext("Seq", cw1), ext("FeedID", cw2), ext("User", cw3), ext("Subnet", cw4)));\r
+ for (int i = 0; irt != null && i < irt.length(); i++) {\r
+ JSONObject e = irt.getJSONObject(i);\r
+ String seq = "" + e.getInt("seq");\r
+ String feedid = "" + e.getInt("feedid");\r
+ String user = e.optString("user");\r
+ String subnet = e.optString("subnet");\r
+ if (user.equals("")) user = "-";\r
+ if (subnet.equals("")) subnet = "-";\r
+ JSONArray nodes = e.getJSONArray("node");\r
+ int sol = sb.length();\r
+ sb.append(String.format("%s %s %s %s ", ext(seq, cw1), ext(feedid, cw2), ext(user, cw3), ext(subnet, cw4)));\r
+ for (int j = 0; j < nodes.length(); j++) {\r
+ String nd = nodes.getString(j);\r
+ int cursor = sb.length() - sol;\r
+ if (j > 0 && (cursor + nd.length() > width)) {\r
+ sb.append("\n");\r
+ sol = sb.length();\r
+ sb.append(ext(" ", nblank));\r
+ }\r
+ sb.append(nd);\r
+ if ((j + 1) < nodes.length()) {\r
+ sb.append(", ");\r
+ }\r
+ }\r
+ sb.append("\n");\r
+ }\r
+ }\r
+ if (tbl.startsWith("al") || tbl.startsWith("eg")) {\r
+ // Display the ERT\r
+ JSONObject ert = jo.optJSONObject("egress");\r
+ String[] subs = (ert == null) ? new String[0] : JSONObject.getNames(ert);\r
+ if (subs == null)\r
+ subs = new String[0];\r
+ Arrays.sort(subs);\r
+ int cw1 = 5;\r
+ for (int i = 0; i < subs.length; i++) {\r
+ cw1 = Math.max(cw1, subs[i].length());\r
+ }\r
\r
- int nblank = cw1 + cw2 + cw3 + cw4 + 8;\r
- sb.append("Ingress Routing Table\n");\r
- sb.append(String.format("%s %s %s %s Nodes\n", ext("Seq", cw1), ext("FeedID", cw2), ext("User", cw3), ext("Subnet", cw4)));\r
- for (int i = 0; irt != null && i < irt.length(); i++) {\r
- JSONObject e = irt.getJSONObject(i);\r
- String seq = ""+e.getInt("seq");\r
- String feedid = ""+e.getInt("feedid");\r
- String user = e.optString("user");\r
- String subnet = e.optString("subnet");\r
- if (user.equals("")) user = "-";\r
- if (subnet.equals("")) subnet = "-";\r
- JSONArray nodes = e.getJSONArray("node");\r
- int sol = sb.length();\r
- sb.append(String.format("%s %s %s %s ", ext(seq, cw1), ext(feedid, cw2), ext(user, cw3), ext(subnet, cw4)));\r
- for (int j = 0; j < nodes.length(); j++) {\r
- String nd = nodes.getString(j);\r
- int cursor = sb.length() - sol;\r
- if (j > 0 && (cursor + nd.length() > width)) {\r
- sb.append("\n");\r
- sol = sb.length();\r
- sb.append(ext(" ", nblank));\r
- }\r
- sb.append(nd);\r
- if ((j+1) < nodes.length()) {\r
- sb.append(", ");\r
- }\r
- }\r
- sb.append("\n");\r
- }\r
- }\r
- if (tbl.startsWith("al") || tbl.startsWith("eg")) {\r
- // Display the ERT\r
- JSONObject ert = jo.optJSONObject("egress");\r
- String[] subs = (ert == null) ? new String[0] : JSONObject.getNames(ert);\r
- if (subs == null)\r
- subs = new String[0];\r
- Arrays.sort(subs);\r
- int cw1 = 5;\r
- for (int i = 0; i < subs.length; i++) {\r
- cw1 = Math.max(cw1, subs[i].length());\r
- }\r
+ if (sb.length() > 0)\r
+ sb.append("\n");\r
+ sb.append("Egress Routing Table\n");\r
+ sb.append(String.format("%s Node\n", ext("SubID", cw1)));\r
+ for (int i = 0; i < subs.length; i++) {\r
+ String node = ert.getString(subs[i]);\r
+ sb.append(String.format("%s %s\n", ext(subs[i], cw1), node));\r
+ }\r
+ }\r
+ if (tbl.startsWith("al") || tbl.startsWith("ne")) {\r
+ // Display the NRT\r
+ JSONArray nrt = jo.optJSONArray("routing");\r
+ int cw1 = 4, cw2 = 4;\r
+ for (int i = 0; nrt != null && i < nrt.length(); i++) {\r
+ JSONObject e = nrt.getJSONObject(i);\r
+ String from = e.getString("from");\r
+ String to = e.getString("to");\r
+ cw1 = Math.max(cw1, from.length());\r
+ cw2 = Math.max(cw2, to.length());\r
+ }\r
\r
- if (sb.length() > 0)\r
- sb.append("\n");\r
- sb.append("Egress Routing Table\n");\r
- sb.append(String.format("%s Node\n", ext("SubID", cw1)));\r
- for (int i = 0; i < subs.length; i++) {\r
- String node = ert.getString(subs[i]);\r
- sb.append(String.format("%s %s\n", ext(subs[i], cw1), node));\r
- }\r
- }\r
- if (tbl.startsWith("al") || tbl.startsWith("ne")) {\r
- // Display the NRT\r
- JSONArray nrt = jo.optJSONArray("routing");\r
- int cw1 = 4, cw2 = 4;\r
- for (int i = 0; nrt != null && i < nrt.length(); i++) {\r
- JSONObject e = nrt.getJSONObject(i);\r
- String from = e.getString("from");\r
- String to = e.getString("to");\r
- cw1 = Math.max(cw1, from.length());\r
- cw2 = Math.max(cw2, to.length());\r
- }\r
+ if (sb.length() > 0)\r
+ sb.append("\n");\r
+ sb.append("Network Routing Table\n");\r
+ sb.append(String.format("%s %s Via\n", ext("From", cw1), ext("To", cw2)));\r
+ for (int i = 0; nrt != null && i < nrt.length(); i++) {\r
+ JSONObject e = nrt.getJSONObject(i);\r
+ String from = e.getString("from");\r
+ String to = e.getString("to");\r
+ String via = e.getString("via");\r
+ sb.append(String.format("%s %s %s\n", ext(from, cw1), ext(to, cw2), via));\r
+ }\r
+ }\r
+ System.out.print(sb.toString());\r
+ return true;\r
+ }\r
\r
- if (sb.length() > 0)\r
- sb.append("\n");\r
- sb.append("Network Routing Table\n");\r
- sb.append(String.format("%s %s Via\n", ext("From", cw1), ext("To", cw2)));\r
- for (int i = 0; nrt != null && i < nrt.length(); i++) {\r
- JSONObject e = nrt.getJSONObject(i);\r
- String from = e.getString("from");\r
- String to = e.getString("to");\r
- String via = e.getString("via");\r
- sb.append(String.format("%s %s %s\n", ext(from, cw1), ext(to, cw2), via));\r
- }\r
- }\r
- System.out.print(sb.toString());\r
- return true;\r
- }\r
- private String ext(String s, int n) {\r
- if (s == null)\r
- s = "-";\r
- while (s.length() < n)\r
- s += " ";\r
- return s;\r
- }\r
+ private String ext(String s, int n) {\r
+ if (s == null)\r
+ s = "-";\r
+ while (s.length() < n)\r
+ s += " ";\r
+ return s;\r
+ }\r
\r
- private boolean doDelete(String url) {\r
- boolean rv = false;\r
- HttpDelete meth = new HttpDelete(url);\r
- try {\r
- HttpResponse response = httpclient.execute(meth);\r
- HttpEntity entity = response.getEntity();\r
- StatusLine sl = response.getStatusLine();\r
- rv = (sl.getStatusCode() == HttpServletResponse.SC_OK);\r
- if (rv) {\r
- System.out.println("Routing entry deleted.");\r
- EntityUtils.consume(entity);\r
- } else {\r
- printErrorText(entity);\r
- }\r
- } catch (Exception e) {\r
- } finally {\r
- meth.releaseConnection();\r
- }\r
- return rv;\r
- }\r
+ private boolean doDelete(String url) {\r
+ boolean rv = false;\r
+ HttpDelete meth = new HttpDelete(url);\r
+ try {\r
+ HttpResponse response = httpclient.execute(meth);\r
+ HttpEntity entity = response.getEntity();\r
+ StatusLine sl = response.getStatusLine();\r
+ rv = (sl.getStatusCode() == HttpServletResponse.SC_OK);\r
+ if (rv) {\r
+ System.out.println("Routing entry deleted.");\r
+ EntityUtils.consume(entity);\r
+ } else {\r
+ printErrorText(entity);\r
+ }\r
+ } catch (Exception e) {\r
+ } finally {\r
+ meth.releaseConnection();\r
+ }\r
+ return rv;\r
+ }\r
\r
- private JSONObject doGet(String url) {\r
- JSONObject rv = new JSONObject();\r
- HttpGet meth = new HttpGet(url);\r
- try {\r
- HttpResponse response = httpclient.execute(meth);\r
- HttpEntity entity = response.getEntity();\r
- StatusLine sl = response.getStatusLine();\r
- if (sl.getStatusCode() == HttpServletResponse.SC_OK) {\r
- rv = new JSONObject(new JSONTokener(entity.getContent()));\r
- } else {\r
- printErrorText(entity);\r
- }\r
- } catch (Exception e) {\r
- System.err.println(e);\r
- } finally {\r
- meth.releaseConnection();\r
- }\r
- return rv;\r
- }\r
+ private JSONObject doGet(String url) {\r
+ JSONObject rv = new JSONObject();\r
+ HttpGet meth = new HttpGet(url);\r
+ try {\r
+ HttpResponse response = httpclient.execute(meth);\r
+ HttpEntity entity = response.getEntity();\r
+ StatusLine sl = response.getStatusLine();\r
+ if (sl.getStatusCode() == HttpServletResponse.SC_OK) {\r
+ rv = new JSONObject(new JSONTokener(entity.getContent()));\r
+ } else {\r
+ printErrorText(entity);\r
+ }\r
+ } catch (Exception e) {\r
+ System.err.println(e);\r
+ } finally {\r
+ meth.releaseConnection();\r
+ }\r
+ return rv;\r
+ }\r
\r
- private boolean doPost(String url) {\r
- boolean rv = false;\r
- HttpPost meth = new HttpPost(url);\r
- try {\r
- HttpResponse response = httpclient.execute(meth);\r
- HttpEntity entity = response.getEntity();\r
- StatusLine sl = response.getStatusLine();\r
- rv = (sl.getStatusCode() == HttpServletResponse.SC_OK);\r
- if (rv) {\r
- System.out.println("Routing entry added.");\r
- EntityUtils.consume(entity);\r
- } else {\r
- printErrorText(entity);\r
- }\r
- } catch (Exception e) {\r
- } finally {\r
- meth.releaseConnection();\r
- }\r
- return rv;\r
- }\r
+ private boolean doPost(String url) {\r
+ boolean rv = false;\r
+ HttpPost meth = new HttpPost(url);\r
+ try {\r
+ HttpResponse response = httpclient.execute(meth);\r
+ HttpEntity entity = response.getEntity();\r
+ StatusLine sl = response.getStatusLine();\r
+ rv = (sl.getStatusCode() == HttpServletResponse.SC_OK);\r
+ if (rv) {\r
+ System.out.println("Routing entry added.");\r
+ EntityUtils.consume(entity);\r
+ } else {\r
+ printErrorText(entity);\r
+ }\r
+ } catch (Exception e) {\r
+ } finally {\r
+ meth.releaseConnection();\r
+ }\r
+ return rv;\r
+ }\r
\r
- private void printErrorText(HttpEntity entity) throws IllegalStateException, IOException {\r
- // Look for and print only the part of the output between <pre>...</pre>\r
- InputStream is = entity.getContent();\r
- StringBuilder sb = new StringBuilder();\r
- byte[] b = new byte[512];\r
- int n = 0;\r
- while ((n = is.read(b)) > 0) {\r
- sb.append(new String(b, 0, n));\r
- }\r
- is.close();\r
- int ix = sb.indexOf("<pre>");\r
- if (ix > 0)\r
- sb.delete(0, ix+5);\r
- ix = sb.indexOf("</pre>");\r
- if (ix > 0)\r
- sb.delete(ix, sb.length());\r
- System.err.println(sb.toString());\r
- }\r
+ private void printErrorText(HttpEntity entity) throws IllegalStateException, IOException {\r
+ // Look for and print only the part of the output between <pre>...</pre>\r
+ InputStream is = entity.getContent();\r
+ StringBuilder sb = new StringBuilder();\r
+ byte[] b = new byte[512];\r
+ int n = 0;\r
+ while ((n = is.read(b)) > 0) {\r
+ sb.append(new String(b, 0, n));\r
+ }\r
+ is.close();\r
+ int ix = sb.indexOf("<pre>");\r
+ if (ix > 0)\r
+ sb.delete(0, ix + 5);\r
+ ix = sb.indexOf("</pre>");\r
+ if (ix > 0)\r
+ sb.delete(ix, sb.length());\r
+ System.err.println(sb.toString());\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: JSONUtilities.java,v 1.1 2013/04/26 21:00:26 eby Exp $\r
*/\r
public class JSONUtilities {\r
- /**\r
- * Does the String <i>v</i> represent a valid Internet address (with or without a\r
- * mask length appended).\r
- * @param v the string to check\r
- * @return true if valid, false otherwise\r
- */\r
- public static boolean validIPAddrOrSubnet(String v) {\r
- String[] pp = { v, "" };\r
- if (v.indexOf('/') > 0)\r
- pp = v.split("/");\r
- try {\r
- InetAddress addr = InetAddress.getByName(pp[0]);\r
- if (pp[1].length() > 0) {\r
- // check subnet mask\r
- int mask = Integer.parseInt(pp[1]);\r
- if (mask > (addr.getAddress().length * 8))\r
- return false;\r
- }\r
- return true;\r
- } catch (UnknownHostException e) {\r
- return false;\r
- }\r
- }\r
- /**\r
- * Build a JSON array from a collection of Strings.\r
- * @param coll the collection\r
- * @return a String containing a JSON array\r
- */\r
- public static String createJSONArray(Collection<String> coll) {\r
- StringBuilder sb = new StringBuilder("[");\r
- String pfx = "\n";\r
- for (String t : coll) {\r
- sb.append(pfx).append(" \"").append(t).append("\"");\r
- pfx = ",\n";\r
- }\r
- sb.append("\n]\n");\r
- return sb.toString();\r
- }\r
+ /**\r
+ * Does the String <i>v</i> represent a valid Internet address (with or without a\r
+ * mask length appended).\r
+ *\r
+ * @param v the string to check\r
+ * @return true if valid, false otherwise\r
+ */\r
+ public static boolean validIPAddrOrSubnet(String v) {\r
+ String[] pp = {v, ""};\r
+ if (v.indexOf('/') > 0)\r
+ pp = v.split("/");\r
+ try {\r
+ InetAddress addr = InetAddress.getByName(pp[0]);\r
+ if (pp[1].length() > 0) {\r
+ // check subnet mask\r
+ int mask = Integer.parseInt(pp[1]);\r
+ if (mask > (addr.getAddress().length * 8))\r
+ return false;\r
+ }\r
+ return true;\r
+ } catch (UnknownHostException e) {\r
+ return false;\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Build a JSON array from a collection of Strings.\r
+ *\r
+ * @param coll the collection\r
+ * @return a String containing a JSON array\r
+ */\r
+ public static String createJSONArray(Collection<String> coll) {\r
+ StringBuilder sb = new StringBuilder("[");\r
+ String pfx = "\n";\r
+ for (String t : coll) {\r
+ sb.append(pfx).append(" \"").append(t).append("\"");\r
+ pfx = ",\n";\r
+ }\r
+ sb.append("\n]\n");\r
+ return sb.toString();\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
- * *
+ * *
* * http://www.apache.org/licenses/LICENSE-2.0
- * *
+ * *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* <pre>
* myString = new JSONObject().put("JSON", "Hello, World!").toString();
* </pre>
- *
+ * <p>
* produces the string <code>{"JSON": "Hello, World"}</code>.
* <p>
* The texts produced by the <code>toString</code> methods strictly conform to
/**
* The maximum number of keys in the key pool.
*/
- private static final int keyPoolSize = 100;
+ private static final int keyPoolSize = 100;
- /**
+ /**
* Key pooling is like string interning, but without permanently tying up
* memory. To help conserve memory, storage of duplicated key strings in
* JSONObjects will be avoided by using a key pool to manage unique key
* string objects. This is used by JSONObject.put(string, object).
*/
- private static Map<String,Object> keyPool = new LinkedHashMap<String,Object>(keyPoolSize);
+ private static Map<String, Object> keyPool = new LinkedHashMap<String, Object>(keyPoolSize);
/**
* JSONObject.NULL is equivalent to the value that JavaScript calls null,
* whilst Java's null is equivalent to the value that JavaScript calls
* undefined.
*/
- private static final class Null {
+ private static final class Null {
/**
* There is only intended to be a single instance of the NULL object,
* so the clone method returns itself.
- * @return NULL.
+ *
+ * @return NULL.
*/
protected final Object clone() {
return this;
/**
* A Null object is equal to the null value and to itself.
- * @param object An object to test for nullness.
+ *
+ * @param object An object to test for nullness.
* @return true if the object parameter is the JSONObject.NULL object
- * or null.
+ * or null.
*/
public boolean equals(Object object) {
return object == null || object == this;
/**
* Get the "null" string value.
+ *
* @return The string "null".
*/
public String toString() {
/**
* The map where the JSONObject's properties are kept.
*/
- private final Map<String,Object> map;
+ private final Map<String, Object> map;
/**
* Construct an empty JSONObject.
*/
public LOGJSONObject() {
- this.map = new LinkedHashMap<String,Object>();
+ this.map = new LinkedHashMap<String, Object>();
}
* Construct a JSONObject from a subset of another JSONObject.
* An array of strings is used to identify the keys that should be copied.
* Missing keys are ignored.
- * @param jo A JSONObject.
+ *
+ * @param jo A JSONObject.
* @param names An array of strings.
* @throws JSONException
- * @exception JSONException If a value is a non-finite number or if a name is duplicated.
+ * @throws JSONException If a value is a non-finite number or if a name is duplicated.
*/
public LOGJSONObject(LOGJSONObject jo, String[] names) {
this();
/**
* Construct a JSONObject from a JSONTokener.
+ *
* @param x A JSONTokener object containing the source string.
* @throws JSONException If there is a syntax error in the source string
- * or a duplicated key.
+ * or a duplicated key.
*/
public LOGJSONObject(JSONTokener x) throws JSONException {
this();
if (x.nextClean() != '{') {
throw x.syntaxError("A JSONObject text must begin with '{'");
}
- for (;;) {
+ for (; ; ) {
c = x.nextClean();
switch (c) {
- case 0:
- throw x.syntaxError("A JSONObject text must end with '}'");
- case '}':
- return;
- default:
- x.back();
- key = x.nextValue().toString();
+ case 0:
+ throw x.syntaxError("A JSONObject text must end with '}'");
+ case '}':
+ return;
+ default:
+ x.back();
+ key = x.nextValue().toString();
}
// The key is followed by ':'. We will also tolerate '=' or '=>'.
// Pairs are separated by ','. We will also tolerate ';'.
switch (x.nextClean()) {
- case ';':
- case ',':
- if (x.nextClean() == '}') {
+ case ';':
+ case ',':
+ if (x.nextClean() == '}') {
+ return;
+ }
+ x.back();
+ break;
+ case '}':
return;
- }
- x.back();
- break;
- case '}':
- return;
- default:
- throw x.syntaxError("Expected a ',' or '}'");
+ default:
+ throw x.syntaxError("Expected a ',' or '}'");
}
}
}
* Construct a JSONObject from a Map.
*
* @param map A map object that can be used to initialize the contents of
- * the JSONObject.
+ * the JSONObject.
* @throws JSONException
*/
- public LOGJSONObject(Map<String,Object> map) {
- this.map = new LinkedHashMap<String,Object>();
+ public LOGJSONObject(Map<String, Object> map) {
+ this.map = new LinkedHashMap<String, Object>();
if (map != null) {
- Iterator<Map.Entry<String,Object>> i = map.entrySet().iterator();
+ Iterator<Map.Entry<String, Object>> i = map.entrySet().iterator();
while (i.hasNext()) {
- Map.Entry<String,Object> e = i.next();
+ Map.Entry<String, Object> e = i.next();
Object value = e.getValue();
if (value != null) {
this.map.put(e.getKey(), wrap(value));
* with <code>"get"</code> or <code>"is"</code> followed by an uppercase letter,
* the method is invoked, and a key and the value returned from the getter method
* are put into the new JSONObject.
- *
+ * <p>
* The key is formed by removing the <code>"get"</code> or <code>"is"</code> prefix.
* If the second remaining character is not upper case, then the first
* character is converted to lower case.
- *
+ * <p>
* For example, if an object has a method named <code>"getName"</code>, and
* if the result of calling <code>object.getName()</code> is <code>"Larry Fine"</code>,
* then the JSONObject will contain <code>"name": "Larry Fine"</code>.
*
* @param bean An object that has getter methods that should be used
- * to make a JSONObject.
+ * to make a JSONObject.
*/
public LOGJSONObject(Object bean) {
this();
* from the names array, and the values will be the field values associated
* with those keys in the object. If a key is not found or not visible,
* then it will not be copied into the new JSONObject.
+ *
* @param object An object that has fields that should be used to make a
- * JSONObject.
- * @param names An array of strings, the names of the fields to be obtained
- * from the object.
+ * JSONObject.
+ * @param names An array of strings, the names of the fields to be obtained
+ * from the object.
*/
public LOGJSONObject(Object object, String names[]) {
this();
/**
* Construct a JSONObject from a source JSON text string.
* This is the most commonly used JSONObject constructor.
- * @param source A string beginning
- * with <code>{</code> <small>(left brace)</small> and ending
- * with <code>}</code> <small>(right brace)</small>.
- * @exception JSONException If there is a syntax error in the source
- * string or a duplicated key.
+ *
+ * @param source A string beginning
+ * with <code>{</code> <small>(left brace)</small> and ending
+ * with <code>}</code> <small>(right brace)</small>.
+ * @throws JSONException If there is a syntax error in the source
+ * string or a duplicated key.
*/
public LOGJSONObject(String source) throws JSONException {
this(new JSONTokener(source));
/**
* Construct a JSONObject from a ResourceBundle.
+ *
* @param baseName The ResourceBundle base name.
- * @param locale The Locale to load the ResourceBundle for.
+ * @param locale The Locale to load the ResourceBundle for.
* @throws JSONException If any JSONExceptions are detected.
*/
public LOGJSONObject(String baseName, Locale locale) throws JSONException {
// segment except the last. Add the value using the last segment's name into
// the deepest nested JSONObject.
- String[] path = ((String)key).split("\\.");
+ String[] path = ((String) key).split("\\.");
int last = path.length - 1;
LOGJSONObject target = this;
for (int i = 0; i < last; i += 1) {
}
target = nextTarget;
}
- target.put(path[last], bundle.getString((String)key));
+ target.put(path[last], bundle.getString((String) key));
}
}
}
* JSONArray is stored under the key to hold all of the accumulated values.
* If there is already a JSONArray, then the new value is appended to it.
* In contrast, the put method replaces the previous value.
- *
+ * <p>
* If only one value is accumulated that is not a JSONArray, then the
* result will be the same as using put. But if multiple values are
* accumulated, then the result will be like append.
+ *
* @param key A key string.
* @param value An object to be accumulated under the key.
* @return this.
* @throws JSONException If the value is an invalid number
- * or if the key is null.
+ * or if the key is null.
*/
public LOGJSONObject accumulate(
- String key,
- Object value
+ String key,
+ Object value
) throws JSONException {
testValidity(value);
Object object = this.opt(key);
? new JSONArray().put(value)
: value);
} else if (object instanceof JSONArray) {
- ((JSONArray)object).put(value);
+ ((JSONArray) object).put(value);
} else {
this.put(key, new JSONArray().put(object).put(value));
}
* JSONObject, then the key is put in the JSONObject with its value being a
* JSONArray containing the value parameter. If the key was already
* associated with a JSONArray, then the value parameter is appended to it.
+ *
* @param key A key string.
* @param value An object to be accumulated under the key.
* @return this.
* @throws JSONException If the key is null or if the current value
- * associated with the key is not a JSONArray.
+ * associated with the key is not a JSONArray.
*/
public LOGJSONObject append(String key, Object value) throws JSONException {
testValidity(value);
if (object == null) {
this.put(key, new JSONArray().put(value));
} else if (object instanceof JSONArray) {
- this.put(key, ((JSONArray)object).put(value));
+ this.put(key, ((JSONArray) object).put(value));
} else {
throw new JSONException("JSONObject[" + key +
"] is not a JSONArray.");
/**
* Produce a string from a double. The string "null" will be returned if
* the number is not finite.
- * @param d A double.
+ *
+ * @param d A double.
* @return A String.
*/
public static String doubleToString(double d) {
/**
* Get the value object associated with a key.
*
- * @param key A key string.
- * @return The object associated with the key.
- * @throws JSONException if the key is not found.
+ * @param key A key string.
+ * @return The object associated with the key.
+ * @throws JSONException if the key is not found.
*/
public Object get(String key) throws JSONException {
if (key == null) {
/**
* Get the boolean value associated with a key.
*
- * @param key A key string.
- * @return The truth.
- * @throws JSONException
- * if the value is not a Boolean or the String "true" or "false".
+ * @param key A key string.
+ * @return The truth.
+ * @throws JSONException if the value is not a Boolean or the String "true" or "false".
*/
public boolean getBoolean(String key) throws JSONException {
Object object = this.get(key);
if (object.equals(Boolean.FALSE) ||
(object instanceof String &&
- ((String)object).equalsIgnoreCase("false"))) {
+ ((String) object).equalsIgnoreCase("false"))) {
return false;
} else if (object.equals(Boolean.TRUE) ||
(object instanceof String &&
- ((String)object).equalsIgnoreCase("true"))) {
+ ((String) object).equalsIgnoreCase("true"))) {
return true;
}
throw new JSONException("JSONObject[" + quote(key) +
/**
* Get the double value associated with a key.
- * @param key A key string.
- * @return The numeric value.
+ *
+ * @param key A key string.
+ * @return The numeric value.
* @throws JSONException if the key is not found or
- * if the value is not a Number object and cannot be converted to a number.
+ * if the value is not a Number object and cannot be converted to a number.
*/
public double getDouble(String key) throws JSONException {
Object object = this.get(key);
try {
return object instanceof Number
- ? ((Number)object).doubleValue()
- : Double.parseDouble((String)object);
+ ? ((Number) object).doubleValue()
+ : Double.parseDouble((String) object);
} catch (Exception e) {
throw new JSONException("JSONObject[" + quote(key) +
- "] is not a number.");
+ "] is not a number.");
}
}
/**
* Get the int value associated with a key.
*
- * @param key A key string.
- * @return The integer value.
- * @throws JSONException if the key is not found or if the value cannot
- * be converted to an integer.
+ * @param key A key string.
+ * @return The integer value.
+ * @throws JSONException if the key is not found or if the value cannot
+ * be converted to an integer.
*/
public int getInt(String key) throws JSONException {
Object object = this.get(key);
try {
return object instanceof Number
- ? ((Number)object).intValue()
- : Integer.parseInt((String)object);
+ ? ((Number) object).intValue()
+ : Integer.parseInt((String) object);
} catch (Exception e) {
throw new JSONException("JSONObject[" + quote(key) +
- "] is not an int.");
+ "] is not an int.");
}
}
/**
* Get the JSONArray value associated with a key.
*
- * @param key A key string.
- * @return A JSONArray which is the value.
- * @throws JSONException if the key is not found or
- * if the value is not a JSONArray.
+ * @param key A key string.
+ * @return A JSONArray which is the value.
+ * @throws JSONException if the key is not found or
+ * if the value is not a JSONArray.
*/
public JSONArray getJSONArray(String key) throws JSONException {
Object object = this.get(key);
if (object instanceof JSONArray) {
- return (JSONArray)object;
+ return (JSONArray) object;
}
throw new JSONException("JSONObject[" + quote(key) +
"] is not a JSONArray.");
/**
* Get the JSONObject value associated with a key.
*
- * @param key A key string.
- * @return A JSONObject which is the value.
- * @throws JSONException if the key is not found or
- * if the value is not a JSONObject.
+ * @param key A key string.
+ * @return A JSONObject which is the value.
+ * @throws JSONException if the key is not found or
+ * if the value is not a JSONObject.
*/
public LOGJSONObject getJSONObject(String key) throws JSONException {
Object object = this.get(key);
if (object instanceof LOGJSONObject) {
- return (LOGJSONObject)object;
+ return (LOGJSONObject) object;
}
throw new JSONException("JSONObject[" + quote(key) +
"] is not a JSONObject.");
/**
* Get the long value associated with a key.
*
- * @param key A key string.
- * @return The long value.
- * @throws JSONException if the key is not found or if the value cannot
- * be converted to a long.
+ * @param key A key string.
+ * @return The long value.
+ * @throws JSONException if the key is not found or if the value cannot
+ * be converted to a long.
*/
public long getLong(String key) throws JSONException {
Object object = this.get(key);
try {
return object instanceof Number
- ? ((Number)object).longValue()
- : Long.parseLong((String)object);
+ ? ((Number) object).longValue()
+ : Long.parseLong((String) object);
} catch (Exception e) {
throw new JSONException("JSONObject[" + quote(key) +
- "] is not a long.");
+ "] is not a long.");
}
}
/**
* Get the string associated with a key.
*
- * @param key A key string.
- * @return A string which is the value.
- * @throws JSONException if there is no string value for the key.
+ * @param key A key string.
+ * @return A string which is the value.
+ * @throws JSONException if there is no string value for the key.
*/
public String getString(String key) throws JSONException {
Object object = this.get(key);
if (object instanceof String) {
- return (String)object;
+ return (String) object;
}
throw new JSONException("JSONObject[" + quote(key) +
- "] not a string.");
+ "] not a string.");
}
/**
* Determine if the JSONObject contains a specific key.
- * @param key A key string.
- * @return true if the key exists in the JSONObject.
+ *
+ * @param key A key string.
+ * @return true if the key exists in the JSONObject.
*/
public boolean has(String key) {
return this.map.containsKey(key);
* Increment a property of a JSONObject. If there is no such property,
* create one with a value of 1. If there is such a property, and if
* it is an Integer, Long, Double, or Float, then add one to it.
- * @param key A key string.
+ *
+ * @param key A key string.
* @return this.
* @throws JSONException If there is already a property with this name
- * that is not an Integer, Long, Double, or Float.
+ * that is not an Integer, Long, Double, or Float.
*/
public LOGJSONObject increment(String key) throws JSONException {
Object value = this.opt(key);
if (value == null) {
this.put(key, 1);
} else if (value instanceof Integer) {
- this.put(key, ((Integer)value).intValue() + 1);
+ this.put(key, ((Integer) value).intValue() + 1);
} else if (value instanceof Long) {
- this.put(key, ((Long)value).longValue() + 1);
+ this.put(key, ((Long) value).longValue() + 1);
} else if (value instanceof Double) {
- this.put(key, ((Double)value).doubleValue() + 1);
+ this.put(key, ((Double) value).doubleValue() + 1);
} else if (value instanceof Float) {
- this.put(key, ((Float)value).floatValue() + 1);
+ this.put(key, ((Float) value).floatValue() + 1);
} else {
throw new JSONException("Unable to increment [" + quote(key) + "].");
}
/**
* Determine if the value associated with the key is null or if there is
- * no value.
- * @param key A key string.
- * @return true if there is no value associated with the key or if
- * the value is the JSONObject.NULL object.
+ * no value.
+ *
+ * @param key A key string.
+ * @return true if there is no value associated with the key or if
+ * the value is the JSONObject.NULL object.
*/
public boolean isNull(String key) {
return LOGJSONObject.NULL.equals(this.opt(key));
/**
* Produce a JSONArray containing the names of the elements of this
* JSONObject.
+ *
* @return A JSONArray containing the key strings, or null if the JSONObject
* is empty.
*/
/**
* Produce a string from a Number.
- * @param number A Number
+ *
+ * @param number A Number
* @return A String.
* @throws JSONException If n is a non-finite number.
*/
/**
* Get an optional value associated with a key.
- * @param key A key string.
- * @return An object which is the value, or null if there is no value.
+ *
+ * @param key A key string.
+ * @return An object which is the value, or null if there is no value.
*/
public Object opt(String key) {
return key == null ? null : this.map.get(key);
* It returns false if there is no such key, or if the value is not
* Boolean.TRUE or the String "true".
*
- * @param key A key string.
- * @return The truth.
+ * @param key A key string.
+ * @return The truth.
*/
public boolean optBoolean(String key) {
return this.optBoolean(key, false);
* It returns the defaultValue if there is no such key, or if it is not
* a Boolean or the String "true" or "false" (case insensitive).
*
- * @param key A key string.
- * @param defaultValue The default.
- * @return The truth.
+ * @param key A key string.
+ * @param defaultValue The default.
+ * @return The truth.
*/
public boolean optBoolean(String key, boolean defaultValue) {
try {
* If the value is a string, an attempt will be made to evaluate it as
* a number.
*
- * @param key A string which is the key.
- * @return An object which is the value.
+ * @param key A string which is the key.
+ * @return An object which is the value.
*/
public double optDouble(String key) {
return this.optDouble(key, Double.NaN);
* If the value is a string, an attempt will be made to evaluate it as
* a number.
*
- * @param key A key string.
- * @param defaultValue The default.
- * @return An object which is the value.
+ * @param key A key string.
+ * @param defaultValue The default.
+ * @return An object which is the value.
*/
public double optDouble(String key, double defaultValue) {
try {
* If the value is a string, an attempt will be made to evaluate it as
* a number.
*
- * @param key A key string.
- * @return An object which is the value.
+ * @param key A key string.
+ * @return An object which is the value.
*/
public int optInt(String key) {
return this.optInt(key, 0);
* If the value is a string, an attempt will be made to evaluate it as
* a number.
*
- * @param key A key string.
- * @param defaultValue The default.
- * @return An object which is the value.
+ * @param key A key string.
+ * @param defaultValue The default.
+ * @return An object which is the value.
*/
public int optInt(String key, int defaultValue) {
try {
* It returns null if there is no such key, or if its value is not a
* JSONArray.
*
- * @param key A key string.
- * @return A JSONArray which is the value.
+ * @param key A key string.
+ * @return A JSONArray which is the value.
*/
public JSONArray optJSONArray(String key) {
Object o = this.opt(key);
- return o instanceof JSONArray ? (JSONArray)o : null;
+ return o instanceof JSONArray ? (JSONArray) o : null;
}
* It returns null if there is no such key, or if its value is not a
* JSONObject.
*
- * @param key A key string.
- * @return A JSONObject which is the value.
+ * @param key A key string.
+ * @return A JSONObject which is the value.
*/
public LOGJSONObject optJSONObject(String key) {
Object object = this.opt(key);
- return object instanceof LOGJSONObject ? (LOGJSONObject)object : null;
+ return object instanceof LOGJSONObject ? (LOGJSONObject) object : null;
}
* If the value is a string, an attempt will be made to evaluate it as
* a number.
*
- * @param key A key string.
- * @return An object which is the value.
+ * @param key A key string.
+ * @return An object which is the value.
*/
public long optLong(String key) {
return this.optLong(key, 0);
*
* @param key A key string.
* @param defaultValue The default.
- * @return An object which is the value.
+ * @return An object which is the value.
*/
public long optLong(String key, long defaultValue) {
try {
* It returns an empty string if there is no such key. If the value is not
* a string and is not null, then it is converted to a string.
*
- * @param key A key string.
- * @return A string which is the value.
+ * @param key A key string.
+ * @return A string which is the value.
*/
public String optString(String key) {
return this.optString(key, "");
* Get an optional string associated with a key.
* It returns the defaultValue if there is no such key.
*
- * @param key A key string.
- * @param defaultValue The default.
- * @return A string which is the value.
+ * @param key A key string.
+ * @param defaultValue The default.
+ * @return A string which is the value.
*/
public String optString(String key, String defaultValue) {
Object object = this.opt(key);
key = key.toLowerCase();
} else if (!Character.isUpperCase(key.charAt(1))) {
key = key.substring(0, 1).toLowerCase() +
- key.substring(1);
+ key.substring(1);
}
- Object result = method.invoke(bean, (Object[])null);
+ Object result = method.invoke(bean, (Object[]) null);
if (result != null) {
this.map.put(key, wrap(result));
}
/**
* Put a key/value pair in the JSONObject, where the value will be a
* JSONArray which is produced from a Collection.
+ *
* @param key A key string.
* @param value A Collection value.
- * @return this.
+ * @return this.
* @throws JSONException
*/
public LOGJSONObject put(String key, Collection<Object> value) throws JSONException {
/**
* Put a key/value pair in the JSONObject, where the value will be a
* JSONObject which is produced from a Map.
+ *
* @param key A key string.
* @param value A Map value.
- * @return this.
+ * @return this.
* @throws JSONException
*/
public LOGJSONObject put(String key, Map<String, Object> value) throws JSONException {
/**
* Put a key/value pair in the JSONObject. If the value is null,
* then the key will be removed from the JSONObject if it is present.
+ *
* @param key A key string.
* @param value An object which is the value. It should be of one of these
- * types: Boolean, Double, Integer, JSONArray, JSONObject, Long, String,
- * or the JSONObject.NULL object.
+ * types: Boolean, Double, Integer, JSONArray, JSONObject, Long, String,
+ * or the JSONObject.NULL object.
* @return this.
* @throws JSONException If the value is non-finite number
- * or if the key is null.
+ * or if the key is null.
*/
public LOGJSONObject put(String key, Object value) throws JSONException {
String pooled;
}
if (value != null) {
testValidity(value);
- pooled = (String)keyPool.get(key);
+ pooled = (String) keyPool.get(key);
if (pooled == null) {
if (keyPool.size() >= keyPoolSize) {
keyPool = new LinkedHashMap<String, Object>(keyPoolSize);
* Put a key/value pair in the JSONObject, but only if the key and the
* value are both non-null, and only if there is not already a member
* with that name.
+ *
* @param key
* @param value
* @return his.
/**
* Put a key/value pair in the JSONObject, but only if the
* key and the value are both non-null.
+ *
* @param key A key string.
* @param value An object which is the value. It should be of one of these
- * types: Boolean, Double, Integer, JSONArray, JSONObject, Long, String,
- * or the JSONObject.NULL object.
+ * types: Boolean, Double, Integer, JSONArray, JSONObject, Long, String,
+ * or the JSONObject.NULL object.
* @return this.
* @throws JSONException If the value is a non-finite number.
*/
* right places. A backslash will be inserted within </, producing <\/,
* allowing JSON text to be delivered in HTML. In JSON text, a string
* cannot contain a control character or an unescaped quote or backslash.
+ *
* @param string A String
- * @return A String correctly formatted for insertion in a JSON text.
+ * @return A String correctly formatted for insertion in a JSON text.
*/
public static String quote(String string) {
StringWriter sw = new StringWriter();
b = c;
c = string.charAt(i);
switch (c) {
- case '\\':
- case '"':
- w.write('\\');
- w.write(c);
- break;
- case '/':
- if (b == '<') {
+ case '\\':
+ case '"':
w.write('\\');
- }
- w.write(c);
- break;
- case '\b':
- w.write("\\b");
- break;
- case '\t':
- w.write("\\t");
- break;
- case '\n':
- w.write("\\n");
- break;
- case '\f':
- w.write("\\f");
- break;
- case '\r':
- w.write("\\r");
- break;
- default:
- if (c < ' ' || (c >= '\u0080' && c < '\u00a0')
- || (c >= '\u2000' && c < '\u2100')) {
- w.write("\\u");
- hhhh = Integer.toHexString(c);
- w.write("0000", 0, 4 - hhhh.length());
- w.write(hhhh);
- } else {
w.write(c);
- }
+ break;
+ case '/':
+ if (b == '<') {
+ w.write('\\');
+ }
+ w.write(c);
+ break;
+ case '\b':
+ w.write("\\b");
+ break;
+ case '\t':
+ w.write("\\t");
+ break;
+ case '\n':
+ w.write("\\n");
+ break;
+ case '\f':
+ w.write("\\f");
+ break;
+ case '\r':
+ w.write("\\r");
+ break;
+ default:
+ if (c < ' ' || (c >= '\u0080' && c < '\u00a0')
+ || (c >= '\u2000' && c < '\u2100')) {
+ w.write("\\u");
+ hhhh = Integer.toHexString(c);
+ w.write("0000", 0, 4 - hhhh.length());
+ w.write(hhhh);
+ } else {
+ w.write(c);
+ }
}
}
w.write('"');
/**
* Remove a name and its value, if present.
+ *
* @param key The name to be removed.
* @return The value that was associated with the name,
* or null if there was no value.
/**
* Try to convert a string into a number, boolean, or null. If the string
* can't be converted, return the string.
+ *
* @param string A String.
* @return A simple JSON value.
*/
return myLong;
}
}
- } catch (Exception ignore) {
+ } catch (Exception ignore) {
}
}
return string;
/**
* Throw an exception if the object is a NaN or infinite number.
+ *
* @param o The object to test.
* @throws JSONException If o is a non-finite number.
*/
public static void testValidity(Object o) throws JSONException {
if (o != null) {
if (o instanceof Double) {
- if (((Double)o).isInfinite() || ((Double)o).isNaN()) {
+ if (((Double) o).isInfinite() || ((Double) o).isNaN()) {
throw new JSONException(
- "JSON does not allow non-finite numbers.");
+ "JSON does not allow non-finite numbers.");
}
} else if (o instanceof Float) {
- if (((Float)o).isInfinite() || ((Float)o).isNaN()) {
+ if (((Float) o).isInfinite() || ((Float) o).isNaN()) {
throw new JSONException(
- "JSON does not allow non-finite numbers.");
+ "JSON does not allow non-finite numbers.");
}
}
}
/**
* Produce a JSONArray containing the values of the members of this
* JSONObject.
+ *
* @param names A JSONArray containing a list of key strings. This
- * determines the sequence of the values in the result.
+ * determines the sequence of the values in the result.
* @return A JSONArray of values.
* @throws JSONException If any of the values are non-finite numbers.
*/
* Warning: This method assumes that the data structure is acyclical.
*
* @return a printable, displayable, portable, transmittable
- * representation of the object, beginning
- * with <code>{</code> <small>(left brace)</small> and ending
- * with <code>}</code> <small>(right brace)</small>.
+ * representation of the object, beginning
+ * with <code>{</code> <small>(left brace)</small> and ending
+ * with <code>}</code> <small>(right brace)</small>.
*/
public String toString() {
try {
* Make a prettyprinted JSON text of this JSONObject.
* <p>
* Warning: This method assumes that the data structure is acyclical.
+ *
* @param indentFactor The number of spaces to add to each level of
- * indentation.
+ * indentation.
* @return a printable, displayable, portable, transmittable
- * representation of the object, beginning
- * with <code>{</code> <small>(left brace)</small> and ending
- * with <code>}</code> <small>(right brace)</small>.
+ * representation of the object, beginning
+ * with <code>{</code> <small>(left brace)</small> and ending
+ * with <code>}</code> <small>(right brace)</small>.
* @throws JSONException If the object contains an invalid number.
*/
public String toString(int indentFactor) throws JSONException {
*
* <p>
* Warning: This method assumes that the data structure is acyclical.
+ *
* @param value The value to be serialized.
* @return a printable, displayable, transmittable
- * representation of the object, beginning
- * with <code>{</code> <small>(left brace)</small> and ending
- * with <code>}</code> <small>(right brace)</small>.
+ * representation of the object, beginning
+ * with <code>{</code> <small>(left brace)</small> and ending
+ * with <code>}</code> <small>(right brace)</small>.
* @throws JSONException If the value is or contains an invalid number.
*/
@SuppressWarnings("unchecked")
- public static String valueToString(Object value) throws JSONException {
+ public static String valueToString(Object value) throws JSONException {
if (value == null || value.equals(null)) {
return "null";
}
if (value instanceof JSONString) {
Object object;
try {
- object = ((JSONString)value).toJSONString();
+ object = ((JSONString) value).toJSONString();
} catch (Exception e) {
throw new JSONException(e);
}
if (object instanceof String) {
- return (String)object;
+ return (String) object;
}
throw new JSONException("Bad value from toJSONString: " + object);
}
return value.toString();
}
if (value instanceof Map) {
- return new LOGJSONObject((Map<String, Object>)value).toString();
+ return new LOGJSONObject((Map<String, Object>) value).toString();
}
if (value instanceof Collection) {
- return new JSONArray((Collection<Object>)value).toString();
+ return new JSONArray((Collection<Object>) value).toString();
}
if (value.getClass().isArray()) {
return new JSONArray(value).toString();
return quote(value.toString());
}
- /**
- * Wrap an object, if necessary. If the object is null, return the NULL
- * object. If it is an array or collection, wrap it in a JSONArray. If
- * it is a map, wrap it in a JSONObject. If it is a standard property
- * (Double, String, et al) then it is already wrapped. Otherwise, if it
- * comes from one of the java packages, turn it into a string. And if
- * it doesn't, try to wrap it in a JSONObject. If the wrapping fails,
- * then null is returned.
- *
- * @param object The object to wrap
- * @return The wrapped value
- */
- @SuppressWarnings("unchecked")
- public static Object wrap(Object object) {
- try {
- if (object == null) {
- return NULL;
- }
- if (object instanceof LOGJSONObject || object instanceof JSONArray ||
- NULL.equals(object) || object instanceof JSONString ||
- object instanceof Byte || object instanceof Character ||
- object instanceof Short || object instanceof Integer ||
- object instanceof Long || object instanceof Boolean ||
- object instanceof Float || object instanceof Double ||
- object instanceof String) {
- return object;
- }
-
- if (object instanceof Collection) {
- return new JSONArray((Collection<Object>)object);
- }
- if (object.getClass().isArray()) {
- return new JSONArray(object);
- }
- if (object instanceof Map) {
- return new LOGJSONObject((Map<String, Object>)object);
- }
- Package objectPackage = object.getClass().getPackage();
- String objectPackageName = objectPackage != null
- ? objectPackage.getName()
- : "";
- if (
- objectPackageName.startsWith("java.") ||
- objectPackageName.startsWith("javax.") ||
- object.getClass().getClassLoader() == null
- ) {
- return object.toString();
- }
- return new LOGJSONObject(object);
- } catch(Exception exception) {
- return null;
- }
- }
-
-
- /**
- * Write the contents of the JSONObject as JSON text to a writer.
- * For compactness, no whitespace is added.
- * <p>
- * Warning: This method assumes that the data structure is acyclical.
- *
- * @return The writer.
- * @throws JSONException
- */
- public Writer write(Writer writer) throws JSONException {
+ /**
+ * Wrap an object, if necessary. If the object is null, return the NULL
+ * object. If it is an array or collection, wrap it in a JSONArray. If
+ * it is a map, wrap it in a JSONObject. If it is a standard property
+ * (Double, String, et al) then it is already wrapped. Otherwise, if it
+ * comes from one of the java packages, turn it into a string. And if
+ * it doesn't, try to wrap it in a JSONObject. If the wrapping fails,
+ * then null is returned.
+ *
+ * @param object The object to wrap
+ * @return The wrapped value
+ */
+ @SuppressWarnings("unchecked")
+ public static Object wrap(Object object) {
+ try {
+ if (object == null) {
+ return NULL;
+ }
+ if (object instanceof LOGJSONObject || object instanceof JSONArray ||
+ NULL.equals(object) || object instanceof JSONString ||
+ object instanceof Byte || object instanceof Character ||
+ object instanceof Short || object instanceof Integer ||
+ object instanceof Long || object instanceof Boolean ||
+ object instanceof Float || object instanceof Double ||
+ object instanceof String) {
+ return object;
+ }
+
+ if (object instanceof Collection) {
+ return new JSONArray((Collection<Object>) object);
+ }
+ if (object.getClass().isArray()) {
+ return new JSONArray(object);
+ }
+ if (object instanceof Map) {
+ return new LOGJSONObject((Map<String, Object>) object);
+ }
+ Package objectPackage = object.getClass().getPackage();
+ String objectPackageName = objectPackage != null
+ ? objectPackage.getName()
+ : "";
+ if (
+ objectPackageName.startsWith("java.") ||
+ objectPackageName.startsWith("javax.") ||
+ object.getClass().getClassLoader() == null
+ ) {
+ return object.toString();
+ }
+ return new LOGJSONObject(object);
+ } catch (Exception exception) {
+ return null;
+ }
+ }
+
+
+ /**
+ * Write the contents of the JSONObject as JSON text to a writer.
+ * For compactness, no whitespace is added.
+ * <p>
+ * Warning: This method assumes that the data structure is acyclical.
+ *
+ * @return The writer.
+ * @throws JSONException
+ */
+ public Writer write(Writer writer) throws JSONException {
return this.write(writer, 0, 0);
}
@SuppressWarnings("unchecked")
- static final Writer writeValue(Writer writer, Object value,
- int indentFactor, int indent) throws JSONException, IOException {
+ static final Writer writeValue(Writer writer, Object value,
+ int indentFactor, int indent) throws JSONException, IOException {
if (value == null || value.equals(null)) {
writer.write("null");
} else if (value instanceof LOGJSONObject) {
} catch (IOException exception) {
throw new JSONException(exception);
}
- }
+ }
}
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: LogfileLoader.java,v 1.22 2014/03/12 19:45:41 eby Exp $\r
*/\r
public class LogfileLoader extends Thread {\r
- /** Default number of log records to keep when pruning. Keep 10M by default. */\r
- public static final long DEFAULT_LOG_RETENTION = 10000000L;\r
- /** NOT USED: Percentage of free space required before old records are removed. */\r
- public static final int REQUIRED_FREE_PCT = 20;\r
+ /**\r
+ * Default number of log records to keep when pruning. Keep 10M by default.\r
+ */\r
+ public static final long DEFAULT_LOG_RETENTION = 10000000L;\r
+ /**\r
+ * NOT USED: Percentage of free space required before old records are removed.\r
+ */\r
+ public static final int REQUIRED_FREE_PCT = 20;\r
+\r
+ /**\r
+ * This is a singleton -- there is only one LogfileLoader object in the server\r
+ */\r
+ private static LogfileLoader p;\r
+\r
+ /**\r
+ * Get the singleton LogfileLoader object, and start it if it is not running.\r
+ *\r
+ * @return the LogfileLoader\r
+ */\r
+ public static synchronized LogfileLoader getLoader() {\r
+ if (p == null)\r
+ p = new LogfileLoader();\r
+ if (!p.isAlive())\r
+ p.start();\r
+ return p;\r
+ }\r
+\r
+ /**\r
+ * The PreparedStatement which is loaded by a <i>Loadable</i>.\r
+ */\r
+ public static final String INSERT_SQL = "insert into LOG_RECORDS values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";\r
+ /**\r
+ * Each server can assign this many IDs\r
+ */\r
+ private static final long SET_SIZE = (1L << 56);\r
+\r
+ private final Logger logger;\r
+ private final DB db;\r
+ private final String spooldir;\r
+ private final long set_start;\r
+ private final long set_end;\r
+ private RLEBitSet seq_set;\r
+ private long nextid;\r
+ private boolean idle;\r
+\r
+ private LogfileLoader() {\r
+ this.logger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
+ this.db = new DB();\r
+ this.spooldir = db.getProperties().getProperty("org.onap.dmaap.datarouter.provserver.spooldir");\r
+ this.set_start = getIdRange();\r
+ this.set_end = set_start + SET_SIZE - 1;\r
+ this.seq_set = new RLEBitSet();\r
+ this.nextid = 0;\r
+ this.idle = false;\r
+\r
+ // This is a potentially lengthy operation, so has been moved to run()\r
+ //initializeNextid();\r
+ this.setDaemon(true);\r
+ this.setName("LogfileLoader");\r
+ }\r
+\r
+ private long getIdRange() {\r
+ long n;\r
+ if (BaseServlet.isInitialActivePOD())\r
+ n = 0;\r
+ else if (BaseServlet.isInitialStandbyPOD())\r
+ n = SET_SIZE;\r
+ else\r
+ n = SET_SIZE * 2;\r
+ String r = String.format("[%X .. %X]", n, n + SET_SIZE - 1);\r
+ logger.debug("This server shall assign RECORD_IDs in the range " + r);\r
+ return n;\r
+ }\r
+\r
+ /**\r
+ * Return the bit set representing the record ID's that are loaded in this database.\r
+ *\r
+ * @return the bit set\r
+ */\r
+ public RLEBitSet getBitSet() {\r
+ return seq_set;\r
+ }\r
\r
- /** This is a singleton -- there is only one LogfileLoader object in the server */\r
- private static LogfileLoader p;\r
+ /**\r
+ * True if the LogfileLoader is currently waiting for work.\r
+ *\r
+ * @return true if idle\r
+ */\r
+ public boolean isIdle() {\r
+ return idle;\r
+ }\r
\r
- /**\r
- * Get the singleton LogfileLoader object, and start it if it is not running.\r
- * @return the LogfileLoader\r
- */\r
- public static synchronized LogfileLoader getLoader() {\r
- if (p == null)\r
- p = new LogfileLoader();\r
- if (!p.isAlive())\r
- p.start();\r
- return p;\r
- }\r
+ /**\r
+ * Run continuously to look for new logfiles in the spool directory and import them into the DB.\r
+ * The spool is checked once per second. If free space on the MariaDB filesystem falls below\r
+ * REQUIRED_FREE_PCT (normally 20%) then the oldest logfile entries are removed and the LOG_RECORDS\r
+ * table is compacted until free space rises above the threshold.\r
+ */\r
+ @Override\r
+ public void run() {\r
+ initializeNextid(); // moved from the constructor\r
+ while (true) {\r
+ try {\r
+ File dirfile = new File(spooldir);\r
+ while (true) {\r
+ // process IN files\r
+ File[] infiles = dirfile.listFiles(new FilenameFilter() {\r
+ @Override\r
+ public boolean accept(File dir, String name) {\r
+ return name.startsWith("IN.");\r
+ }\r
+ });\r
\r
- /** The PreparedStatement which is loaded by a <i>Loadable</i>. */\r
- public static final String INSERT_SQL = "insert into LOG_RECORDS values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";\r
- /** Each server can assign this many IDs */\r
- private static final long SET_SIZE = (1L << 56);\r
+ if (infiles.length == 0) {\r
+ idle = true;\r
+ try {\r
+ Thread.sleep(1000L);\r
+ } catch (InterruptedException e) {\r
+ }\r
+ idle = false;\r
+ } else {\r
+ // Remove old rows\r
+ if (pruneRecords()) {\r
+ // Removed at least some entries, recompute the bit map\r
+ initializeNextid();\r
+ }\r
\r
- private final Logger logger;\r
- private final DB db;\r
- private final String spooldir;\r
- private final long set_start;\r
- private final long set_end;\r
- private RLEBitSet seq_set;\r
- private long nextid;\r
- private boolean idle;\r
+ // Process incoming logfiles\r
+ for (File f : infiles) {\r
+ if (logger.isDebugEnabled())\r
+ logger.debug("PROV8001 Starting " + f + " ...");\r
+ long time = System.currentTimeMillis();\r
+ int[] n = process(f);\r
+ time = System.currentTimeMillis() - time;\r
+ logger.info(String\r
+ .format("PROV8000 Processed %s in %d ms; %d of %d records.",\r
+ f.toString(), time, n[0], n[1]));\r
+ f.delete();\r
+ }\r
+ }\r
+ }\r
+ } catch (Exception e) {\r
+ logger.warn("PROV0020: Caught exception in LogfileLoader: " + e);\r
+ e.printStackTrace();\r
+ }\r
+ }\r
+ }\r
\r
- private LogfileLoader() {\r
- this.logger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
- this.db = new DB();\r
- this.spooldir = db.getProperties().getProperty("org.onap.dmaap.datarouter.provserver.spooldir");\r
- this.set_start = getIdRange();\r
- this.set_end = set_start + SET_SIZE - 1;\r
- this.seq_set = new RLEBitSet();\r
- this.nextid = 0;\r
- this.idle = false;\r
+ private boolean pruneRecords() {\r
+ boolean did1 = false;\r
+ long count = countRecords();\r
+ long threshold = DEFAULT_LOG_RETENTION;\r
+ Parameters param = Parameters.getParameter(Parameters.PROV_LOG_RETENTION);\r
+ if (param != null) {\r
+ try {\r
+ long n = Long.parseLong(param.getValue());\r
+ // This check is to prevent inadvertent errors from wiping the table out\r
+ if (n > 1000000L)\r
+ threshold = n;\r
+ } catch (NumberFormatException e) {\r
+ // ignore\r
+ }\r
+ }\r
+ logger.debug("Pruning LOG_RECORD table: records in DB=" + count + ", threshold=" + threshold);\r
+ if (count > threshold) {\r
+ count -= threshold; // we need to remove this many records;\r
+ Map<Long, Long> hist = getHistogram(); // histogram of records per day\r
+ // Determine the cutoff point to remove the needed number of records\r
+ long sum = 0;\r
+ long cutoff = 0;\r
+ for (Long day : new TreeSet<Long>(hist.keySet())) {\r
+ sum += hist.get(day);\r
+ cutoff = day;\r
+ if (sum >= count)\r
+ break;\r
+ }\r
+ cutoff++;\r
+ cutoff *= 86400000L; // convert day to ms\r
+ logger.debug(" Pruning records older than=" + (cutoff / 86400000L) + " (" + new Date(cutoff) + ")");\r
\r
- // This is a potentially lengthy operation, so has been moved to run()\r
- //initializeNextid();\r
- this.setDaemon(true);\r
- this.setName("LogfileLoader");\r
- }\r
+ Connection conn = null;\r
+ try {\r
+ // Limit to a million at a time to avoid typing up the DB for too long.\r
+ conn = db.getConnection();\r
+ PreparedStatement ps = conn.prepareStatement("DELETE from LOG_RECORDS where EVENT_TIME < ? limit 1000000");\r
+ ps.setLong(1, cutoff);\r
+ while (count > 0) {\r
+ if (!ps.execute()) {\r
+ int dcount = ps.getUpdateCount();\r
+ count -= dcount;\r
+ logger.debug(" " + dcount + " rows deleted.");\r
+ did1 |= (dcount != 0);\r
+ if (dcount == 0)\r
+ count = 0; // prevent inf. loops\r
+ } else {\r
+ count = 0; // shouldn't happen!\r
+ }\r
+ }\r
+ ps.close();\r
+ Statement stmt = conn.createStatement();\r
+ stmt.execute("OPTIMIZE TABLE LOG_RECORDS");\r
+ stmt.close();\r
+ } catch (SQLException e) {\r
+ System.err.println(e);\r
+ e.printStackTrace();\r
+ } finally {\r
+ db.release(conn);\r
+ }\r
+ }\r
+ return did1;\r
+ }\r
\r
- private long getIdRange() {\r
- long n;\r
- if (BaseServlet.isInitialActivePOD())\r
- n = 0;\r
- else if (BaseServlet.isInitialStandbyPOD())\r
- n = SET_SIZE;\r
- else\r
- n = SET_SIZE * 2;\r
- String r = String.format("[%X .. %X]", n, n+SET_SIZE-1);\r
- logger.debug("This server shall assign RECORD_IDs in the range "+r);\r
- return n;\r
- }\r
- /**\r
- * Return the bit set representing the record ID's that are loaded in this database.\r
- * @return the bit set\r
- */\r
- public RLEBitSet getBitSet() {\r
- return seq_set;\r
- }\r
- /**\r
- * True if the LogfileLoader is currently waiting for work.\r
- * @return true if idle\r
- */\r
- public boolean isIdle() {\r
- return idle;\r
- }\r
- /**\r
- * Run continuously to look for new logfiles in the spool directory and import them into the DB.\r
- * The spool is checked once per second. If free space on the MariaDB filesystem falls below\r
- * REQUIRED_FREE_PCT (normally 20%) then the oldest logfile entries are removed and the LOG_RECORDS\r
- * table is compacted until free space rises above the threshold.\r
- */\r
- @Override\r
- public void run() {\r
- initializeNextid(); // moved from the constructor\r
- while (true) {\r
- try {\r
- File dirfile = new File(spooldir);\r
- while (true) {\r
- // process IN files\r
- File[] infiles = dirfile.listFiles(new FilenameFilter() {\r
- @Override\r
- public boolean accept(File dir, String name) {\r
- return name.startsWith("IN.");\r
- }\r
- });\r
+ private long countRecords() {\r
+ long count = 0;\r
+ Connection conn = null;\r
+ try {\r
+ conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery("SELECT COUNT(*) as COUNT from LOG_RECORDS");\r
+ if (rs.next()) {\r
+ count = rs.getLong("COUNT");\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ } catch (SQLException e) {\r
+ System.err.println(e);\r
+ e.printStackTrace();\r
+ } finally {\r
+ db.release(conn);\r
+ }\r
+ return count;\r
+ }\r
\r
- if (infiles.length == 0) {\r
- idle = true;\r
- try {\r
- Thread.sleep(1000L);\r
- } catch (InterruptedException e) {\r
- }\r
- idle = false;\r
- } else {\r
- // Remove old rows\r
- if (pruneRecords()) {\r
- // Removed at least some entries, recompute the bit map\r
- initializeNextid();\r
- }\r
+ private Map<Long, Long> getHistogram() {\r
+ Map<Long, Long> map = new HashMap<Long, Long>();\r
+ Connection conn = null;\r
+ try {\r
+ logger.debug(" LOG_RECORD table histogram...");\r
+ conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ ResultSet rs = stmt.executeQuery("SELECT FLOOR(EVENT_TIME/86400000) AS DAY, COUNT(*) AS COUNT FROM LOG_RECORDS GROUP BY DAY");\r
+ while (rs.next()) {\r
+ long day = rs.getLong("DAY");\r
+ long cnt = rs.getLong("COUNT");\r
+ map.put(day, cnt);\r
+ logger.debug(" " + day + " " + cnt);\r
+ }\r
+ rs.close();\r
+ stmt.close();\r
+ } catch (SQLException e) {\r
+ System.err.println(e);\r
+ e.printStackTrace();\r
+ } finally {\r
+ db.release(conn);\r
+ }\r
+ return map;\r
+ }\r
\r
- // Process incoming logfiles\r
- for (File f : infiles) {\r
- if (logger.isDebugEnabled())\r
- logger.debug("PROV8001 Starting " + f + " ...");\r
- long time = System.currentTimeMillis();\r
- int[] n = process(f);\r
- time = System.currentTimeMillis() - time;\r
- logger.info(String\r
- .format("PROV8000 Processed %s in %d ms; %d of %d records.",\r
- f.toString(), time, n[0], n[1]));\r
- f.delete();\r
- }\r
- }\r
- }\r
- } catch (Exception e) {\r
- logger.warn("PROV0020: Caught exception in LogfileLoader: " + e);\r
- e.printStackTrace();\r
- }\r
- }\r
- }\r
- private boolean pruneRecords() {\r
- boolean did1 = false;\r
- long count = countRecords();\r
- long threshold = DEFAULT_LOG_RETENTION;\r
- Parameters param = Parameters.getParameter(Parameters.PROV_LOG_RETENTION);\r
- if (param != null) {\r
- try {\r
- long n = Long.parseLong(param.getValue());\r
- // This check is to prevent inadvertent errors from wiping the table out\r
- if (n > 1000000L)\r
- threshold = n;\r
- } catch (NumberFormatException e) {\r
- // ignore\r
- }\r
- }\r
- logger.debug("Pruning LOG_RECORD table: records in DB="+count+", threshold="+threshold);\r
- if (count > threshold) {\r
- count -= threshold; // we need to remove this many records;\r
- Map<Long,Long> hist = getHistogram(); // histogram of records per day\r
- // Determine the cutoff point to remove the needed number of records\r
- long sum = 0;\r
- long cutoff = 0;\r
- for (Long day : new TreeSet<Long>(hist.keySet())) {\r
- sum += hist.get(day);\r
- cutoff = day;\r
- if (sum >= count)\r
- break;\r
- }\r
- cutoff++;\r
- cutoff *= 86400000L; // convert day to ms\r
- logger.debug(" Pruning records older than="+(cutoff/86400000L)+" ("+new Date(cutoff)+")");\r
+ private void initializeNextid() {\r
+ Connection conn = null;\r
+ try {\r
+ conn = db.getConnection();\r
+ Statement stmt = conn.createStatement();\r
+ // Build a bitset of all records in the LOG_RECORDS table\r
+ // We need to run this SELECT in stages, because otherwise we run out of memory!\r
+ RLEBitSet nbs = new RLEBitSet();\r
+ final long stepsize = 6000000L;\r
+ boolean go_again = true;\r
+ for (long i = 0; go_again; i += stepsize) {\r
+ String sql = String.format("select RECORD_ID from LOG_RECORDS LIMIT %d,%d", i, stepsize);\r
+ ResultSet rs = stmt.executeQuery(sql);\r
+ go_again = false;\r
+ while (rs.next()) {\r
+ long n = rs.getLong("RECORD_ID");\r
+ nbs.set(n);\r
+ go_again = true;\r
+ }\r
+ rs.close();\r
+ }\r
+ stmt.close();\r
+ seq_set = nbs;\r
\r
- Connection conn = null;\r
- try {\r
- // Limit to a million at a time to avoid typing up the DB for too long.\r
- conn = db.getConnection();\r
- PreparedStatement ps = conn.prepareStatement("DELETE from LOG_RECORDS where EVENT_TIME < ? limit 1000000");\r
- ps.setLong(1, cutoff);\r
- while (count > 0) {\r
- if (!ps.execute()) {\r
- int dcount = ps.getUpdateCount();\r
- count -= dcount;\r
- logger.debug(" "+dcount+" rows deleted.");\r
- did1 |= (dcount!=0);\r
- if (dcount == 0)\r
- count = 0; // prevent inf. loops\r
- } else {\r
- count = 0; // shouldn't happen!\r
- }\r
- }\r
- ps.close();\r
- Statement stmt = conn.createStatement();\r
- stmt.execute("OPTIMIZE TABLE LOG_RECORDS");\r
- stmt.close();\r
- } catch (SQLException e) {\r
- System.err.println(e);\r
- e.printStackTrace();\r
- } finally {\r
- db.release(conn);\r
- }\r
- }\r
- return did1;\r
- }\r
- private long countRecords() {\r
- long count = 0;\r
- Connection conn = null;\r
- try {\r
- conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery("SELECT COUNT(*) as COUNT from LOG_RECORDS");\r
- if (rs.next()) {\r
- count = rs.getLong("COUNT");\r
- }\r
- rs.close();\r
- stmt.close();\r
- } catch (SQLException e) {\r
- System.err.println(e);\r
- e.printStackTrace();\r
- } finally {\r
- db.release(conn);\r
- }\r
- return count;\r
- }\r
- private Map<Long,Long> getHistogram() {\r
- Map<Long,Long> map = new HashMap<Long,Long>();\r
- Connection conn = null;\r
- try {\r
- logger.debug(" LOG_RECORD table histogram...");\r
- conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- ResultSet rs = stmt.executeQuery("SELECT FLOOR(EVENT_TIME/86400000) AS DAY, COUNT(*) AS COUNT FROM LOG_RECORDS GROUP BY DAY");\r
- while (rs.next()) {\r
- long day = rs.getLong("DAY");\r
- long cnt = rs.getLong("COUNT");\r
- map.put(day, cnt);\r
- logger.debug(" "+day + " "+cnt);\r
- }\r
- rs.close();\r
- stmt.close();\r
- } catch (SQLException e) {\r
- System.err.println(e);\r
- e.printStackTrace();\r
- } finally {\r
- db.release(conn);\r
- }\r
- return map;\r
- }\r
- private void initializeNextid() {\r
- Connection conn = null;\r
- try {\r
- conn = db.getConnection();\r
- Statement stmt = conn.createStatement();\r
- // Build a bitset of all records in the LOG_RECORDS table\r
- // We need to run this SELECT in stages, because otherwise we run out of memory!\r
- RLEBitSet nbs = new RLEBitSet();\r
- final long stepsize = 6000000L;\r
- boolean go_again = true;\r
- for (long i = 0; go_again; i += stepsize) {\r
- String sql = String.format("select RECORD_ID from LOG_RECORDS LIMIT %d,%d", i, stepsize);\r
- ResultSet rs = stmt.executeQuery(sql);\r
- go_again = false;\r
- while (rs.next()) {\r
- long n = rs.getLong("RECORD_ID");\r
- nbs.set(n);\r
- go_again = true;\r
- }\r
- rs.close();\r
- }\r
- stmt.close();\r
- seq_set = nbs;\r
+ // Compare with the range for this server\r
+ // Determine the next ID for this set of record IDs\r
+ RLEBitSet tbs = (RLEBitSet) nbs.clone();\r
+ RLEBitSet idset = new RLEBitSet();\r
+ idset.set(set_start, set_start + SET_SIZE);\r
+ tbs.and(idset);\r
+ long t = tbs.length();\r
+ nextid = (t == 0) ? set_start : (t - 1);\r
+ if (nextid >= set_start + SET_SIZE) {\r
+ // Handle wraparound, when the IDs reach the end of our "range"\r
+ Long[] last = null;\r
+ Iterator<Long[]> li = tbs.getRangeIterator();\r
+ while (li.hasNext()) {\r
+ last = li.next();\r
+ }\r
+ if (last != null) {\r
+ tbs.clear(last[0], last[1] + 1);\r
+ t = tbs.length();\r
+ nextid = (t == 0) ? set_start : (t - 1);\r
+ }\r
+ }\r
+ logger.debug(String.format("initializeNextid, next ID is %d (%x)", nextid, nextid));\r
+ } catch (SQLException e) {\r
+ System.err.println(e);\r
+ e.printStackTrace();\r
+ } finally {\r
+ db.release(conn);\r
+ }\r
+ }\r
\r
- // Compare with the range for this server\r
- // Determine the next ID for this set of record IDs\r
- RLEBitSet tbs = (RLEBitSet) nbs.clone();\r
- RLEBitSet idset = new RLEBitSet();\r
- idset.set(set_start, set_start+SET_SIZE);\r
- tbs.and(idset);\r
- long t = tbs.length();\r
- nextid = (t == 0) ? set_start : (t - 1);\r
- if (nextid >= set_start+SET_SIZE) {\r
- // Handle wraparound, when the IDs reach the end of our "range"\r
- Long[] last = null;\r
- Iterator<Long[]> li = tbs.getRangeIterator();\r
- while (li.hasNext()) {\r
- last = li.next();\r
- }\r
- if (last != null) {\r
- tbs.clear(last[0], last[1]+1);\r
- t = tbs.length();\r
- nextid = (t == 0) ? set_start : (t - 1);\r
- }\r
- }\r
- logger.debug(String.format("initializeNextid, next ID is %d (%x)", nextid, nextid));\r
- } catch (SQLException e) {\r
- System.err.println(e);\r
- e.printStackTrace();\r
- } finally {\r
- db.release(conn);\r
- }\r
- }\r
+ @SuppressWarnings("resource")\r
+ private int[] process(File f) {\r
+ int ok = 0, total = 0;\r
+ try {\r
+ Connection conn = db.getConnection();\r
+ PreparedStatement ps = conn.prepareStatement(INSERT_SQL);\r
+ Reader r = f.getPath().endsWith(".gz")\r
+ ? new InputStreamReader(new GZIPInputStream(new FileInputStream(f)))\r
+ : new FileReader(f);\r
+ LineNumberReader in = new LineNumberReader(r);\r
+ String line;\r
+ while ((line = in.readLine()) != null) {\r
+ try {\r
+ for (Loadable rec : buildRecords(line)) {\r
+ rec.load(ps);\r
+ if (rec instanceof LogRecord) {\r
+ LogRecord lr = ((LogRecord) rec);\r
+ if (!seq_set.get(lr.getRecordId())) {\r
+ ps.executeUpdate();\r
+ seq_set.set(lr.getRecordId());\r
+ } else\r
+ logger.debug("Duplicate record ignored: " + lr.getRecordId());\r
+ } else {\r
+ if (++nextid > set_end)\r
+ nextid = set_start;\r
+ ps.setLong(18, nextid);\r
+ ps.executeUpdate();\r
+ seq_set.set(nextid);\r
+ }\r
+ ps.clearParameters();\r
+ ok++;\r
+ }\r
+ } catch (SQLException e) {\r
+ logger.warn("PROV8003 Invalid value in record: " + line);\r
+ logger.debug(e);\r
+ e.printStackTrace();\r
+ } catch (NumberFormatException e) {\r
+ logger.warn("PROV8004 Invalid number in record: " + line);\r
+ logger.debug(e);\r
+ e.printStackTrace();\r
+ } catch (ParseException e) {\r
+ logger.warn("PROV8005 Invalid date in record: " + line);\r
+ logger.debug(e);\r
+ e.printStackTrace();\r
+ } catch (Exception e) {\r
+ logger.warn("PROV8006 Invalid pattern in record: " + line);\r
+ logger.debug(e);\r
+ e.printStackTrace();\r
+ }\r
+ total++;\r
+ }\r
+ in.close();\r
+ ps.close();\r
+ db.release(conn);\r
+ conn = null;\r
+ } catch (FileNotFoundException e) {\r
+ logger.warn("PROV8007 Exception reading " + f + ": " + e);\r
+ } catch (IOException e) {\r
+ logger.warn("PROV8007 Exception reading " + f + ": " + e);\r
+ } catch (SQLException e) {\r
+ logger.warn("PROV8007 Exception reading " + f + ": " + e);\r
+ }\r
+ return new int[]{ok, total};\r
+ }\r
\r
- @SuppressWarnings("resource")\r
- private int[] process(File f) {\r
- int ok = 0, total = 0;\r
- try {\r
- Connection conn = db.getConnection();\r
- PreparedStatement ps = conn.prepareStatement(INSERT_SQL);\r
- Reader r = f.getPath().endsWith(".gz")\r
- ? new InputStreamReader(new GZIPInputStream(new FileInputStream(f)))\r
- : new FileReader(f);\r
- LineNumberReader in = new LineNumberReader(r);\r
- String line;\r
- while ((line = in.readLine()) != null) {\r
- try {\r
- for (Loadable rec : buildRecords(line)) {\r
- rec.load(ps);\r
- if (rec instanceof LogRecord) {\r
- LogRecord lr = ((LogRecord)rec);\r
- if (!seq_set.get(lr.getRecordId())) {\r
- ps.executeUpdate();\r
- seq_set.set(lr.getRecordId());\r
- } else\r
- logger.debug("Duplicate record ignored: "+lr.getRecordId());\r
- } else {\r
- if (++nextid > set_end)\r
- nextid = set_start;\r
- ps.setLong(18, nextid);\r
- ps.executeUpdate();\r
- seq_set.set(nextid);\r
- }\r
- ps.clearParameters();\r
- ok++;\r
- }\r
- } catch (SQLException e) {\r
- logger.warn("PROV8003 Invalid value in record: "+line);\r
- logger.debug(e);\r
- e.printStackTrace();\r
- } catch (NumberFormatException e) {\r
- logger.warn("PROV8004 Invalid number in record: "+line);\r
- logger.debug(e);\r
- e.printStackTrace();\r
- } catch (ParseException e) {\r
- logger.warn("PROV8005 Invalid date in record: "+line);\r
- logger.debug(e);\r
- e.printStackTrace();\r
- } catch (Exception e) {\r
- logger.warn("PROV8006 Invalid pattern in record: "+line);\r
- logger.debug(e);\r
- e.printStackTrace();\r
- }\r
- total++;\r
- }\r
- in.close();\r
- ps.close();\r
- db.release(conn);\r
- conn = null;\r
- } catch (FileNotFoundException e) {\r
- logger.warn("PROV8007 Exception reading "+f+": "+e);\r
- } catch (IOException e) {\r
- logger.warn("PROV8007 Exception reading "+f+": "+e);\r
- } catch (SQLException e) {\r
- logger.warn("PROV8007 Exception reading "+f+": "+e);\r
- }\r
- return new int[] { ok, total };\r
- }\r
- private Loadable[] buildRecords(String line) throws ParseException {\r
- String[] pp = line.split("\\|");\r
- if (pp != null && pp.length >= 7) {\r
- String rtype = pp[1].toUpperCase();\r
- if (rtype.equals("PUB") && pp.length == 11) {\r
- // Fields are: date|PUB|pubid|feedid|requrl|method|ctype|clen|srcip|user|status\r
- return new Loadable[] { new PublishRecord(pp) };\r
- }\r
- if (rtype.equals("DEL") && pp.length == 12) {\r
- // Fields are: date|DEL|pubid|feedid|subid|requrl|method|ctype|clen|user|status|xpubid\r
- String[] subs = pp[4].split("\\s+");\r
- if (subs != null) {\r
- Loadable[] rv = new Loadable[subs.length];\r
- for (int i = 0; i < subs.length; i++) {\r
- // create a new record for each individual sub\r
- pp[4] = subs[i];\r
- rv[i] = new DeliveryRecord(pp);\r
- }\r
- return rv;\r
- }\r
- }\r
- if (rtype.equals("EXP") && pp.length == 11) {\r
- // Fields are: date|EXP|pubid|feedid|subid|requrl|method|ctype|clen|reason|attempts\r
- ExpiryRecord e = new ExpiryRecord(pp);\r
- if (e.getReason().equals("other"))\r
- logger.info("Invalid reason '"+pp[9]+"' changed to 'other' for record: "+e.getPublishId());\r
- return new Loadable[] { e };\r
- }\r
- if (rtype.equals("PBF") && pp.length == 12) {\r
- // Fields are: date|PBF|pubid|feedid|requrl|method|ctype|clen-expected|clen-received|srcip|user|error\r
- return new Loadable[] { new PubFailRecord(pp) };\r
- }\r
- if (rtype.equals("DLX") && pp.length == 7) {\r
- // Fields are: date|DLX|pubid|feedid|subid|clen-tosend|clen-sent\r
- return new Loadable[] { new DeliveryExtraRecord(pp) };\r
- }\r
- if (rtype.equals("LOG") && (pp.length == 19 || pp.length == 20)) {\r
- // Fields are: date|LOG|pubid|feedid|requrl|method|ctype|clen|type|feedFileid|remoteAddr|user|status|subid|fileid|result|attempts|reason|record_id\r
- return new Loadable[] { new LogRecord(pp) };\r
- }\r
- }\r
- logger.warn("PROV8002 bad record: "+line);\r
- return new Loadable[0];\r
- }\r
+ private Loadable[] buildRecords(String line) throws ParseException {\r
+ String[] pp = line.split("\\|");\r
+ if (pp != null && pp.length >= 7) {\r
+ String rtype = pp[1].toUpperCase();\r
+ if (rtype.equals("PUB") && pp.length == 11) {\r
+ // Fields are: date|PUB|pubid|feedid|requrl|method|ctype|clen|srcip|user|status\r
+ return new Loadable[]{new PublishRecord(pp)};\r
+ }\r
+ if (rtype.equals("DEL") && pp.length == 12) {\r
+ // Fields are: date|DEL|pubid|feedid|subid|requrl|method|ctype|clen|user|status|xpubid\r
+ String[] subs = pp[4].split("\\s+");\r
+ if (subs != null) {\r
+ Loadable[] rv = new Loadable[subs.length];\r
+ for (int i = 0; i < subs.length; i++) {\r
+ // create a new record for each individual sub\r
+ pp[4] = subs[i];\r
+ rv[i] = new DeliveryRecord(pp);\r
+ }\r
+ return rv;\r
+ }\r
+ }\r
+ if (rtype.equals("EXP") && pp.length == 11) {\r
+ // Fields are: date|EXP|pubid|feedid|subid|requrl|method|ctype|clen|reason|attempts\r
+ ExpiryRecord e = new ExpiryRecord(pp);\r
+ if (e.getReason().equals("other"))\r
+ logger.info("Invalid reason '" + pp[9] + "' changed to 'other' for record: " + e.getPublishId());\r
+ return new Loadable[]{e};\r
+ }\r
+ if (rtype.equals("PBF") && pp.length == 12) {\r
+ // Fields are: date|PBF|pubid|feedid|requrl|method|ctype|clen-expected|clen-received|srcip|user|error\r
+ return new Loadable[]{new PubFailRecord(pp)};\r
+ }\r
+ if (rtype.equals("DLX") && pp.length == 7) {\r
+ // Fields are: date|DLX|pubid|feedid|subid|clen-tosend|clen-sent\r
+ return new Loadable[]{new DeliveryExtraRecord(pp)};\r
+ }\r
+ if (rtype.equals("LOG") && (pp.length == 19 || pp.length == 20)) {\r
+ // Fields are: date|LOG|pubid|feedid|requrl|method|ctype|clen|type|feedFileid|remoteAddr|user|status|subid|fileid|result|attempts|reason|record_id\r
+ return new Loadable[]{new LogRecord(pp)};\r
+ }\r
+ }\r
+ logger.warn("PROV8002 bad record: " + line);\r
+ return new Loadable[0];\r
+ }\r
\r
- /**\r
- * The LogfileLoader can be run stand-alone by invoking the main() method of this class.\r
- * @param a ignored\r
- * @throws InterruptedException\r
- */\r
- public static void main(String[] a) throws InterruptedException {\r
- LogfileLoader.getLoader();\r
- Thread.sleep(200000L);\r
- }\r
+ /**\r
+ * The LogfileLoader can be run stand-alone by invoking the main() method of this class.\r
+ *\r
+ * @param a ignored\r
+ * @throws InterruptedException\r
+ */\r
+ public static void main(String[] a) throws InterruptedException {\r
+ LogfileLoader.getLoader();\r
+ Thread.sleep(200000L);\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
/**\r
* This class provides a {@link TimerTask} that purges old logfiles\r
* (older than the number of days specified by the org.onap.dmaap.datarouter.provserver.logretention property).\r
+ *\r
* @author Robert Eby\r
* @version $Id: PurgeLogDirTask.java,v 1.2 2013/07/05 13:48:05 eby Exp $\r
*/\r
public class PurgeLogDirTask extends TimerTask {\r
- private static final long ONEDAY = 86400000L;\r
+ private static final long ONEDAY = 86400000L;\r
+\r
+ private final String logdir;\r
+ private final long interval;\r
\r
- private final String logdir;\r
- private final long interval;\r
+ public PurgeLogDirTask() {\r
+ Properties p = (new DB()).getProperties();\r
+ logdir = p.getProperty("org.onap.dmaap.datarouter.provserver.accesslog.dir");\r
+ String s = p.getProperty("org.onap.dmaap.datarouter.provserver.logretention", "30");\r
+ long n = 30;\r
+ try {\r
+ n = Long.parseLong(s);\r
+ } catch (NumberFormatException e) {\r
+ // ignore\r
+ }\r
+ interval = n * ONEDAY;\r
+ }\r
\r
- public PurgeLogDirTask() {\r
- Properties p = (new DB()).getProperties();\r
- logdir = p.getProperty("org.onap.dmaap.datarouter.provserver.accesslog.dir");\r
- String s = p.getProperty("org.onap.dmaap.datarouter.provserver.logretention", "30");\r
- long n = 30;\r
- try {\r
- n = Long.parseLong(s);\r
- } catch (NumberFormatException e) {\r
- // ignore\r
- }\r
- interval = n * ONEDAY;\r
- }\r
- @Override\r
- public void run() {\r
- try {\r
- File dir = new File(logdir);\r
- if (dir.exists()) {\r
- long exptime = System.currentTimeMillis() - interval;\r
- for (File logfile : dir.listFiles()) {\r
- if (logfile.lastModified() < exptime)\r
- logfile.delete();\r
- }\r
- }\r
- } catch (Exception e) {\r
- e.printStackTrace();\r
- }\r
- }\r
+ @Override\r
+ public void run() {\r
+ try {\r
+ File dir = new File(logdir);\r
+ if (dir.exists()) {\r
+ long exptime = System.currentTimeMillis() - interval;\r
+ for (File logfile : dir.listFiles()) {\r
+ if (logfile.lastModified() < exptime)\r
+ logfile.delete();\r
+ }\r
+ }\r
+ } catch (Exception e) {\r
+ e.printStackTrace();\r
+ }\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id$\r
*/\r
public class RLEBitSet {\r
- /**\r
- * Used to represent a continues set of <i>nbits</i> 1 bits starting at <i>start</i>.\r
- */\r
- private class RLE implements Comparable<RLE> {\r
- private final long start;\r
- private long nbits;\r
- public RLE(long from, long nbits) {\r
- this.start = from;\r
- this.nbits = (nbits > 0) ? nbits : 0;\r
- }\r
- /**\r
- * Returns the index of the first set bit in this RLE.\r
- * @return the index\r
- */\r
- public long firstBit() {\r
- return start;\r
- }\r
- /**\r
- * Returns the index of the last set bit in this RLE.\r
- * @return the index\r
- */\r
- public long lastBit() {\r
- return start+nbits-1;\r
- }\r
- public boolean intersects(RLE b2) {\r
- if (b2.lastBit() < this.firstBit())\r
- return false;\r
- if (b2.firstBit() > this.lastBit())\r
- return false;\r
- return true;\r
- }\r
- public boolean isSubset(RLE b2) {\r
- if (firstBit() < b2.firstBit())\r
- return false;\r
- if (firstBit() > b2.lastBit())\r
- return false;\r
- if (lastBit() < b2.firstBit())\r
- return false;\r
- if (lastBit() > b2.lastBit())\r
- return false;\r
- return true;\r
- }\r
- public RLE union(RLE b2) {\r
- RLE b1 = this;\r
- if (b1.firstBit() > b2.firstBit()) {\r
- b1 = b2;\r
- b2 = this;\r
- }\r
- long end = b1.lastBit();\r
- if (b2.lastBit() > b1.lastBit())\r
- end = b2.lastBit();\r
- return new RLE(b1.firstBit(), end-b1.firstBit()+1);\r
- }\r
- /**\r
- * Returns the number of bits set to {@code true} in this {@code RLE}.\r
- * @return the number of bits set to {@code true} in this {@code RLE}.\r
- */\r
- public int cardinality() {\r
- return (int) nbits;\r
- }\r
- @Override\r
- public int compareTo(RLE o) {\r
- if (this.equals(o))\r
- return 0;\r
- return (start < o.start) ? -1 : 1;\r
- }\r
- @Override\r
- public boolean equals(Object obj) {\r
- if (obj instanceof RLE) {\r
- RLE b = (RLE) obj;\r
- return (start == b.start) && (nbits == b.nbits);\r
- }\r
- return false;\r
- }\r
- @Override\r
- public int hashCode() {\r
- return new Long(start ^ nbits).hashCode();\r
- }\r
- @Override\r
- public String toString() {\r
- return "["+firstBit()+".."+lastBit()+"]";\r
- }\r
- }\r
- private SortedSet<RLE> bitsets;\r
-\r
- /**\r
- * Creates a new bit set. All bits are initially <code>false</code>.\r
- */\r
- public RLEBitSet() {\r
- bitsets = new TreeSet<RLE>();\r
- }\r
- /**\r
- * Creates a new bit set, with bits set according to the value of <code>s</code>.\r
- * @param s the initialization String\r
- */\r
- public RLEBitSet(String s) {\r
- bitsets = new TreeSet<RLE>();\r
- set(s);\r
- }\r
- /**\r
- * Returns the "logical size" of this {@code RLEBitSet}: the index of the highest set bit\r
- * in the {@code RLEBitSet} plus one. Returns zero if the {@code RLEBitSet} contains no set bits.\r
- * @return the logical size of this {@code RLEBitSet}\r
- */\r
- public long length() {\r
- if (isEmpty())\r
- return 0;\r
- return bitsets.last().lastBit()+1;\r
- }\r
- /**\r
- * Returns the value of the bit with the specified index. The value is {@code true} if the bit\r
- * with the index bit is currently set in this BitSet; otherwise, the result is {@code false}.\r
- * @param bit the bit index\r
- * @return the value of the bit with the specified index\r
- */\r
- public boolean get(long bit) {\r
- synchronized (bitsets) {\r
- for (RLE bs : bitsets) {\r
- if (bit >= bs.firstBit() && bit <= bs.lastBit())\r
- return true;\r
- }\r
- }\r
- return false;\r
- }\r
- /**\r
- * Set one or more bits to true, based on the value of <code>s</code>.\r
- * @param s the initialization String, which consists of a comma or space separated list of\r
- * non-negative numbers and ranges. An individual number represents the bit index to set.\r
- * A range (two numbers separated by a dash) causes all bit indexes between the two numbers\r
- * (inclusive) to be set.\r
- * @exception NumberFormatException - if a number is incorrectly formatted\r
- * @exception IndexOutOfBoundsException - if an index is negative\r
- */\r
- public void set(String s) throws NumberFormatException {\r
- s = s.trim();\r
- if (!s.isEmpty()) {\r
- for (String s2 : s.split("[, \n]+")) {\r
- if (s2.indexOf('-') >= 0) {\r
- String[] pp = s2.split("-");\r
- long f = Long.parseLong(pp[0]);\r
- long t = Long.parseLong(pp[1]);\r
- set(f, t+1);\r
- } else\r
- set(Long.parseLong(s2));\r
- }\r
- }\r
- }\r
- /**\r
- * Sets the bit at the specified index to {@code true}.\r
- * @param bit a bit index\r
- */\r
- public void set(long bit) {\r
- set(bit, bit+1);\r
- }\r
- /**\r
- * Sets the bits from the specified {@code from} (inclusive) to the\r
- * specified {@code to} (exclusive) to {@code true}.\r
- * @param from index of the first bit to be set\r
- * @param to index after the last bit to be set\r
- * @throws IndexOutOfBoundsException if {@code from} is negative,\r
- * or {@code to} is negative,\r
- * or {@code from} is larger than {@code to}\r
- */\r
- public void set(long from, long to) {\r
- checkRange(from, to);\r
- RLE newbits = new RLE(from, to-from);\r
- synchronized (bitsets) {\r
- for (RLE bs : bitsets) {\r
- if (bs.intersects(newbits)) {\r
- if (!newbits.isSubset(bs)) {\r
- bitsets.remove(bs);\r
- bitsets.add(newbits.union(bs));\r
- coalesce();\r
- }\r
- return;\r
- }\r
- }\r
- bitsets.add(newbits);\r
- }\r
- coalesce();\r
- }\r
- /**\r
- * Sets all of the bits in this BitSet to {@code false}.\r
- */\r
- public void clear() {\r
- synchronized (bitsets) {\r
- bitsets.clear();\r
- }\r
- }\r
- /**\r
- * Sets the bit specified by the index to {@code false}.\r
- * @param bit the index of the bit to be cleared\r
- */\r
- public void clear(long bit) {\r
- clear(bit, bit+1);\r
- }\r
- /**\r
- * Sets the bits from the specified {@code from} (inclusive) to the\r
- * specified {@code to} (exclusive) to {@code false}.\r
- * @param from index of the first bit to be cleared\r
- * @param to index after the last bit to be cleared\r
- * @throws IndexOutOfBoundsException if {@code from} is negative,\r
- * or {@code to} is negative,\r
- * or {@code from} is larger than {@code to}\r
- */\r
- public void clear(long from, long to) {\r
- checkRange(from, to);\r
- RLE newbits = new RLE(from, to-from);\r
- List<RLE> newranges = new ArrayList<RLE>();\r
- synchronized (bitsets) {\r
- for (RLE bs : bitsets) {\r
- if (bs.intersects(newbits)) {\r
- // preserve the bits that are not being cleared\r
- long len = newbits.firstBit() - bs.firstBit();\r
- if (len > 0)\r
- newranges.add(new RLE(bs.firstBit(), len));\r
- len = bs.lastBit() - newbits.lastBit();\r
- if (len > 0)\r
- newranges.add(new RLE(newbits.lastBit()+1, len));\r
- bs.nbits = 0;\r
- }\r
- }\r
- if (!newranges.isEmpty()) {\r
- for (RLE bs : newranges) {\r
- bitsets.add(bs);\r
- }\r
- }\r
- }\r
- coalesce();\r
- }\r
- /** Combine abutting RLEBitSets, and remove 0 length RLEBitSets. */\r
- private void coalesce() {\r
- RLE last = null;\r
- synchronized (bitsets) {\r
- Iterator<RLE> iter = bitsets.iterator();\r
- while (iter.hasNext()) {\r
- RLE bs = iter.next();\r
- if (last != null && (last.lastBit()+1 == bs.firstBit())) {\r
- last.nbits += bs.nbits;\r
- iter.remove();\r
- } else if (bs.nbits == 0) {\r
- iter.remove();\r
- } else {\r
- last = bs;\r
- }\r
- }\r
- }\r
- }\r
- /**\r
- * Checks that fromIndex ... toIndex is a valid range of bit indices.\r
- */\r
- private static void checkRange(long from, long to) {\r
- if (from < 0)\r
- throw new IndexOutOfBoundsException("fromIndex < 0: " + from);\r
- if (to < 0)\r
- throw new IndexOutOfBoundsException("toIndex < 0: " + to);\r
- if (from > to)\r
- throw new IndexOutOfBoundsException("fromIndex: " + from + " > toIndex: " + to);\r
- }\r
- /**\r
- * Performs a logical <b>AND</b> of this target bit set with the argument bit set.\r
- * This bit set is modified so that each bit in it has the value {@code true} if and only if\r
- * it both initially had the value {@code true} and the corresponding bit in the bit set\r
- * argument also had the value {@code true}.\r
- * @param set a {@code RLEBitSet}\r
- */\r
- public void and(RLEBitSet set) {\r
- long last = 0;\r
- synchronized (set.bitsets) {\r
- for (RLE bs : set.bitsets) {\r
- clear(last, bs.start);\r
- last = bs.start + bs.nbits;\r
- }\r
- }\r
- clear(last, Long.MAX_VALUE);\r
- }\r
- /**\r
- * Clears all of the bits in this {@code RLEBitSet} whose corresponding bit is set in\r
- * the specified {@code RLEBitSet}.\r
- * @param set the {@code RLEBitSet} with which to mask this {@code RLEBitSet}\r
- */\r
- public void andNot(RLEBitSet set) {\r
- synchronized (set.bitsets) {\r
- for (RLE bs : set.bitsets) {\r
- clear(bs.start, bs.start + bs.nbits);\r
- }\r
- }\r
- }\r
- /**\r
- * Returns true if this {@code RLEBitSet} contains no bits that are set\r
- * to {@code true}.\r
- *\r
- * @return boolean indicating whether this {@code BitSet} is empty\r
- */\r
- public boolean isEmpty() {\r
- return bitsets.isEmpty();\r
- }\r
- /**\r
- * Returns the number of bits set to {@code true} in this {@code RLEBitSet}.\r
- * @return the number of bits set to {@code true} in this {@code RLEBitSet}.\r
- */\r
- public int cardinality() {\r
- int n = 0;\r
- synchronized (bitsets) {\r
- for (RLE bs : bitsets) {\r
- n += bs.cardinality();\r
- }\r
- }\r
- return n;\r
- }\r
- /**\r
- * Cloning this RLEBitSet produces a new RLEBitSet that is equal to it. The clone of the\r
- * bit set is another bit set that has exactly the same bits set to true as this bit set.\r
- * @return a clone of this bit set\r
- */\r
- public Object clone() {\r
- RLEBitSet rv = new RLEBitSet();\r
- synchronized (bitsets) {\r
- for (RLE bs : bitsets) {\r
- rv.bitsets.add(new RLE(bs.start, bs.nbits));\r
- }\r
- }\r
- return rv;\r
- }\r
- /**\r
- * Returns a string representation of this bit set, using the same notation as is required for\r
- * the String constructor. For every index for which this {@code RLEBitSet} contains a bit in\r
- * the set state, the decimal representation of that index is included in the result. Such\r
- * indices are listed in order from lowest to highest, separated by ",". Ranges of set bits are\r
- * indicated by <i>lobit</i>-<i>hibit</i>.\r
- * @return the String\r
- */\r
- @Override\r
- public String toString() {\r
- StringBuilder sb = new StringBuilder();\r
- String prefix = "";\r
- synchronized (bitsets) {\r
- for (RLE bs : bitsets) {\r
- sb.append(prefix);\r
- prefix = ",";\r
- long s = bs.firstBit();\r
- long e = bs.lastBit();\r
- sb.append(s);\r
- if (s != e)\r
- sb.append('-').append(e);\r
- }\r
- }\r
- return sb.toString();\r
- }\r
- /**\r
- * Return an Iterator which provides pairs of {@code Long}s representing the beginning and\r
- * ending index of a range of set bits in this {@code RLEBitSet}.\r
- * @return the Iterator\r
- */\r
- public Iterator<Long[]> getRangeIterator() {\r
- return new Iterator<Long[]>() {\r
- private Iterator<RLE> i = bitsets.iterator();\r
-\r
- @Override\r
- public boolean hasNext() {\r
- return i.hasNext();\r
- }\r
-\r
- @Override\r
- public Long[] next() {\r
- RLE bs = i.next();\r
- return new Long[] { bs.firstBit(), bs.lastBit() };\r
- }\r
-\r
- @Override\r
- public void remove() {\r
- throw new UnsupportedOperationException();\r
- }\r
- };\r
- }\r
+ /**\r
+ * Used to represent a continues set of <i>nbits</i> 1 bits starting at <i>start</i>.\r
+ */\r
+ private class RLE implements Comparable<RLE> {\r
+ private final long start;\r
+ private long nbits;\r
+\r
+ public RLE(long from, long nbits) {\r
+ this.start = from;\r
+ this.nbits = (nbits > 0) ? nbits : 0;\r
+ }\r
+\r
+ /**\r
+ * Returns the index of the first set bit in this RLE.\r
+ *\r
+ * @return the index\r
+ */\r
+ public long firstBit() {\r
+ return start;\r
+ }\r
+\r
+ /**\r
+ * Returns the index of the last set bit in this RLE.\r
+ *\r
+ * @return the index\r
+ */\r
+ public long lastBit() {\r
+ return start + nbits - 1;\r
+ }\r
+\r
+ public boolean intersects(RLE b2) {\r
+ if (b2.lastBit() < this.firstBit())\r
+ return false;\r
+ if (b2.firstBit() > this.lastBit())\r
+ return false;\r
+ return true;\r
+ }\r
+\r
+ public boolean isSubset(RLE b2) {\r
+ if (firstBit() < b2.firstBit())\r
+ return false;\r
+ if (firstBit() > b2.lastBit())\r
+ return false;\r
+ if (lastBit() < b2.firstBit())\r
+ return false;\r
+ if (lastBit() > b2.lastBit())\r
+ return false;\r
+ return true;\r
+ }\r
+\r
+ public RLE union(RLE b2) {\r
+ RLE b1 = this;\r
+ if (b1.firstBit() > b2.firstBit()) {\r
+ b1 = b2;\r
+ b2 = this;\r
+ }\r
+ long end = b1.lastBit();\r
+ if (b2.lastBit() > b1.lastBit())\r
+ end = b2.lastBit();\r
+ return new RLE(b1.firstBit(), end - b1.firstBit() + 1);\r
+ }\r
+\r
+ /**\r
+ * Returns the number of bits set to {@code true} in this {@code RLE}.\r
+ *\r
+ * @return the number of bits set to {@code true} in this {@code RLE}.\r
+ */\r
+ public int cardinality() {\r
+ return (int) nbits;\r
+ }\r
+\r
+ @Override\r
+ public int compareTo(RLE o) {\r
+ if (this.equals(o))\r
+ return 0;\r
+ return (start < o.start) ? -1 : 1;\r
+ }\r
+\r
+ @Override\r
+ public boolean equals(Object obj) {\r
+ if (obj instanceof RLE) {\r
+ RLE b = (RLE) obj;\r
+ return (start == b.start) && (nbits == b.nbits);\r
+ }\r
+ return false;\r
+ }\r
+\r
+ @Override\r
+ public int hashCode() {\r
+ return new Long(start ^ nbits).hashCode();\r
+ }\r
+\r
+ @Override\r
+ public String toString() {\r
+ return "[" + firstBit() + ".." + lastBit() + "]";\r
+ }\r
+ }\r
+\r
+ private SortedSet<RLE> bitsets;\r
+\r
+ /**\r
+ * Creates a new bit set. All bits are initially <code>false</code>.\r
+ */\r
+ public RLEBitSet() {\r
+ bitsets = new TreeSet<RLE>();\r
+ }\r
+\r
+ /**\r
+ * Creates a new bit set, with bits set according to the value of <code>s</code>.\r
+ *\r
+ * @param s the initialization String\r
+ */\r
+ public RLEBitSet(String s) {\r
+ bitsets = new TreeSet<RLE>();\r
+ set(s);\r
+ }\r
+\r
+ /**\r
+ * Returns the "logical size" of this {@code RLEBitSet}: the index of the highest set bit\r
+ * in the {@code RLEBitSet} plus one. Returns zero if the {@code RLEBitSet} contains no set bits.\r
+ *\r
+ * @return the logical size of this {@code RLEBitSet}\r
+ */\r
+ public long length() {\r
+ if (isEmpty())\r
+ return 0;\r
+ return bitsets.last().lastBit() + 1;\r
+ }\r
+\r
+ /**\r
+ * Returns the value of the bit with the specified index. The value is {@code true} if the bit\r
+ * with the index bit is currently set in this BitSet; otherwise, the result is {@code false}.\r
+ *\r
+ * @param bit the bit index\r
+ * @return the value of the bit with the specified index\r
+ */\r
+ public boolean get(long bit) {\r
+ synchronized (bitsets) {\r
+ for (RLE bs : bitsets) {\r
+ if (bit >= bs.firstBit() && bit <= bs.lastBit())\r
+ return true;\r
+ }\r
+ }\r
+ return false;\r
+ }\r
+\r
+ /**\r
+ * Set one or more bits to true, based on the value of <code>s</code>.\r
+ *\r
+ * @param s the initialization String, which consists of a comma or space separated list of\r
+ * non-negative numbers and ranges. An individual number represents the bit index to set.\r
+ * A range (two numbers separated by a dash) causes all bit indexes between the two numbers\r
+ * (inclusive) to be set.\r
+ * @throws NumberFormatException - if a number is incorrectly formatted\r
+ * @throws IndexOutOfBoundsException - if an index is negative\r
+ */\r
+ public void set(String s) throws NumberFormatException {\r
+ s = s.trim();\r
+ if (!s.isEmpty()) {\r
+ for (String s2 : s.split("[, \n]+")) {\r
+ if (s2.indexOf('-') >= 0) {\r
+ String[] pp = s2.split("-");\r
+ long f = Long.parseLong(pp[0]);\r
+ long t = Long.parseLong(pp[1]);\r
+ set(f, t + 1);\r
+ } else\r
+ set(Long.parseLong(s2));\r
+ }\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Sets the bit at the specified index to {@code true}.\r
+ *\r
+ * @param bit a bit index\r
+ */\r
+ public void set(long bit) {\r
+ set(bit, bit + 1);\r
+ }\r
+\r
+ /**\r
+ * Sets the bits from the specified {@code from} (inclusive) to the\r
+ * specified {@code to} (exclusive) to {@code true}.\r
+ *\r
+ * @param from index of the first bit to be set\r
+ * @param to index after the last bit to be set\r
+ * @throws IndexOutOfBoundsException if {@code from} is negative,\r
+ * or {@code to} is negative,\r
+ * or {@code from} is larger than {@code to}\r
+ */\r
+ public void set(long from, long to) {\r
+ checkRange(from, to);\r
+ RLE newbits = new RLE(from, to - from);\r
+ synchronized (bitsets) {\r
+ for (RLE bs : bitsets) {\r
+ if (bs.intersects(newbits)) {\r
+ if (!newbits.isSubset(bs)) {\r
+ bitsets.remove(bs);\r
+ bitsets.add(newbits.union(bs));\r
+ coalesce();\r
+ }\r
+ return;\r
+ }\r
+ }\r
+ bitsets.add(newbits);\r
+ }\r
+ coalesce();\r
+ }\r
+\r
+ /**\r
+ * Sets all of the bits in this BitSet to {@code false}.\r
+ */\r
+ public void clear() {\r
+ synchronized (bitsets) {\r
+ bitsets.clear();\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Sets the bit specified by the index to {@code false}.\r
+ *\r
+ * @param bit the index of the bit to be cleared\r
+ */\r
+ public void clear(long bit) {\r
+ clear(bit, bit + 1);\r
+ }\r
+\r
+ /**\r
+ * Sets the bits from the specified {@code from} (inclusive) to the\r
+ * specified {@code to} (exclusive) to {@code false}.\r
+ *\r
+ * @param from index of the first bit to be cleared\r
+ * @param to index after the last bit to be cleared\r
+ * @throws IndexOutOfBoundsException if {@code from} is negative,\r
+ * or {@code to} is negative,\r
+ * or {@code from} is larger than {@code to}\r
+ */\r
+ public void clear(long from, long to) {\r
+ checkRange(from, to);\r
+ RLE newbits = new RLE(from, to - from);\r
+ List<RLE> newranges = new ArrayList<RLE>();\r
+ synchronized (bitsets) {\r
+ for (RLE bs : bitsets) {\r
+ if (bs.intersects(newbits)) {\r
+ // preserve the bits that are not being cleared\r
+ long len = newbits.firstBit() - bs.firstBit();\r
+ if (len > 0)\r
+ newranges.add(new RLE(bs.firstBit(), len));\r
+ len = bs.lastBit() - newbits.lastBit();\r
+ if (len > 0)\r
+ newranges.add(new RLE(newbits.lastBit() + 1, len));\r
+ bs.nbits = 0;\r
+ }\r
+ }\r
+ if (!newranges.isEmpty()) {\r
+ for (RLE bs : newranges) {\r
+ bitsets.add(bs);\r
+ }\r
+ }\r
+ }\r
+ coalesce();\r
+ }\r
+\r
+ /**\r
+ * Combine abutting RLEBitSets, and remove 0 length RLEBitSets.\r
+ */\r
+ private void coalesce() {\r
+ RLE last = null;\r
+ synchronized (bitsets) {\r
+ Iterator<RLE> iter = bitsets.iterator();\r
+ while (iter.hasNext()) {\r
+ RLE bs = iter.next();\r
+ if (last != null && (last.lastBit() + 1 == bs.firstBit())) {\r
+ last.nbits += bs.nbits;\r
+ iter.remove();\r
+ } else if (bs.nbits == 0) {\r
+ iter.remove();\r
+ } else {\r
+ last = bs;\r
+ }\r
+ }\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Checks that fromIndex ... toIndex is a valid range of bit indices.\r
+ */\r
+ private static void checkRange(long from, long to) {\r
+ if (from < 0)\r
+ throw new IndexOutOfBoundsException("fromIndex < 0: " + from);\r
+ if (to < 0)\r
+ throw new IndexOutOfBoundsException("toIndex < 0: " + to);\r
+ if (from > to)\r
+ throw new IndexOutOfBoundsException("fromIndex: " + from + " > toIndex: " + to);\r
+ }\r
+\r
+ /**\r
+ * Performs a logical <b>AND</b> of this target bit set with the argument bit set.\r
+ * This bit set is modified so that each bit in it has the value {@code true} if and only if\r
+ * it both initially had the value {@code true} and the corresponding bit in the bit set\r
+ * argument also had the value {@code true}.\r
+ *\r
+ * @param set a {@code RLEBitSet}\r
+ */\r
+ public void and(RLEBitSet set) {\r
+ long last = 0;\r
+ synchronized (set.bitsets) {\r
+ for (RLE bs : set.bitsets) {\r
+ clear(last, bs.start);\r
+ last = bs.start + bs.nbits;\r
+ }\r
+ }\r
+ clear(last, Long.MAX_VALUE);\r
+ }\r
+\r
+ /**\r
+ * Clears all of the bits in this {@code RLEBitSet} whose corresponding bit is set in\r
+ * the specified {@code RLEBitSet}.\r
+ *\r
+ * @param set the {@code RLEBitSet} with which to mask this {@code RLEBitSet}\r
+ */\r
+ public void andNot(RLEBitSet set) {\r
+ synchronized (set.bitsets) {\r
+ for (RLE bs : set.bitsets) {\r
+ clear(bs.start, bs.start + bs.nbits);\r
+ }\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Returns true if this {@code RLEBitSet} contains no bits that are set\r
+ * to {@code true}.\r
+ *\r
+ * @return boolean indicating whether this {@code BitSet} is empty\r
+ */\r
+ public boolean isEmpty() {\r
+ return bitsets.isEmpty();\r
+ }\r
+\r
+ /**\r
+ * Returns the number of bits set to {@code true} in this {@code RLEBitSet}.\r
+ *\r
+ * @return the number of bits set to {@code true} in this {@code RLEBitSet}.\r
+ */\r
+ public int cardinality() {\r
+ int n = 0;\r
+ synchronized (bitsets) {\r
+ for (RLE bs : bitsets) {\r
+ n += bs.cardinality();\r
+ }\r
+ }\r
+ return n;\r
+ }\r
+\r
+ /**\r
+ * Cloning this RLEBitSet produces a new RLEBitSet that is equal to it. The clone of the\r
+ * bit set is another bit set that has exactly the same bits set to true as this bit set.\r
+ *\r
+ * @return a clone of this bit set\r
+ */\r
+ public Object clone() {\r
+ RLEBitSet rv = new RLEBitSet();\r
+ synchronized (bitsets) {\r
+ for (RLE bs : bitsets) {\r
+ rv.bitsets.add(new RLE(bs.start, bs.nbits));\r
+ }\r
+ }\r
+ return rv;\r
+ }\r
+\r
+ /**\r
+ * Returns a string representation of this bit set, using the same notation as is required for\r
+ * the String constructor. For every index for which this {@code RLEBitSet} contains a bit in\r
+ * the set state, the decimal representation of that index is included in the result. Such\r
+ * indices are listed in order from lowest to highest, separated by ",". Ranges of set bits are\r
+ * indicated by <i>lobit</i>-<i>hibit</i>.\r
+ *\r
+ * @return the String\r
+ */\r
+ @Override\r
+ public String toString() {\r
+ StringBuilder sb = new StringBuilder();\r
+ String prefix = "";\r
+ synchronized (bitsets) {\r
+ for (RLE bs : bitsets) {\r
+ sb.append(prefix);\r
+ prefix = ",";\r
+ long s = bs.firstBit();\r
+ long e = bs.lastBit();\r
+ sb.append(s);\r
+ if (s != e)\r
+ sb.append('-').append(e);\r
+ }\r
+ }\r
+ return sb.toString();\r
+ }\r
+\r
+ /**\r
+ * Return an Iterator which provides pairs of {@code Long}s representing the beginning and\r
+ * ending index of a range of set bits in this {@code RLEBitSet}.\r
+ *\r
+ * @return the Iterator\r
+ */\r
+ public Iterator<Long[]> getRangeIterator() {\r
+ return new Iterator<Long[]>() {\r
+ private Iterator<RLE> i = bitsets.iterator();\r
+\r
+ @Override\r
+ public boolean hasNext() {\r
+ return i.hasNext();\r
+ }\r
+\r
+ @Override\r
+ public Long[] next() {\r
+ RLE bs = i.next();\r
+ return new Long[]{bs.firstBit(), bs.lastBit()};\r
+ }\r
+\r
+ @Override\r
+ public void remove() {\r
+ throw new UnsupportedOperationException();\r
+ }\r
+ };\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* <tr><td>(no value)</td><td>filter disabled</td></tr>\r
* <tr><td>off</td><td>filter disabled</td></tr>\r
* <tr><td>N[,M[,action]]</td><td>set N, M, and action (used in the algorithm below).\r
- * Action is <i>drop</i> or <i>throttle</i>.\r
- * If M is missing, it defaults to 5 minutes.\r
- * If the action is missing, it defaults to <i>drop</i>.\r
+ * Action is <i>drop</i> or <i>throttle</i>.\r
+ * If M is missing, it defaults to 5 minutes.\r
+ * If the action is missing, it defaults to <i>drop</i>.\r
* </td></tr>\r
* </table>\r
* <p>\r
* <ol>\r
* <li>the filter is enabled, and</li>\r
* <li>N /publish requests come to the provisioning server in M minutes\r
- * <ol>\r
- * <li>from the same IP address</li>\r
- * <li>for the same feed</li>\r
- * <li>lacking the <i>Expect: 100-continue</i> header</li>\r
- * </ol>\r
+ * <ol>\r
+ * <li>from the same IP address</li>\r
+ * <li>for the same feed</li>\r
+ * <li>lacking the <i>Expect: 100-continue</i> header</li>\r
+ * </ol>\r
* </li>\r
* </ol>\r
* The action that can be performed (if triggered) are:\r
* <ol>\r
* <li><i>drop</i> - the connection is dropped immediately.</li>\r
* <li><i>throttle</i> - [not supported] the connection is put into a low priority queue with all other throttled connections.\r
- * These are then processed at a slower rate. Note: this option does not work correctly, and is disabled.\r
- * The only action that is supported is <i>drop</i>.\r
+ * These are then processed at a slower rate. Note: this option does not work correctly, and is disabled.\r
+ * The only action that is supported is <i>drop</i>.\r
* </li>\r
* </ol>\r
*\r
* @version $Id: ThrottleFilter.java,v 1.2 2014/03/12 19:45:41 eby Exp $\r
*/\r
public class ThrottleFilter extends TimerTask implements Filter {\r
- public static final int DEFAULT_N = 10;\r
- public static final int DEFAULT_M = 5;\r
- public static final String THROTTLE_MARKER = "org.onap.dmaap.datarouter.provisioning.THROTTLE_MARKER";\r
- private static final String JETTY_REQUEST = "org.eclipse.jetty.server.Request";\r
- private static final long ONE_MINUTE = 60000L;\r
- private static final int ACTION_DROP = 0;\r
- private static final int ACTION_THROTTLE = 1;\r
+ public static final int DEFAULT_N = 10;\r
+ public static final int DEFAULT_M = 5;\r
+ public static final String THROTTLE_MARKER = "org.onap.dmaap.datarouter.provisioning.THROTTLE_MARKER";\r
+ private static final String JETTY_REQUEST = "org.eclipse.jetty.server.Request";\r
+ private static final long ONE_MINUTE = 60000L;\r
+ private static final int ACTION_DROP = 0;\r
+ private static final int ACTION_THROTTLE = 1;\r
+\r
+ // Configuration\r
+ private static boolean enabled = false; // enabled or not\r
+ private static int n_requests = 0; // number of requests in M minutes\r
+ private static int m_minutes = 0; // sampling period\r
+ private static int action = ACTION_DROP; // action to take (throttle or drop)\r
+\r
+ private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
+ private static Map<String, Counter> map = new HashMap<String, Counter>();\r
+ private static final Timer rolex = new Timer();\r
+\r
+ @Override\r
+ public void init(FilterConfig arg0) throws ServletException {\r
+ configure();\r
+ rolex.scheduleAtFixedRate(this, 5 * 60000L, 5 * 60000L); // Run once every 5 minutes to clean map\r
+ }\r
+\r
+ /**\r
+ * Configure the throttle. This should be called from BaseServlet.provisioningParametersChanged(), to make sure it stays up to date.\r
+ */\r
+ public static void configure() {\r
+ Parameters p = Parameters.getParameter(Parameters.THROTTLE_FILTER);\r
+ if (p != null) {\r
+ try {\r
+ Class.forName(JETTY_REQUEST);\r
+ String v = p.getValue();\r
+ if (v != null && !v.equals("off")) {\r
+ String[] pp = v.split(",");\r
+ if (pp != null) {\r
+ n_requests = (pp.length > 0) ? getInt(pp[0], DEFAULT_N) : DEFAULT_N;\r
+ m_minutes = (pp.length > 1) ? getInt(pp[1], DEFAULT_M) : DEFAULT_M;\r
+ action = (pp.length > 2 && pp[2] != null && pp[2].equalsIgnoreCase("throttle")) ? ACTION_THROTTLE : ACTION_DROP;\r
+ enabled = true;\r
+ // ACTION_THROTTLE is not currently working, so is not supported\r
+ if (action == ACTION_THROTTLE) {\r
+ action = ACTION_DROP;\r
+ logger.info("Throttling is not currently supported; action changed to DROP");\r
+ }\r
+ logger.info("ThrottleFilter is ENABLED for /publish requests; N=" + n_requests + ", M=" + m_minutes + ", Action=" + action);\r
+ return;\r
+ }\r
+ }\r
+ } catch (ClassNotFoundException e) {\r
+ logger.warn("Class " + JETTY_REQUEST + " is not available; this filter requires Jetty.");\r
+ }\r
+ }\r
+ logger.info("ThrottleFilter is DISABLED for /publish requests.");\r
+ enabled = false;\r
+ map.clear();\r
+ }\r
+\r
+ private static int getInt(String s, int deflt) {\r
+ try {\r
+ return Integer.parseInt(s);\r
+ } catch (NumberFormatException x) {\r
+ return deflt;\r
+ }\r
+ }\r
+\r
+ @Override\r
+ public void destroy() {\r
+ rolex.cancel();\r
+ map.clear();\r
+ }\r
+\r
+ @Override\r
+ public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)\r
+ throws IOException, ServletException {\r
+ if (enabled && action == ACTION_THROTTLE) {\r
+ throttleFilter((HttpServletRequest) request, (HttpServletResponse) response, chain);\r
+ } else if (enabled) {\r
+ dropFilter((HttpServletRequest) request, (HttpServletResponse) response, chain);\r
+ } else {\r
+ chain.doFilter(request, response);\r
+ }\r
+ }\r
+\r
+ public void dropFilter(HttpServletRequest request, HttpServletResponse response, FilterChain chain)\r
+ throws IOException, ServletException {\r
+ int rate = getRequestRate((HttpServletRequest) request);\r
+ if (rate >= n_requests) {\r
+ // drop request - only works under Jetty\r
+ String m = String.format("Dropping connection: %s %d bad connections in %d minutes", getConnectionId((HttpServletRequest) request), rate, m_minutes);\r
+ logger.info(m);\r
+ Request base_request = (request instanceof Request)\r
+ ? (Request) request\r
+ : AbstractHttpConnection.getCurrentConnection().getRequest();\r
+ base_request.getConnection().getEndPoint().close();\r
+ } else {\r
+ chain.doFilter(request, response);\r
+ }\r
+ }\r
+\r
+ public void throttleFilter(HttpServletRequest request, HttpServletResponse response, FilterChain chain)\r
+ throws IOException, ServletException {\r
+ // throttle request\r
+ String id = getConnectionId((HttpServletRequest) request);\r
+ int rate = getRequestRate((HttpServletRequest) request);\r
+ Object results = request.getAttribute(THROTTLE_MARKER);\r
+ if (rate >= n_requests && results == null) {\r
+ String m = String.format("Throttling connection: %s %d bad connections in %d minutes", getConnectionId((HttpServletRequest) request), rate, m_minutes);\r
+ logger.info(m);\r
+ Continuation continuation = ContinuationSupport.getContinuation(request);\r
+ continuation.suspend();\r
+ register(id, continuation);\r
+ continuation.undispatch();\r
+ } else {\r
+ chain.doFilter(request, response);\r
+ @SuppressWarnings("resource")\r
+ InputStream is = request.getInputStream();\r
+ byte[] b = new byte[4096];\r
+ int n = is.read(b);\r
+ while (n > 0) {\r
+ n = is.read(b);\r
+ }\r
+ resume(id);\r
+ }\r
+ }\r
+\r
+ private Map<String, List<Continuation>> suspended_requests = new HashMap<String, List<Continuation>>();\r
\r
- // Configuration\r
- private static boolean enabled = false; // enabled or not\r
- private static int n_requests = 0; // number of requests in M minutes\r
- private static int m_minutes = 0; // sampling period\r
- private static int action = ACTION_DROP; // action to take (throttle or drop)\r
+ private void register(String id, Continuation continuation) {\r
+ synchronized (suspended_requests) {\r
+ List<Continuation> list = suspended_requests.get(id);\r
+ if (list == null) {\r
+ list = new ArrayList<Continuation>();\r
+ suspended_requests.put(id, list);\r
+ }\r
+ list.add(continuation);\r
+ }\r
+ }\r
\r
- private static Logger logger = Logger.getLogger("org.onap.dmaap.datarouter.provisioning.internal");\r
- private static Map<String, Counter> map = new HashMap<String, Counter>();\r
- private static final Timer rolex = new Timer();\r
+ private void resume(String id) {\r
+ synchronized (suspended_requests) {\r
+ List<Continuation> list = suspended_requests.get(id);\r
+ if (list != null) {\r
+ // when the waited for event happens\r
+ Continuation continuation = list.remove(0);\r
+ continuation.setAttribute(ThrottleFilter.THROTTLE_MARKER, new Object());\r
+ continuation.resume();\r
+ }\r
+ }\r
+ }\r
\r
- @Override\r
- public void init(FilterConfig arg0) throws ServletException {\r
- configure();\r
- rolex.scheduleAtFixedRate(this, 5*60000L, 5*60000L); // Run once every 5 minutes to clean map\r
- }\r
+ /**\r
+ * Return a count of number of requests in the last M minutes, iff this is a "bad" request.\r
+ * If the request has been resumed (if it contains the THROTTLE_MARKER) it is considered good.\r
+ *\r
+ * @param request the request\r
+ * @return number of requests in the last M minutes, 0 means it is a "good" request\r
+ */\r
+ private int getRequestRate(HttpServletRequest request) {\r
+ String expecthdr = request.getHeader("Expect");\r
+ if (expecthdr != null && expecthdr.equalsIgnoreCase("100-continue"))\r
+ return 0;\r
\r
- /**\r
- * Configure the throttle. This should be called from BaseServlet.provisioningParametersChanged(), to make sure it stays up to date.\r
- */\r
- public static void configure() {\r
- Parameters p = Parameters.getParameter(Parameters.THROTTLE_FILTER);\r
- if (p != null) {\r
- try {\r
- Class.forName(JETTY_REQUEST);\r
- String v = p.getValue();\r
- if (v != null && !v.equals("off")) {\r
- String[] pp = v.split(",");\r
- if (pp != null) {\r
- n_requests = (pp.length > 0) ? getInt(pp[0], DEFAULT_N) : DEFAULT_N;\r
- m_minutes = (pp.length > 1) ? getInt(pp[1], DEFAULT_M) : DEFAULT_M;\r
- action = (pp.length > 2 && pp[2] != null && pp[2].equalsIgnoreCase("throttle")) ? ACTION_THROTTLE : ACTION_DROP;\r
- enabled = true;\r
- // ACTION_THROTTLE is not currently working, so is not supported\r
- if (action == ACTION_THROTTLE) {\r
- action = ACTION_DROP;\r
- logger.info("Throttling is not currently supported; action changed to DROP");\r
- }\r
- logger.info("ThrottleFilter is ENABLED for /publish requests; N="+n_requests+", M="+m_minutes+", Action="+action);\r
- return;\r
- }\r
- }\r
- } catch (ClassNotFoundException e) {\r
- logger.warn("Class "+JETTY_REQUEST+" is not available; this filter requires Jetty.");\r
- }\r
- }\r
- logger.info("ThrottleFilter is DISABLED for /publish requests.");\r
- enabled = false;\r
- map.clear();\r
- }\r
- private static int getInt(String s, int deflt) {\r
- try {\r
- return Integer.parseInt(s);\r
- } catch (NumberFormatException x) {\r
- return deflt;\r
- }\r
- }\r
- @Override\r
- public void destroy() {\r
- rolex.cancel();\r
- map.clear();\r
- }\r
+ String key = getConnectionId(request);\r
+ synchronized (map) {\r
+ Counter cnt = map.get(key);\r
+ if (cnt == null) {\r
+ cnt = new Counter();\r
+ map.put(key, cnt);\r
+ }\r
+ int n = cnt.getRequestRate();\r
+ return n;\r
+ }\r
+ }\r
\r
- @Override\r
- public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)\r
- throws IOException, ServletException\r
- {\r
- if (enabled && action == ACTION_THROTTLE) {\r
- throttleFilter((HttpServletRequest) request, (HttpServletResponse) response, chain);\r
- } else if (enabled) {\r
- dropFilter((HttpServletRequest) request, (HttpServletResponse) response, chain);\r
- } else {\r
- chain.doFilter(request, response);\r
- }\r
- }\r
- public void dropFilter(HttpServletRequest request, HttpServletResponse response, FilterChain chain)\r
- throws IOException, ServletException\r
- {\r
- int rate = getRequestRate((HttpServletRequest) request);\r
- if (rate >= n_requests) {\r
- // drop request - only works under Jetty\r
- String m = String.format("Dropping connection: %s %d bad connections in %d minutes", getConnectionId((HttpServletRequest) request), rate, m_minutes);\r
- logger.info(m);\r
- Request base_request = (request instanceof Request)\r
- ? (Request) request\r
- : AbstractHttpConnection.getCurrentConnection().getRequest();\r
- base_request.getConnection().getEndPoint().close();\r
- } else {\r
- chain.doFilter(request, response);\r
- }\r
- }\r
- public void throttleFilter(HttpServletRequest request, HttpServletResponse response, FilterChain chain)\r
- throws IOException, ServletException\r
- {\r
- // throttle request\r
- String id = getConnectionId((HttpServletRequest) request);\r
- int rate = getRequestRate((HttpServletRequest) request);\r
- Object results = request.getAttribute(THROTTLE_MARKER);\r
- if (rate >= n_requests && results == null) {\r
- String m = String.format("Throttling connection: %s %d bad connections in %d minutes", getConnectionId((HttpServletRequest) request), rate, m_minutes);\r
- logger.info(m);\r
- Continuation continuation = ContinuationSupport.getContinuation(request);\r
- continuation.suspend();\r
- register(id, continuation);\r
- continuation.undispatch();\r
- } else {\r
- chain.doFilter(request, response);\r
- @SuppressWarnings("resource")\r
- InputStream is = request.getInputStream();\r
- byte[] b = new byte[4096];\r
- int n = is.read(b);\r
- while (n > 0) {\r
- n = is.read(b);\r
- }\r
- resume(id);\r
- }\r
- }\r
- private Map<String, List<Continuation>> suspended_requests = new HashMap<String, List<Continuation>>();\r
- private void register(String id, Continuation continuation) {\r
- synchronized (suspended_requests) {\r
- List<Continuation> list = suspended_requests.get(id);\r
- if (list == null) {\r
- list = new ArrayList<Continuation>();\r
- suspended_requests.put(id, list);\r
- }\r
- list.add(continuation);\r
- }\r
- }\r
- private void resume(String id) {\r
- synchronized (suspended_requests) {\r
- List<Continuation> list = suspended_requests.get(id);\r
- if (list != null) {\r
- // when the waited for event happens\r
- Continuation continuation = list.remove(0);\r
- continuation.setAttribute(ThrottleFilter.THROTTLE_MARKER, new Object());\r
- continuation.resume();\r
- }\r
- }\r
- }\r
+ public class Counter {\r
+ private List<Long> times = new Vector<Long>(); // a record of request times\r
\r
- /**\r
- * Return a count of number of requests in the last M minutes, iff this is a "bad" request.\r
- * If the request has been resumed (if it contains the THROTTLE_MARKER) it is considered good.\r
- * @param request the request\r
- * @return number of requests in the last M minutes, 0 means it is a "good" request\r
- */\r
- private int getRequestRate(HttpServletRequest request) {\r
- String expecthdr = request.getHeader("Expect");\r
- if (expecthdr != null && expecthdr.equalsIgnoreCase("100-continue"))\r
- return 0;\r
+ public int prune() {\r
+ try {\r
+ long n = System.currentTimeMillis() - (m_minutes * ONE_MINUTE);\r
+ long t = times.get(0);\r
+ while (t < n) {\r
+ times.remove(0);\r
+ t = times.get(0);\r
+ }\r
+ } catch (IndexOutOfBoundsException e) {\r
+ // ignore\r
+ }\r
+ return times.size();\r
+ }\r
\r
- String key = getConnectionId(request);\r
- synchronized (map) {\r
- Counter cnt = map.get(key);\r
- if (cnt == null) {\r
- cnt = new Counter();\r
- map.put(key, cnt);\r
- }\r
- int n = cnt.getRequestRate();\r
- return n;\r
- }\r
- }\r
+ public int getRequestRate() {\r
+ times.add(System.currentTimeMillis());\r
+ return prune();\r
+ }\r
+ }\r
\r
- public class Counter {\r
- private List<Long> times = new Vector<Long>(); // a record of request times\r
- public int prune() {\r
- try {\r
- long n = System.currentTimeMillis() - (m_minutes * ONE_MINUTE);\r
- long t = times.get(0);\r
- while (t < n) {\r
- times.remove(0);\r
- t = times.get(0);\r
- }\r
- } catch (IndexOutOfBoundsException e) {\r
- // ignore\r
- }\r
- return times.size();\r
- }\r
- public int getRequestRate() {\r
- times.add(System.currentTimeMillis());\r
- return prune();\r
- }\r
- }\r
+ /**\r
+ * Identify a connection by endpoint IP address, and feed ID.\r
+ */\r
+ private String getConnectionId(HttpServletRequest req) {\r
+ return req.getRemoteAddr() + "/" + getFeedId(req);\r
+ }\r
\r
- /**\r
- * Identify a connection by endpoint IP address, and feed ID.\r
- */\r
- private String getConnectionId(HttpServletRequest req) {\r
- return req.getRemoteAddr() + "/" + getFeedId(req);\r
- }\r
- private int getFeedId(HttpServletRequest req) {\r
- String path = req.getPathInfo();\r
- if (path == null || path.length() < 2)\r
- return -1;\r
- path = path.substring(1);\r
- int ix = path.indexOf('/');\r
- if (ix < 0 || ix == path.length()-1)\r
- return -2;\r
- try {\r
- int feedid = Integer.parseInt(path.substring(0, ix));\r
- return feedid;\r
- } catch (NumberFormatException e) {\r
- return -1;\r
- }\r
- }\r
+ private int getFeedId(HttpServletRequest req) {\r
+ String path = req.getPathInfo();\r
+ if (path == null || path.length() < 2)\r
+ return -1;\r
+ path = path.substring(1);\r
+ int ix = path.indexOf('/');\r
+ if (ix < 0 || ix == path.length() - 1)\r
+ return -2;\r
+ try {\r
+ int feedid = Integer.parseInt(path.substring(0, ix));\r
+ return feedid;\r
+ } catch (NumberFormatException e) {\r
+ return -1;\r
+ }\r
+ }\r
\r
- @Override\r
- public void run() {\r
- // Once every 5 minutes, go through the map, and remove empty entrys\r
- for (Object s : map.keySet().toArray()) {\r
- synchronized (map) {\r
- Counter c = map.get(s);\r
- if (c.prune() <= 0)\r
- map.remove(s);\r
- }\r
- }\r
- }\r
+ @Override\r
+ public void run() {\r
+        // Once every 5 minutes, go through the map, and remove empty entries\r
+ for (Object s : map.keySet().toArray()) {\r
+ synchronized (map) {\r
+ Counter c = map.get(s);\r
+ if (c.prune() <= 0)\r
+ map.remove(s);\r
+ }\r
+ }\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: URLUtilities.java,v 1.2 2014/03/12 19:45:41 eby Exp $\r
*/\r
public class URLUtilities {\r
- /**\r
- * Generate the URL used to access a feed.\r
- * @param feedid the feed id\r
- * @return the URL\r
- */\r
- public static String generateFeedURL(int feedid) {\r
- return "https://" + BaseServlet.prov_name + "/feed/" + feedid;\r
- }\r
- /**\r
- * Generate the URL used to publish to a feed.\r
- * @param feedid the feed id\r
- * @return the URL\r
- */\r
- public static String generatePublishURL(int feedid) {\r
- return "https://" + BaseServlet.prov_name + "/publish/" + feedid;\r
- }\r
- /**\r
- * Generate the URL used to subscribe to a feed.\r
- * @param feedid the feed id\r
- * @return the URL\r
- */\r
- public static String generateSubscribeURL(int feedid) {\r
- return "https://" + BaseServlet.prov_name + "/subscribe/" + feedid;\r
- }\r
- /**\r
- * Generate the URL used to access a feed's logs.\r
- * @param feedid the feed id\r
- * @return the URL\r
- */\r
- public static String generateFeedLogURL(int feedid) {\r
- return "https://" + BaseServlet.prov_name + "/feedlog/" + feedid;\r
- }\r
- /**\r
- * Generate the URL used to access a subscription.\r
- * @param subid the subscription id\r
- * @return the URL\r
- */\r
- public static String generateSubscriptionURL(int subid) {\r
- return "https://" + BaseServlet.prov_name + "/subs/" + subid;\r
- }\r
- /**\r
- * Generate the URL used to access a subscription's logs.\r
- * @param subid the subscription id\r
- * @return the URL\r
- */\r
- public static String generateSubLogURL(int subid) {\r
- return "https://" + BaseServlet.prov_name + "/sublog/" + subid;\r
- }\r
- /**\r
- * Generate the URL used to access the provisioning data on the peer POD.\r
- * @return the URL\r
- */\r
- public static String generatePeerProvURL() {\r
- return "https://" + getPeerPodName() + "/internal/prov";\r
- }\r
- /**\r
- * Generate the URL used to access the logfile data on the peer POD.\r
- * @return the URL\r
- */\r
- public static String generatePeerLogsURL() {\r
- //Fixes for Itrack ticket - DATARTR-4#Fixing if only one Prov is configured, not to give exception to fill logs.\r
- String peerPodUrl = getPeerPodName();\r
- if(peerPodUrl.equals("") || peerPodUrl.equals(null)){\r
- return "";\r
- }\r
- \r
- return "https://" + peerPodUrl + "/internal/drlogs/";\r
- }\r
- /**\r
- * Return the real (non CNAME) version of the peer POD's DNS name.\r
- * @return the name\r
- */\r
- public static String getPeerPodName() {\r
- if (other_pod == null) {\r
- String this_pod = "";\r
- try {\r
- this_pod = InetAddress.getLocalHost().getHostName();\r
- System.out.println("this_pod: "+this_pod);\r
- } catch (UnknownHostException e) {\r
- this_pod = "";\r
- }\r
- System.out.println("ALL PODS: "+Arrays.asList(BaseServlet.getPods()));\r
- for (String pod : BaseServlet.getPods()) {\r
- if (!pod.equals(this_pod))\r
- other_pod = pod;\r
- }\r
- }\r
- return other_pod;\r
- }\r
- private static String other_pod;\r
+ /**\r
+ * Generate the URL used to access a feed.\r
+ *\r
+ * @param feedid the feed id\r
+ * @return the URL\r
+ */\r
+ public static String generateFeedURL(int feedid) {\r
+ return "https://" + BaseServlet.prov_name + "/feed/" + feedid;\r
+ }\r
+\r
+ /**\r
+ * Generate the URL used to publish to a feed.\r
+ *\r
+ * @param feedid the feed id\r
+ * @return the URL\r
+ */\r
+ public static String generatePublishURL(int feedid) {\r
+ return "https://" + BaseServlet.prov_name + "/publish/" + feedid;\r
+ }\r
+\r
+ /**\r
+ * Generate the URL used to subscribe to a feed.\r
+ *\r
+ * @param feedid the feed id\r
+ * @return the URL\r
+ */\r
+ public static String generateSubscribeURL(int feedid) {\r
+ return "https://" + BaseServlet.prov_name + "/subscribe/" + feedid;\r
+ }\r
+\r
+ /**\r
+ * Generate the URL used to access a feed's logs.\r
+ *\r
+ * @param feedid the feed id\r
+ * @return the URL\r
+ */\r
+ public static String generateFeedLogURL(int feedid) {\r
+ return "https://" + BaseServlet.prov_name + "/feedlog/" + feedid;\r
+ }\r
+\r
+ /**\r
+ * Generate the URL used to access a subscription.\r
+ *\r
+ * @param subid the subscription id\r
+ * @return the URL\r
+ */\r
+ public static String generateSubscriptionURL(int subid) {\r
+ return "https://" + BaseServlet.prov_name + "/subs/" + subid;\r
+ }\r
+\r
+ /**\r
+ * Generate the URL used to access a subscription's logs.\r
+ *\r
+ * @param subid the subscription id\r
+ * @return the URL\r
+ */\r
+ public static String generateSubLogURL(int subid) {\r
+ return "https://" + BaseServlet.prov_name + "/sublog/" + subid;\r
+ }\r
+\r
+ /**\r
+ * Generate the URL used to access the provisioning data on the peer POD.\r
+ *\r
+ * @return the URL\r
+ */\r
+ public static String generatePeerProvURL() {\r
+ return "https://" + getPeerPodName() + "/internal/prov";\r
+ }\r
+\r
+ /**\r
+ * Generate the URL used to access the logfile data on the peer POD.\r
+ *\r
+ * @return the URL\r
+ */\r
+ public static String generatePeerLogsURL() {\r
+ //Fixes for Itrack ticket - DATARTR-4#Fixing if only one Prov is configured, not to give exception to fill logs.\r
+ String peerPodUrl = getPeerPodName();\r
+ if (peerPodUrl.equals("") || peerPodUrl.equals(null)) {\r
+ return "";\r
+ }\r
+\r
+ return "https://" + peerPodUrl + "/internal/drlogs/";\r
+ }\r
+\r
+ /**\r
+ * Return the real (non CNAME) version of the peer POD's DNS name.\r
+ *\r
+ * @return the name\r
+ */\r
+ public static String getPeerPodName() {\r
+ if (other_pod == null) {\r
+ String this_pod = "";\r
+ try {\r
+ this_pod = InetAddress.getLocalHost().getHostName();\r
+ System.out.println("this_pod: " + this_pod);\r
+ } catch (UnknownHostException e) {\r
+ this_pod = "";\r
+ }\r
+ System.out.println("ALL PODS: " + Arrays.asList(BaseServlet.getPods()));\r
+ for (String pod : BaseServlet.getPods()) {\r
+ if (!pod.equals(this_pod))\r
+ other_pod = pod;\r
+ }\r
+ }\r
+ return other_pod;\r
+ }\r
+\r
+ private static String other_pod;\r
}\r
# ============LICENSE_START==================================================\r
# * org.onap.dmaap\r
# * ===========================================================================\r
-# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
# * ===========================================================================\r
# * Licensed under the Apache License, Version 2.0 (the "License");\r
# * you may not use this file except in compliance with the License.\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: DailyLatencyReport.java,v 1.2 2013/11/06 16:23:54 eby Exp $\r
*/\r
public class DailyLatencyReport extends ReportBase {\r
- private static final String SELECT_SQL =\r
- "select EVENT_TIME, TYPE, PUBLISH_ID, FEED_FILEID, FEEDID, CONTENT_LENGTH from LOG_RECORDS" +\r
- " where EVENT_TIME >= ? and EVENT_TIME <= ?";\r
+ private static final String SELECT_SQL =\r
+ "select EVENT_TIME, TYPE, PUBLISH_ID, FEED_FILEID, FEEDID, CONTENT_LENGTH from LOG_RECORDS" +\r
+ " where EVENT_TIME >= ? and EVENT_TIME <= ?";\r
\r
- private class Job {\r
- public long pubtime = 0;\r
- public long clen = 0;\r
- public List<Long> deltime = new ArrayList<Long>();\r
- public long minLatency() {\r
- long n = deltime.isEmpty() ? 0 : Long.MAX_VALUE;\r
- for (Long l : deltime)\r
- n = Math.min(n, l-pubtime);\r
- return n;\r
- }\r
- public long maxLatency() {\r
- long n = 0;\r
- for (Long l : deltime)\r
- n = Math.max(n, l-pubtime);\r
- return n;\r
- }\r
- public long totalLatency() {\r
- long n = 0;\r
- for (Long l : deltime)\r
- n += (l-pubtime);\r
- return n;\r
- }\r
- }\r
- private class Counters {\r
- public final String date;\r
- public final int feedid;\r
- public final Map<String, Job> jobs;\r
- public Counters(String d, int fid) {\r
- date = d;\r
- feedid = fid;\r
- jobs = new HashMap<String, Job>();\r
- }\r
- public void addEvent(long etime, String type, String id, String fid, long clen) {\r
- Job j = jobs.get(id);\r
- if (j == null) {\r
- j = new Job();\r
- jobs.put(id, j);\r
- }\r
- if (type.equals("pub")) {\r
- j.pubtime = getPstart(id);\r
- j.clen = clen;\r
- } else if (type.equals("del")) {\r
- j.deltime.add(etime);\r
- }\r
- }\r
- @Override\r
- public String toString() {\r
- long minsize = Long.MAX_VALUE, maxsize = 0, avgsize = 0;\r
- long minl = Long.MAX_VALUE, maxl = 0;\r
- long fanout = 0, totall = 0, totaln = 0;\r
- for (Job j : jobs.values()) {\r
- minsize = Math.min(minsize, j.clen);\r
- maxsize = Math.max(maxsize, j.clen);\r
- avgsize += j.clen;\r
- minl = Math.min(minl, j.minLatency());\r
- maxl = Math.max(maxl, j.maxLatency());\r
- totall += j.totalLatency();\r
- totaln += j.deltime.size();\r
- fanout += j.deltime.size();\r
- }\r
- if (jobs.size() > 0) {\r
- avgsize /= jobs.size();\r
- fanout /= jobs.size();\r
- }\r
- long avgl = (totaln > 0) ? (totall / totaln) : 0;\r
- return date + "," + feedid + "," + minsize + "," + maxsize + "," + avgsize + "," + minl + "," + maxl + "," + avgl + "," + fanout;\r
- }\r
- }\r
- private long getPstart(String t) {\r
- if (t.indexOf('.') > 0)\r
- t = t.substring(0, t.indexOf('.'));\r
- return Long.parseLong(t);\r
- }\r
+ private class Job {\r
+ public long pubtime = 0;\r
+ public long clen = 0;\r
+ public List<Long> deltime = new ArrayList<Long>();\r
+ public long minLatency() {\r
+ long n = deltime.isEmpty() ? 0 : Long.MAX_VALUE;\r
+ for (Long l : deltime)\r
+ n = Math.min(n, l-pubtime);\r
+ return n;\r
+ }\r
+ public long maxLatency() {\r
+ long n = 0;\r
+ for (Long l : deltime)\r
+ n = Math.max(n, l-pubtime);\r
+ return n;\r
+ }\r
+ public long totalLatency() {\r
+ long n = 0;\r
+ for (Long l : deltime)\r
+ n += (l-pubtime);\r
+ return n;\r
+ }\r
+ }\r
+ private class Counters {\r
+ public final String date;\r
+ public final int feedid;\r
+ public final Map<String, Job> jobs;\r
+ public Counters(String d, int fid) {\r
+ date = d;\r
+ feedid = fid;\r
+ jobs = new HashMap<String, Job>();\r
+ }\r
+ public void addEvent(long etime, String type, String id, String fid, long clen) {\r
+ Job j = jobs.get(id);\r
+ if (j == null) {\r
+ j = new Job();\r
+ jobs.put(id, j);\r
+ }\r
+ if (type.equals("pub")) {\r
+ j.pubtime = getPstart(id);\r
+ j.clen = clen;\r
+ } else if (type.equals("del")) {\r
+ j.deltime.add(etime);\r
+ }\r
+ }\r
+ @Override\r
+ public String toString() {\r
+ long minsize = Long.MAX_VALUE, maxsize = 0, avgsize = 0;\r
+ long minl = Long.MAX_VALUE, maxl = 0;\r
+ long fanout = 0, totall = 0, totaln = 0;\r
+ for (Job j : jobs.values()) {\r
+ minsize = Math.min(minsize, j.clen);\r
+ maxsize = Math.max(maxsize, j.clen);\r
+ avgsize += j.clen;\r
+ minl = Math.min(minl, j.minLatency());\r
+ maxl = Math.max(maxl, j.maxLatency());\r
+ totall += j.totalLatency();\r
+ totaln += j.deltime.size();\r
+ fanout += j.deltime.size();\r
+ }\r
+ if (jobs.size() > 0) {\r
+ avgsize /= jobs.size();\r
+ fanout /= jobs.size();\r
+ }\r
+ long avgl = (totaln > 0) ? (totall / totaln) : 0;\r
+ return date + "," + feedid + "," + minsize + "," + maxsize + "," + avgsize + "," + minl + "," + maxl + "," + avgl + "," + fanout;\r
+ }\r
+ }\r
+ private long getPstart(String t) {\r
+ if (t.indexOf('.') > 0)\r
+ t = t.substring(0, t.indexOf('.'));\r
+ return Long.parseLong(t);\r
+ }\r
\r
- @Override\r
- public void run() {\r
- Map<String, Counters> map = new HashMap<String, Counters>();\r
- SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");\r
- long start = System.currentTimeMillis();\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- PreparedStatement ps = conn.prepareStatement(SELECT_SQL);\r
- ps.setLong(1, from);\r
- ps.setLong(2, to);\r
- ResultSet rs = ps.executeQuery();\r
- while (rs.next()) {\r
- String id = rs.getString("PUBLISH_ID");\r
- int feed = rs.getInt("FEEDID");\r
- long etime = rs.getLong("EVENT_TIME");\r
- String type = rs.getString("TYPE");\r
- String fid = rs.getString("FEED_FILEID");\r
- long clen = rs.getLong("CONTENT_LENGTH");\r
- String date = sdf.format(new Date(getPstart(id)));\r
- String key = date + "," + feed;\r
- Counters c = map.get(key);\r
- if (c == null) {\r
- c = new Counters(date, feed);\r
- map.put(key, c);\r
- }\r
- c.addEvent(etime, type, id, fid, clen);\r
- }\r
- rs.close();\r
- ps.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- logger.debug("Query time: " + (System.currentTimeMillis()-start) + " ms");\r
- try {\r
- PrintWriter os = new PrintWriter(outfile);\r
- os.println("date,feedid,minsize,maxsize,avgsize,minlat,maxlat,avglat,fanout");\r
- for (String key : new TreeSet<String>(map.keySet())) {\r
- Counters c = map.get(key);\r
- os.println(c.toString());\r
- }\r
- os.close();\r
- } catch (FileNotFoundException e) {\r
- System.err.println("File cannot be written: "+outfile);\r
- }\r
- }\r
+ @Override\r
+ public void run() {\r
+ Map<String, Counters> map = new HashMap<String, Counters>();\r
+ SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");\r
+ long start = System.currentTimeMillis();\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ PreparedStatement ps = conn.prepareStatement(SELECT_SQL);\r
+ ps.setLong(1, from);\r
+ ps.setLong(2, to);\r
+ ResultSet rs = ps.executeQuery();\r
+ while (rs.next()) {\r
+ String id = rs.getString("PUBLISH_ID");\r
+ int feed = rs.getInt("FEEDID");\r
+ long etime = rs.getLong("EVENT_TIME");\r
+ String type = rs.getString("TYPE");\r
+ String fid = rs.getString("FEED_FILEID");\r
+ long clen = rs.getLong("CONTENT_LENGTH");\r
+ String date = sdf.format(new Date(getPstart(id)));\r
+ String key = date + "," + feed;\r
+ Counters c = map.get(key);\r
+ if (c == null) {\r
+ c = new Counters(date, feed);\r
+ map.put(key, c);\r
+ }\r
+ c.addEvent(etime, type, id, fid, clen);\r
+ }\r
+ rs.close();\r
+ ps.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ logger.debug("Query time: " + (System.currentTimeMillis()-start) + " ms");\r
+ try {\r
+ PrintWriter os = new PrintWriter(outfile);\r
+ os.println("date,feedid,minsize,maxsize,avgsize,minlat,maxlat,avglat,fanout");\r
+ for (String key : new TreeSet<String>(map.keySet())) {\r
+ Counters c = map.get(key);\r
+ os.println(c.toString());\r
+ }\r
+ os.close();\r
+ } catch (FileNotFoundException e) {\r
+ System.err.println("File cannot be written: "+outfile);\r
+ }\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: FeedReport.java,v 1.2 2013/11/06 16:23:55 eby Exp $\r
*/\r
public class FeedReport extends ReportBase {\r
- private static final String SELECT_SQL =\r
- // Note to use the time in the publish_id, use date(from_unixtime(substring(publish_id, 1, 10)))\r
- // To just use month, substring(from_unixtime(event_time div 1000), 1, 7)\r
- "select date(from_unixtime(event_time div 1000)) as date, type, feedid, delivery_subid, count(*) as count" +\r
- " from LOG_RECORDS" +\r
- " where type = 'pub' or type = 'del'" +\r
- " group by date, type, feedid, delivery_subid";\r
- private static final String SELECT_SQL_OLD =\r
- "select PUBLISH_ID, TYPE, FEEDID, DELIVERY_SUBID from LOG_RECORDS where EVENT_TIME >= ? and EVENT_TIME <= ?";\r
+ private static final String SELECT_SQL =\r
+ // Note to use the time in the publish_id, use date(from_unixtime(substring(publish_id, 1, 10)))\r
+ // To just use month, substring(from_unixtime(event_time div 1000), 1, 7)\r
+ "select date(from_unixtime(event_time div 1000)) as date, type, feedid, delivery_subid, count(*) as count" +\r
+ " from LOG_RECORDS" +\r
+ " where type = 'pub' or type = 'del'" +\r
+ " group by date, type, feedid, delivery_subid";\r
+ private static final String SELECT_SQL_OLD =\r
+ "select PUBLISH_ID, TYPE, FEEDID, DELIVERY_SUBID from LOG_RECORDS where EVENT_TIME >= ? and EVENT_TIME <= ?";\r
+\r
+ @Override\r
+ public void run() {\r
+ boolean alg1 = true;\r
+ JSONObject jo = new JSONObject();\r
+ long start = System.currentTimeMillis();\r
+ StringBuilder sb = new StringBuilder();\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ PreparedStatement ps = conn.prepareStatement(SELECT_SQL);\r
+// ps.setLong(1, from);\r
+// ps.setLong(2, to);\r
+ ResultSet rs = ps.executeQuery();\r
+ while (rs.next()) {\r
+ if (alg1) {\r
+ String date = rs.getString("date");\r
+ String type = rs.getString("type");\r
+ int feedid = rs.getInt("feedid");\r
+ int subid = type.equals("del") ? rs.getInt("delivery_subid") : 0;\r
+ int count = rs.getInt("count");\r
+ sb.append(date + "," + type + "," + feedid + "," + subid + "," + count + "\n");\r
+ } else {\r
+ String date = rs.getString("date");\r
+ JSONObject datemap = jo.optJSONObject(date);\r
+ if (datemap == null) {\r
+ datemap = new JSONObject();\r
+ jo.put(date, datemap);\r
+ }\r
+ int feed = rs.getInt("FEEDID");\r
+ JSONObject feedmap = datemap.optJSONObject("" + feed);\r
+ if (feedmap == null) {\r
+ feedmap = new JSONObject();\r
+ feedmap.put("pubcount", 0);\r
+ datemap.put("" + feed, feedmap);\r
+ }\r
+ String type = rs.getString("TYPE");\r
+ int count = rs.getInt("count");\r
+ if (type.equals("pub")) {\r
+ feedmap.put("pubcount", count);\r
+ } else if (type.equals("del")) {\r
+ String subid = "" + rs.getInt("DELIVERY_SUBID");\r
+ feedmap.put(subid, count);\r
+ }\r
+ }\r
+ }\r
+ rs.close();\r
+ ps.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");\r
+ try {\r
+ PrintWriter os = new PrintWriter(outfile);\r
+ if (alg1) {\r
+ os.print("date,type,feedid,subid,count\n");\r
+ os.print(sb.toString());\r
+ } else {\r
+ os.println(toHTML(jo));\r
+ }\r
+ os.close();\r
+ } catch (FileNotFoundException e) {\r
+ System.err.println("File cannot be written: " + outfile);\r
+ }\r
+ }\r
+\r
+ public void run2() {\r
+ JSONObject jo = new JSONObject();\r
+ SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");\r
+ long start = System.currentTimeMillis();\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ PreparedStatement ps = conn.prepareStatement(SELECT_SQL_OLD);\r
+ ps.setLong(1, from);\r
+ ps.setLong(2, to);\r
+ ps.setFetchSize(100000);\r
+ ResultSet rs = ps.executeQuery();\r
+ while (rs.next()) {\r
+ String id = rs.getString("PUBLISH_ID");\r
+ String date = sdf.format(new Date(getPstart(id)));\r
+ JSONObject datemap = jo.optJSONObject(date);\r
+ if (datemap == null) {\r
+ datemap = new JSONObject();\r
+ jo.put(date, datemap);\r
+ }\r
+ int feed = rs.getInt("FEEDID");\r
+ JSONObject feedmap = datemap.optJSONObject("" + feed);\r
+ if (feedmap == null) {\r
+ feedmap = new JSONObject();\r
+ feedmap.put("pubcount", 0);\r
+ datemap.put("" + feed, feedmap);\r
+ }\r
+ String type = rs.getString("TYPE");\r
+ if (type.equals("pub")) {\r
+ try {\r
+ int n = feedmap.getInt("pubcount");\r
+ feedmap.put("pubcount", n + 1);\r
+ } catch (JSONException e) {\r
+ feedmap.put("pubcount", 1);\r
+ }\r
+ } else if (type.equals("del")) {\r
+ String subid = "" + rs.getInt("DELIVERY_SUBID");\r
+ try {\r
+ int n = feedmap.getInt(subid);\r
+ feedmap.put(subid, n + 1);\r
+ } catch (JSONException e) {\r
+ feedmap.put(subid, 1);\r
+ }\r
+ }\r
+ }\r
+ rs.close();\r
+ ps.close();\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");\r
+ try {\r
+ PrintWriter os = new PrintWriter(outfile);\r
+ os.println(toHTML(jo));\r
+ os.close();\r
+ } catch (FileNotFoundException e) {\r
+ System.err.println("File cannot be written: " + outfile);\r
+ }\r
+ }\r
+\r
+ private long getPstart(String t) {\r
+ if (t.indexOf('.') > 0)\r
+ t = t.substring(0, t.indexOf('.'));\r
+ return Long.parseLong(t);\r
+ }\r
+\r
+ @SuppressWarnings("unused")\r
+ private static String toHTMLNested(JSONObject jo) {\r
+ StringBuilder s = new StringBuilder();\r
+ s.append("<table>\n");\r
+ s.append("<tr><th>Date</th><th>Feeds</th></tr>\n");\r
+ String[] dates = JSONObject.getNames(jo);\r
+ Arrays.sort(dates);\r
+ for (int i = dates.length - 1; i >= 0; i--) {\r
+ String date = dates[i];\r
+ JSONObject j2 = jo.getJSONObject(date);\r
+ String[] feeds = JSONObject.getNames(j2);\r
+ Arrays.sort(feeds);\r
+ s.append("<tr><td>" + date + "</td><td>");\r
+ s.append(feeds.length).append(feeds.length > 1 ? " Feeds\n" : " Feed\n");\r
+ s.append("<table>\n");\r
+ s.append("<tr><th>Feed ID</th><th>Publish Count</th><th>Subscriptions</th></tr>\n");\r
+ for (String feed : feeds) {\r
+ JSONObject j3 = j2.getJSONObject(feed);\r
+ String[] subs = JSONObject.getNames(j3);\r
+ Arrays.sort(subs);\r
+ s.append("<tr><td>" + feed + "</td>");\r
+ s.append("<td>" + j3.getInt("pubcount") + "</td>");\r
+ int scnt = j3.length() - 1;\r
+ s.append("<td>").append(scnt).append(" Subcription");\r
+ if (scnt > 1)\r
+ s.append("s");\r
+ s.append("<table>\n");\r
+ s.append("<tr><th>Sub ID</th><th>Delivery Count</th></tr>\n");\r
+ for (String sub : subs) {\r
+ if (!sub.equals("pubcount")) {\r
+ s.append("<tr><td>" + sub + "</td>");\r
+ s.append("<td>" + j3.getInt(sub) + "</td>");\r
+ s.append("</td></tr>\n");\r
+ }\r
+ }\r
+ s.append("</table>\n");\r
\r
- @Override\r
- public void run() {\r
- boolean alg1 = true;\r
- JSONObject jo = new JSONObject();\r
- long start = System.currentTimeMillis();\r
- StringBuilder sb = new StringBuilder();\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- PreparedStatement ps = conn.prepareStatement(SELECT_SQL);\r
-// ps.setLong(1, from);\r
-// ps.setLong(2, to);\r
- ResultSet rs = ps.executeQuery();\r
- while (rs.next()) {\r
- if (alg1) {\r
- String date = rs.getString("date");\r
- String type = rs.getString("type");\r
- int feedid = rs.getInt("feedid");\r
- int subid = type.equals("del") ? rs.getInt("delivery_subid") : 0;\r
- int count = rs.getInt("count");\r
- sb.append(date + "," + type + "," + feedid + "," + subid + "," + count + "\n");\r
- } else {\r
- String date = rs.getString("date");\r
- JSONObject datemap = jo.optJSONObject(date);\r
- if (datemap == null) {\r
- datemap = new JSONObject();\r
- jo.put(date, datemap);\r
- }\r
- int feed = rs.getInt("FEEDID");\r
- JSONObject feedmap = datemap.optJSONObject(""+feed);\r
- if (feedmap == null) {\r
- feedmap = new JSONObject();\r
- feedmap.put("pubcount", 0);\r
- datemap.put(""+feed, feedmap);\r
- }\r
- String type = rs.getString("TYPE");\r
- int count = rs.getInt("count");\r
- if (type.equals("pub")) {\r
- feedmap.put("pubcount", count);\r
- } else if (type.equals("del")) {\r
- String subid = ""+rs.getInt("DELIVERY_SUBID");\r
- feedmap.put(subid, count);\r
- }\r
- }\r
- }\r
- rs.close();\r
- ps.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- logger.debug("Query time: " + (System.currentTimeMillis()-start) + " ms");\r
- try {\r
- PrintWriter os = new PrintWriter(outfile);\r
- if (alg1) {\r
- os.print("date,type,feedid,subid,count\n");\r
- os.print(sb.toString());\r
- } else {\r
- os.println(toHTML(jo));\r
- }\r
- os.close();\r
- } catch (FileNotFoundException e) {\r
- System.err.println("File cannot be written: "+outfile);\r
- }\r
- }\r
+ s.append("</td></tr>\n");\r
+ }\r
+ s.append("</table>\n");\r
+ s.append("</td></tr>\n");\r
+ }\r
+ s.append("</table>\n");\r
+ return s.toString();\r
+ }\r
\r
- public void run2() {\r
- JSONObject jo = new JSONObject();\r
- SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");\r
- long start = System.currentTimeMillis();\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- PreparedStatement ps = conn.prepareStatement(SELECT_SQL_OLD);\r
- ps.setLong(1, from);\r
- ps.setLong(2, to);\r
- ps.setFetchSize(100000);\r
- ResultSet rs = ps.executeQuery();\r
- while (rs.next()) {\r
- String id = rs.getString("PUBLISH_ID");\r
- String date = sdf.format(new Date(getPstart(id)));\r
- JSONObject datemap = jo.optJSONObject(date);\r
- if (datemap == null) {\r
- datemap = new JSONObject();\r
- jo.put(date, datemap);\r
- }\r
- int feed = rs.getInt("FEEDID");\r
- JSONObject feedmap = datemap.optJSONObject(""+feed);\r
- if (feedmap == null) {\r
- feedmap = new JSONObject();\r
- feedmap.put("pubcount", 0);\r
- datemap.put(""+feed, feedmap);\r
- }\r
- String type = rs.getString("TYPE");\r
- if (type.equals("pub")) {\r
- try {\r
- int n = feedmap.getInt("pubcount");\r
- feedmap.put("pubcount", n+1);\r
- } catch (JSONException e) {\r
- feedmap.put("pubcount", 1);\r
- }\r
- } else if (type.equals("del")) {\r
- String subid = ""+rs.getInt("DELIVERY_SUBID");\r
- try {\r
- int n = feedmap.getInt(subid);\r
- feedmap.put(subid, n+1);\r
- } catch (JSONException e) {\r
- feedmap.put(subid, 1);\r
- }\r
- }\r
- }\r
- rs.close();\r
- ps.close();\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- logger.debug("Query time: " + (System.currentTimeMillis()-start) + " ms");\r
- try {\r
- PrintWriter os = new PrintWriter(outfile);\r
- os.println(toHTML(jo));\r
- os.close();\r
- } catch (FileNotFoundException e) {\r
- System.err.println("File cannot be written: "+outfile);\r
- }\r
- }\r
- private long getPstart(String t) {\r
- if (t.indexOf('.') > 0)\r
- t = t.substring(0, t.indexOf('.'));\r
- return Long.parseLong(t);\r
- }\r
- @SuppressWarnings("unused")\r
- private static String toHTMLNested(JSONObject jo) {\r
- StringBuilder s = new StringBuilder();\r
- s.append("<table>\n");\r
- s.append("<tr><th>Date</th><th>Feeds</th></tr>\n");\r
- String[] dates = JSONObject.getNames(jo);\r
- Arrays.sort(dates);\r
- for (int i = dates.length-1; i >= 0; i--) {\r
- String date = dates[i];\r
- JSONObject j2 = jo.getJSONObject(date);\r
- String[] feeds = JSONObject.getNames(j2);\r
- Arrays.sort(feeds);\r
- s.append("<tr><td>"+date+"</td><td>");\r
- s.append(feeds.length).append(feeds.length > 1 ? " Feeds\n" : " Feed\n");\r
- s.append("<table>\n");\r
- s.append("<tr><th>Feed ID</th><th>Publish Count</th><th>Subscriptions</th></tr>\n");\r
- for (String feed : feeds) {\r
- JSONObject j3 = j2.getJSONObject(feed);\r
- String[] subs = JSONObject.getNames(j3);\r
- Arrays.sort(subs);\r
- s.append("<tr><td>"+feed+"</td>");\r
- s.append("<td>"+j3.getInt("pubcount")+"</td>");\r
- int scnt = j3.length()-1;\r
- s.append("<td>").append(scnt).append(" Subcription");\r
- if (scnt > 1)\r
- s.append("s");\r
- s.append("<table>\n");\r
- s.append("<tr><th>Sub ID</th><th>Delivery Count</th></tr>\n");\r
- for (String sub : subs) {\r
- if (!sub.equals("pubcount")) {\r
- s.append("<tr><td>"+sub+"</td>");\r
- s.append("<td>"+j3.getInt(sub)+"</td>");\r
- s.append("</td></tr>\n");\r
- }\r
- }\r
- s.append("</table>\n");\r
+ private static String toHTML(JSONObject jo) {\r
+ StringBuilder s = new StringBuilder();\r
+ s.append("<table>\n");\r
+ s.append("<tr><th>Date</th><th>Feeds</th><th>Feed ID</th><th>Publish Count</th><th>Subs</th><th>Sub ID</th><th>Delivery Count</th></tr>\n");\r
+ String[] dates = JSONObject.getNames(jo);\r
+ Arrays.sort(dates);\r
+ for (int i = dates.length - 1; i >= 0; i--) {\r
+ String date = dates[i];\r
+ JSONObject j2 = jo.getJSONObject(date);\r
+ int rc1 = countrows(j2);\r
+ String[] feeds = JSONObject.getNames(j2);\r
+ Arrays.sort(feeds);\r
+ s.append("<tr><td rowspan=\"" + rc1 + "\">")\r
+ .append(date)\r
+ .append("</td>");\r
+ s.append("<td rowspan=\"" + rc1 + "\">")\r
+ .append(feeds.length)\r
+ .append("</td>");\r
+ String px1 = "";\r
+ for (String feed : feeds) {\r
+ JSONObject j3 = j2.getJSONObject(feed);\r
+ int pubcount = j3.getInt("pubcount");\r
+ int subcnt = j3.length() - 1;\r
+ int rc2 = (subcnt < 1) ? 1 : subcnt;\r
+ String[] subs = JSONObject.getNames(j3);\r
+ Arrays.sort(subs);\r
+ s.append(px1)\r
+ .append("<td rowspan=\"" + rc2 + "\">")\r
+ .append(feed)\r
+ .append("</td>");\r
+ s.append("<td rowspan=\"" + rc2 + "\">")\r
+ .append(pubcount)\r
+ .append("</td>");\r
+ s.append("<td rowspan=\"" + rc2 + "\">")\r
+ .append(subcnt)\r
+ .append("</td>");\r
+ String px2 = "";\r
+ for (String sub : subs) {\r
+ if (!sub.equals("pubcount")) {\r
+ s.append(px2);\r
+ s.append("<td>" + sub + "</td>");\r
+ s.append("<td>" + j3.getInt(sub) + "</td>");\r
+ s.append("</tr>\n");\r
+ px2 = "<tr>";\r
+ }\r
+ }\r
+ if (px2.equals(""))\r
+ s.append("<td></td><td></td></tr>\n");\r
+ px1 = "<tr>";\r
+ }\r
+ }\r
+ s.append("</table>\n");\r
+ return s.toString();\r
+ }\r
\r
- s.append("</td></tr>\n");\r
- }\r
- s.append("</table>\n");\r
- s.append("</td></tr>\n");\r
- }\r
- s.append("</table>\n");\r
- return s.toString();\r
- }\r
- private static String toHTML(JSONObject jo) {\r
- StringBuilder s = new StringBuilder();\r
- s.append("<table>\n");\r
- s.append("<tr><th>Date</th><th>Feeds</th><th>Feed ID</th><th>Publish Count</th><th>Subs</th><th>Sub ID</th><th>Delivery Count</th></tr>\n");\r
- String[] dates = JSONObject.getNames(jo);\r
- Arrays.sort(dates);\r
- for (int i = dates.length-1; i >= 0; i--) {\r
- String date = dates[i];\r
- JSONObject j2 = jo.getJSONObject(date);\r
- int rc1 = countrows(j2);\r
- String[] feeds = JSONObject.getNames(j2);\r
- Arrays.sort(feeds);\r
- s.append("<tr><td rowspan=\"" + rc1 + "\">")\r
- .append(date)\r
- .append("</td>");\r
- s.append("<td rowspan=\"" + rc1 + "\">")\r
- .append(feeds.length)\r
- .append("</td>");\r
- String px1 = "";\r
- for (String feed : feeds) {\r
- JSONObject j3 = j2.getJSONObject(feed);\r
- int pubcount = j3.getInt("pubcount");\r
- int subcnt = j3.length()-1;\r
- int rc2 = (subcnt < 1) ? 1 : subcnt;\r
- String[] subs = JSONObject.getNames(j3);\r
- Arrays.sort(subs);\r
- s.append(px1)\r
- .append("<td rowspan=\"" + rc2 + "\">")\r
- .append(feed)\r
- .append("</td>");\r
- s.append("<td rowspan=\"" + rc2 + "\">")\r
- .append(pubcount)\r
- .append("</td>");\r
- s.append("<td rowspan=\"" + rc2 + "\">")\r
- .append(subcnt)\r
- .append("</td>");\r
- String px2 = "";\r
- for (String sub : subs) {\r
- if (!sub.equals("pubcount")) {\r
- s.append(px2);\r
- s.append("<td>"+sub+"</td>");\r
- s.append("<td>"+j3.getInt(sub)+"</td>");\r
- s.append("</tr>\n");\r
- px2 = "<tr>";\r
- }\r
- }\r
- if (px2.equals(""))\r
- s.append("<td></td><td></td></tr>\n");\r
- px1 = "<tr>";\r
- }\r
- }\r
- s.append("</table>\n");\r
- return s.toString();\r
- }\r
- private static int countrows(JSONObject x) {\r
- int n = 0;\r
- for (String feed : JSONObject.getNames(x)) {\r
- JSONObject j3 = x.getJSONObject(feed);\r
- int subcnt = j3.length()-1;\r
- int rc2 = (subcnt < 1) ? 1 : subcnt;\r
- n += rc2;\r
- }\r
- return (n > 0) ? n : 1;\r
- }\r
+ private static int countrows(JSONObject x) {\r
+ int n = 0;\r
+ for (String feed : JSONObject.getNames(x)) {\r
+ JSONObject j3 = x.getJSONObject(feed);\r
+ int subcnt = j3.length() - 1;\r
+ int rc2 = (subcnt < 1) ? 1 : subcnt;\r
+ n += rc2;\r
+ }\r
+ return (n > 0) ? n : 1;\r
+ }\r
\r
- /**\r
- * Convert a .CSV file (as generated by the normal FeedReport mechanism) to an HTML table.\r
- * @param args\r
- */\r
- public static void main(String[] args) {\r
- int rtype = 0; // 0 -> day, 1 -> week, 2 -> month, 3 -> year\r
- String infile = null;\r
- String outfile = null;\r
- for (int i = 0; i < args.length; i++) {\r
- if (args[i].equals("-t")) {\r
- switch (args[++i].charAt(0)) {\r
- case 'w': rtype = 1; break;\r
- case 'm': rtype = 2; break;\r
- case 'y': rtype = 3; break;\r
- default: rtype = 0; break;\r
- }\r
- } else if (infile == null) {\r
- infile = args[i];\r
- } else if (outfile == null) {\r
- outfile = args[i];\r
- }\r
- }\r
- if (infile == null) {\r
- System.err.println("usage: FeedReport [ -t <reporttype> ] [ <input .csv> ] [ <output .html> ]");\r
- System.exit(1);\r
- }\r
- try {\r
- JSONObject jo = new JSONObject();\r
- LineNumberReader lr = new LineNumberReader(new FileReader(infile));\r
- String line = lr.readLine();\r
- while (line != null) {\r
- String[] tt = line.split(",");\r
- if (tt[0].startsWith("2")) {\r
- String date = tt[0];\r
- switch (rtype) {\r
- case 1:\r
- String[] xx = date.split("-");\r
- Calendar cal = new GregorianCalendar(new Integer(xx[0]), new Integer(xx[1])-1, new Integer(xx[2]));\r
- date = xx[0] + "-W" + cal.get(Calendar.WEEK_OF_YEAR);\r
- break;\r
- case 2: date = date.substring(0, 7); break;\r
- case 3: date = date.substring(0, 4); break;\r
- }\r
- JSONObject datemap = jo.optJSONObject(date);\r
- if (datemap == null) {\r
- datemap = new JSONObject();\r
- jo.put(date, datemap);\r
- }\r
- int feed = Integer.parseInt(tt[2]);\r
- JSONObject feedmap = datemap.optJSONObject(""+feed);\r
- if (feedmap == null) {\r
- feedmap = new JSONObject();\r
- feedmap.put("pubcount", 0);\r
- datemap.put(""+feed, feedmap);\r
- }\r
- String type = tt[1];\r
- int count = Integer.parseInt(tt[4]);\r
- if (type.equals("pub")) {\r
- try {\r
- int n = feedmap.getInt("pubcount");\r
- feedmap.put("pubcount", n+count);\r
- } catch (JSONException e) {\r
- feedmap.put("pubcount", count);\r
- }\r
- } else if (type.equals("del")) {\r
- String subid = tt[3];\r
- try {\r
- int n = feedmap.getInt(subid);\r
- feedmap.put(subid, n+count);\r
- } catch (JSONException e) {\r
- feedmap.put(subid, count);\r
- }\r
- }\r
- }\r
- line = lr.readLine();\r
- }\r
- lr.close();\r
- String t = toHTML(jo);\r
- switch (rtype) {\r
- case 1: t = t.replaceAll("<th>Date</th>", "<th>Week</th>"); break;\r
- case 2: t = t.replaceAll("<th>Date</th>", "<th>Month</th>"); break;\r
- case 3: t = t.replaceAll("<th>Date</th>", "<th>Year</th>"); break;\r
- }\r
- System.out.println(t);\r
- } catch (Exception e) {\r
- System.err.println(e);\r
- e.printStackTrace();\r
- }\r
- }\r
+ /**\r
+ * Convert a .CSV file (as generated by the normal FeedReport mechanism) to an HTML table.\r
+ *\r
+ * @param args\r
+ */\r
+ public static void main(String[] args) {\r
+ int rtype = 0; // 0 -> day, 1 -> week, 2 -> month, 3 -> year\r
+ String infile = null;\r
+ String outfile = null;\r
+ for (int i = 0; i < args.length; i++) {\r
+ if (args[i].equals("-t")) {\r
+ switch (args[++i].charAt(0)) {\r
+ case 'w':\r
+ rtype = 1;\r
+ break;\r
+ case 'm':\r
+ rtype = 2;\r
+ break;\r
+ case 'y':\r
+ rtype = 3;\r
+ break;\r
+ default:\r
+ rtype = 0;\r
+ break;\r
+ }\r
+ } else if (infile == null) {\r
+ infile = args[i];\r
+ } else if (outfile == null) {\r
+ outfile = args[i];\r
+ }\r
+ }\r
+ if (infile == null) {\r
+ System.err.println("usage: FeedReport [ -t <reporttype> ] [ <input .csv> ] [ <output .html> ]");\r
+ System.exit(1);\r
+ }\r
+ try {\r
+ JSONObject jo = new JSONObject();\r
+ LineNumberReader lr = new LineNumberReader(new FileReader(infile));\r
+ String line = lr.readLine();\r
+ while (line != null) {\r
+ String[] tt = line.split(",");\r
+ if (tt[0].startsWith("2")) {\r
+ String date = tt[0];\r
+ switch (rtype) {\r
+ case 1:\r
+ String[] xx = date.split("-");\r
+ Calendar cal = new GregorianCalendar(new Integer(xx[0]), new Integer(xx[1]) - 1, new Integer(xx[2]));\r
+ date = xx[0] + "-W" + cal.get(Calendar.WEEK_OF_YEAR);\r
+ break;\r
+ case 2:\r
+ date = date.substring(0, 7);\r
+ break;\r
+ case 3:\r
+ date = date.substring(0, 4);\r
+ break;\r
+ }\r
+ JSONObject datemap = jo.optJSONObject(date);\r
+ if (datemap == null) {\r
+ datemap = new JSONObject();\r
+ jo.put(date, datemap);\r
+ }\r
+ int feed = Integer.parseInt(tt[2]);\r
+ JSONObject feedmap = datemap.optJSONObject("" + feed);\r
+ if (feedmap == null) {\r
+ feedmap = new JSONObject();\r
+ feedmap.put("pubcount", 0);\r
+ datemap.put("" + feed, feedmap);\r
+ }\r
+ String type = tt[1];\r
+ int count = Integer.parseInt(tt[4]);\r
+ if (type.equals("pub")) {\r
+ try {\r
+ int n = feedmap.getInt("pubcount");\r
+ feedmap.put("pubcount", n + count);\r
+ } catch (JSONException e) {\r
+ feedmap.put("pubcount", count);\r
+ }\r
+ } else if (type.equals("del")) {\r
+ String subid = tt[3];\r
+ try {\r
+ int n = feedmap.getInt(subid);\r
+ feedmap.put(subid, n + count);\r
+ } catch (JSONException e) {\r
+ feedmap.put(subid, count);\r
+ }\r
+ }\r
+ }\r
+ line = lr.readLine();\r
+ }\r
+ lr.close();\r
+ String t = toHTML(jo);\r
+ switch (rtype) {\r
+ case 1:\r
+ t = t.replaceAll("<th>Date</th>", "<th>Week</th>");\r
+ break;\r
+ case 2:\r
+ t = t.replaceAll("<th>Date</th>", "<th>Month</th>");\r
+ break;\r
+ case 3:\r
+ t = t.replaceAll("<th>Date</th>", "<th>Year</th>");\r
+ break;\r
+ }\r
+ System.out.println(t);\r
+ } catch (Exception e) {\r
+ System.err.println(e);\r
+ e.printStackTrace();\r
+ }\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: LatencyReport.java,v 1.1 2013/10/28 18:06:53 eby Exp $\r
*/\r
public class LatencyReport extends ReportBase {\r
- private static final String SELECT_SQL =\r
- "select EVENT_TIME, TYPE, PUBLISH_ID, FEED_FILEID, FEEDID, CONTENT_LENGTH from LOG_RECORDS" +\r
- " where EVENT_TIME >= ? and EVENT_TIME <= ? order by PUBLISH_ID, EVENT_TIME";\r
-\r
- private class Event {\r
- public final String type;\r
- public final long time;\r
- public Event(String t, long tm) {\r
- type = t;\r
- time = tm;\r
- }\r
- }\r
- private class Counters {\r
- public final String id;\r
- public final int feedid;\r
- public final long clen;\r
- public final String fileid;\r
- public final List<Event> events;\r
- public Counters(String i, int fid, long c, String s) {\r
- id = i;\r
- feedid = fid;\r
- clen = c;\r
- fileid = s;\r
- events = new ArrayList<Event>();\r
- }\r
- private long pubtime;\r
- public void addEvent(String t, long tm) {\r
- events.add(new Event(t, tm));\r
- if (t.equals("pub"))\r
- pubtime = tm;\r
- }\r
- public long min() {\r
- long min = Long.MAX_VALUE;\r
- for (Event e : events) {\r
- if (e.type.equals("del")) {\r
- min = Math.min(min, e.time - pubtime);\r
- }\r
- }\r
- return min;\r
- }\r
- public long max() {\r
- long max = 0;\r
- for (Event e : events) {\r
- if (e.type.equals("del")) {\r
- max = Math.max(max, e.time - pubtime);\r
- }\r
- }\r
- return max;\r
- }\r
- public long avg() {\r
- long total = 0, c = 0;\r
- for (Event e : events) {\r
- if (e.type.equals("del")) {\r
- total += e.time - pubtime;\r
- c++;\r
- }\r
- }\r
- return (c == 0) ? 0 : total/c;\r
- }\r
- public int fanout() {\r
- int n = 0;\r
- for (Event e : events) {\r
- if (e.type.equals("del")) {\r
- n++;\r
- }\r
- }\r
- return n;\r
- }\r
- @Override\r
- public String toString() {\r
- return feedid + "," + fileid + "," + clen + "," + min() + "," + max() + "," + avg() + "," + fanout();\r
- }\r
- }\r
-\r
- @Override\r
- public void run() {\r
- long start = System.currentTimeMillis();\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- PreparedStatement ps = conn.prepareStatement(SELECT_SQL);\r
- ps.setLong(1, from);\r
- ps.setLong(2, to);\r
- ResultSet rs = ps.executeQuery();\r
- PrintWriter os = new PrintWriter(outfile);\r
- os.println("recordid,feedid,uri,size,min,max,avg,fanout");\r
- Counters c = null;\r
- while (rs.next()) {\r
- long etime = rs.getLong("EVENT_TIME");\r
- String type = rs.getString("TYPE");\r
- String id = rs.getString("PUBLISH_ID");\r
- String fid = rs.getString("FEED_FILEID");\r
- int feed = rs.getInt("FEEDID");\r
- long clen = rs.getLong("CONTENT_LENGTH");\r
- if (c != null && !id.equals(c.id)) {\r
- String line = id + "," + c.toString();\r
- os.println(line);\r
- c = null;\r
- }\r
- if (c == null) {\r
- c = new Counters(id, feed, clen, fid);\r
- }\r
- if (feed != c.feedid)\r
- System.err.println("Feed ID mismatch, "+feed+" <=> "+c.feedid);\r
- if (clen != c.clen)\r
- System.err.println("Cont Len mismatch, "+clen+" <=> "+c.clen);\r
-// if (fid != c.fileid)\r
-// System.err.println("File ID mismatch, "+fid+" <=> "+c.fileid);\r
- c.addEvent(type, etime);\r
- }\r
- rs.close();\r
- ps.close();\r
- db.release(conn);\r
- os.close();\r
- } catch (FileNotFoundException e) {\r
- System.err.println("File cannot be written: "+outfile);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- logger.debug("Query time: " + (System.currentTimeMillis()-start) + " ms");\r
- }\r
+ private static final String SELECT_SQL =\r
+ "select EVENT_TIME, TYPE, PUBLISH_ID, FEED_FILEID, FEEDID, CONTENT_LENGTH from LOG_RECORDS" +\r
+ " where EVENT_TIME >= ? and EVENT_TIME <= ? order by PUBLISH_ID, EVENT_TIME";\r
+\r
+ private class Event {\r
+ public final String type;\r
+ public final long time;\r
+\r
+ public Event(String t, long tm) {\r
+ type = t;\r
+ time = tm;\r
+ }\r
+ }\r
+\r
+ private class Counters {\r
+ public final String id;\r
+ public final int feedid;\r
+ public final long clen;\r
+ public final String fileid;\r
+ public final List<Event> events;\r
+\r
+ public Counters(String i, int fid, long c, String s) {\r
+ id = i;\r
+ feedid = fid;\r
+ clen = c;\r
+ fileid = s;\r
+ events = new ArrayList<Event>();\r
+ }\r
+\r
+ private long pubtime;\r
+\r
+ public void addEvent(String t, long tm) {\r
+ events.add(new Event(t, tm));\r
+ if (t.equals("pub"))\r
+ pubtime = tm;\r
+ }\r
+\r
+ public long min() {\r
+ long min = Long.MAX_VALUE;\r
+ for (Event e : events) {\r
+ if (e.type.equals("del")) {\r
+ min = Math.min(min, e.time - pubtime);\r
+ }\r
+ }\r
+ return min;\r
+ }\r
+\r
+ public long max() {\r
+ long max = 0;\r
+ for (Event e : events) {\r
+ if (e.type.equals("del")) {\r
+ max = Math.max(max, e.time - pubtime);\r
+ }\r
+ }\r
+ return max;\r
+ }\r
+\r
+ public long avg() {\r
+ long total = 0, c = 0;\r
+ for (Event e : events) {\r
+ if (e.type.equals("del")) {\r
+ total += e.time - pubtime;\r
+ c++;\r
+ }\r
+ }\r
+ return (c == 0) ? 0 : total / c;\r
+ }\r
+\r
+ public int fanout() {\r
+ int n = 0;\r
+ for (Event e : events) {\r
+ if (e.type.equals("del")) {\r
+ n++;\r
+ }\r
+ }\r
+ return n;\r
+ }\r
+\r
+ @Override\r
+ public String toString() {\r
+ return feedid + "," + fileid + "," + clen + "," + min() + "," + max() + "," + avg() + "," + fanout();\r
+ }\r
+ }\r
+\r
+ @Override\r
+ public void run() {\r
+ long start = System.currentTimeMillis();\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ PreparedStatement ps = conn.prepareStatement(SELECT_SQL);\r
+ ps.setLong(1, from);\r
+ ps.setLong(2, to);\r
+ ResultSet rs = ps.executeQuery();\r
+ PrintWriter os = new PrintWriter(outfile);\r
+ os.println("recordid,feedid,uri,size,min,max,avg,fanout");\r
+ Counters c = null;\r
+ while (rs.next()) {\r
+ long etime = rs.getLong("EVENT_TIME");\r
+ String type = rs.getString("TYPE");\r
+ String id = rs.getString("PUBLISH_ID");\r
+ String fid = rs.getString("FEED_FILEID");\r
+ int feed = rs.getInt("FEEDID");\r
+ long clen = rs.getLong("CONTENT_LENGTH");\r
+ if (c != null && !id.equals(c.id)) {\r
+ String line = id + "," + c.toString();\r
+ os.println(line);\r
+ c = null;\r
+ }\r
+ if (c == null) {\r
+ c = new Counters(id, feed, clen, fid);\r
+ }\r
+ if (feed != c.feedid)\r
+ System.err.println("Feed ID mismatch, " + feed + " <=> " + c.feedid);\r
+ if (clen != c.clen)\r
+ System.err.println("Cont Len mismatch, " + clen + " <=> " + c.clen);\r
+// if (fid != c.fileid)\r
+// System.err.println("File ID mismatch, "+fid+" <=> "+c.fileid);\r
+ c.addEvent(type, etime);\r
+ }\r
+ rs.close();\r
+ ps.close();\r
+ db.release(conn);\r
+ os.close();\r
+ } catch (FileNotFoundException e) {\r
+ System.err.println("File cannot be written: " + outfile);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: Report.java,v 1.2 2013/11/06 16:23:55 eby Exp $\r
*/\r
public class Report {\r
- /**\r
- * Generate .csv report files from the database. Usage:\r
- * <pre>\r
- * java org.onap.dmaap.datarouter.reports.Report [ -t <i>type</i> ] [ -o <i>outfile</i> ] [ <i>fromdate</i> [ <i>todate</i> ]]\r
- * </pre>\r
- * <i>type</i> should be <b>volume</b> for a {@link VolumeReport},\r
- * <b>feed</b> for a {@link FeedReport},\r
- * <b>latency</b> for a {@link LatencyReport}, or\r
- * <b>dailyLatency</b> for a {@link DailyLatencyReport}.\r
- * If <i>outfile</i> is not specified, the report goes into a file <i>/tmp/nnnnnnnnnnnnn.csv</i>,\r
- * where nnnnnnnnnnnnn is the current time in milliseconds.\r
- * If <i>from</i> and <i>to</i> are not specified, then the report is limited to the last weeks worth of data.\r
- * <i>from</i> can be the keyword <b>ALL</b> to specify all data in the DB, or the keyword <b>yesterday</b>.\r
- * Otherwise, <i>from</i> and <i>to</i> should match the pattern YYYY-MM-DD.\r
- * @param args the command line arguments\r
- */\r
- public static void main(String[] args) {\r
- ReportBase report = new VolumeReport();\r
- String outfile = "/tmp/" + System.currentTimeMillis() + ".csv";\r
- String from = null, to = null;\r
+ /**\r
+ * Generate .csv report files from the database. Usage:\r
+ * <pre>\r
+ * java org.onap.dmaap.datarouter.reports.Report [ -t <i>type</i> ] [ -o <i>outfile</i> ] [ <i>fromdate</i> [ <i>todate</i> ]]\r
+ * </pre>\r
+ * <i>type</i> should be <b>volume</b> for a {@link VolumeReport},\r
+ * <b>feed</b> for a {@link FeedReport},\r
+ * <b>latency</b> for a {@link LatencyReport}, or\r
+ * <b>dailyLatency</b> for a {@link DailyLatencyReport}.\r
+ * If <i>outfile</i> is not specified, the report goes into a file <i>/tmp/nnnnnnnnnnnnn.csv</i>,\r
+ * where nnnnnnnnnnnnn is the current time in milliseconds.\r
+ * If <i>from</i> and <i>to</i> are not specified, then the report is limited to the last weeks worth of data.\r
+ * <i>from</i> can be the keyword <b>ALL</b> to specify all data in the DB, or the keyword <b>yesterday</b>.\r
+ * Otherwise, <i>from</i> and <i>to</i> should match the pattern YYYY-MM-DD.\r
+ *\r
+ * @param args the command line arguments\r
+ */\r
+ public static void main(String[] args) {\r
+ ReportBase report = new VolumeReport();\r
+ String outfile = "/tmp/" + System.currentTimeMillis() + ".csv";\r
+ String from = null, to = null;\r
\r
- for (int i = 0; i < args.length; i++) {\r
- if (args[i].equals("-?")) {\r
- System.err.println("usage: java org.onap.dmaap.datarouter.reports.Report [ -t <i>type</i> ] [ -o <i>outfile</i> ] [ <i>fromdate</i> [ <i>todate</i> ]]");\r
- System.exit(0);\r
- } else if (args[i].equals("-o")) {\r
- if (++i < args.length) {\r
- outfile = args[i];\r
- }\r
- } else if (args[i].equals("-t")) {\r
- if (++i < args.length) {\r
- String base = args[i];\r
- base = Character.toUpperCase(base.charAt(0)) + base.substring(1);\r
- base = "org.onap.dmaap.datarouter.reports."+base+"Report";\r
- try {\r
- @SuppressWarnings("unchecked")\r
- Class<? extends ReportBase> cl = (Class<? extends ReportBase>) Class.forName(base);\r
- Constructor<? extends ReportBase> con = cl.getConstructor();\r
- report = con.newInstance();\r
- } catch (Exception e) {\r
- System.err.println("Unknown report type: "+args[i]);\r
- System.exit(1);\r
- }\r
- }\r
- } else if (from == null) {\r
- from = args[i];\r
- } else {\r
- to = args[i];\r
- }\r
- }\r
- long lfrom = 0, lto = 0;\r
- if (from == null) {\r
- // last 7 days\r
- TimeZone utc = TimeZone.getTimeZone("UTC");\r
- Calendar cal = new GregorianCalendar(utc);\r
- cal.set(Calendar.HOUR_OF_DAY, 0);\r
- cal.set(Calendar.MINUTE, 0);\r
- cal.set(Calendar.SECOND, 0);\r
- cal.set(Calendar.MILLISECOND, 0);\r
- lfrom = cal.getTimeInMillis() - (7 * 24 * 60 * 60 * 1000L); // 1 week\r
- lto = cal.getTimeInMillis() - 1;\r
- } else if (to == null) {\r
- try {\r
- String[] dates = getDates(from);\r
- lfrom = Long.parseLong(dates[0]);\r
- lto = Long.parseLong(dates[1]);\r
- } catch (Exception e) {\r
- System.err.println("Invalid date: "+from);\r
- System.exit(1);\r
- }\r
- } else {\r
- String[] dates;\r
- try {\r
- dates = getDates(from);\r
- lfrom = Long.parseLong(dates[0]);\r
- } catch (Exception e) {\r
- System.err.println("Invalid date: "+from);\r
- System.exit(1);\r
- }\r
- try {\r
- dates = getDates(to);\r
- lto = Long.parseLong(dates[0]);\r
- } catch (Exception e) {\r
- System.err.println("Invalid date: "+to);\r
- System.exit(1);\r
- }\r
- }\r
+ for (int i = 0; i < args.length; i++) {\r
+ if (args[i].equals("-?")) {\r
+ System.err.println("usage: java org.onap.dmaap.datarouter.reports.Report [ -t <i>type</i> ] [ -o <i>outfile</i> ] [ <i>fromdate</i> [ <i>todate</i> ]]");\r
+ System.exit(0);\r
+ } else if (args[i].equals("-o")) {\r
+ if (++i < args.length) {\r
+ outfile = args[i];\r
+ }\r
+ } else if (args[i].equals("-t")) {\r
+ if (++i < args.length) {\r
+ String base = args[i];\r
+ base = Character.toUpperCase(base.charAt(0)) + base.substring(1);\r
+ base = "org.onap.dmaap.datarouter.reports." + base + "Report";\r
+ try {\r
+ @SuppressWarnings("unchecked")\r
+ Class<? extends ReportBase> cl = (Class<? extends ReportBase>) Class.forName(base);\r
+ Constructor<? extends ReportBase> con = cl.getConstructor();\r
+ report = con.newInstance();\r
+ } catch (Exception e) {\r
+ System.err.println("Unknown report type: " + args[i]);\r
+ System.exit(1);\r
+ }\r
+ }\r
+ } else if (from == null) {\r
+ from = args[i];\r
+ } else {\r
+ to = args[i];\r
+ }\r
+ }\r
+ long lfrom = 0, lto = 0;\r
+ if (from == null) {\r
+ // last 7 days\r
+ TimeZone utc = TimeZone.getTimeZone("UTC");\r
+ Calendar cal = new GregorianCalendar(utc);\r
+ cal.set(Calendar.HOUR_OF_DAY, 0);\r
+ cal.set(Calendar.MINUTE, 0);\r
+ cal.set(Calendar.SECOND, 0);\r
+ cal.set(Calendar.MILLISECOND, 0);\r
+ lfrom = cal.getTimeInMillis() - (7 * 24 * 60 * 60 * 1000L); // 1 week\r
+ lto = cal.getTimeInMillis() - 1;\r
+ } else if (to == null) {\r
+ try {\r
+ String[] dates = getDates(from);\r
+ lfrom = Long.parseLong(dates[0]);\r
+ lto = Long.parseLong(dates[1]);\r
+ } catch (Exception e) {\r
+ System.err.println("Invalid date: " + from);\r
+ System.exit(1);\r
+ }\r
+ } else {\r
+ String[] dates;\r
+ try {\r
+ dates = getDates(from);\r
+ lfrom = Long.parseLong(dates[0]);\r
+ } catch (Exception e) {\r
+ System.err.println("Invalid date: " + from);\r
+ System.exit(1);\r
+ }\r
+ try {\r
+ dates = getDates(to);\r
+ lto = Long.parseLong(dates[0]);\r
+ } catch (Exception e) {\r
+ System.err.println("Invalid date: " + to);\r
+ System.exit(1);\r
+ }\r
+ }\r
\r
- report.setFrom(lfrom);\r
- report.setTo(lto);\r
- report.setOutputFile(outfile);\r
- report.run();\r
- }\r
+ report.setFrom(lfrom);\r
+ report.setTo(lto);\r
+ report.setOutputFile(outfile);\r
+ report.run();\r
+ }\r
\r
- private static String[] getDates(String d) throws Exception {\r
- if (d.equals("ALL"))\r
- return new String[] { "1", ""+System.currentTimeMillis() };\r
+ private static String[] getDates(String d) throws Exception {\r
+ if (d.equals("ALL"))\r
+ return new String[]{"1", "" + System.currentTimeMillis()};\r
\r
- TimeZone utc = TimeZone.getTimeZone("UTC");\r
- Calendar cal = new GregorianCalendar(utc);\r
- if (d.matches("20\\d\\d-\\d\\d-\\d\\d")) {\r
- cal.set(Calendar.YEAR, Integer.parseInt(d.substring(0, 4)));\r
- cal.set(Calendar.MONTH, Integer.parseInt(d.substring(5, 7))-1);\r
- cal.set(Calendar.DAY_OF_MONTH, Integer.parseInt(d.substring(8, 10)));\r
- } else if (d.equals("yesterday")) {\r
- cal.add(Calendar.DAY_OF_YEAR, -1);\r
- } else\r
- throw new Exception("wa?");\r
- cal.set(Calendar.HOUR_OF_DAY, 0);\r
- cal.set(Calendar.MINUTE, 0);\r
- cal.set(Calendar.SECOND, 0);\r
- cal.set(Calendar.MILLISECOND, 0);\r
- long start = cal.getTimeInMillis();\r
- long end = start + (24 * 60 * 60 * 1000L) - 1;\r
- return new String[] { ""+start, ""+end };\r
- }\r
+ TimeZone utc = TimeZone.getTimeZone("UTC");\r
+ Calendar cal = new GregorianCalendar(utc);\r
+ if (d.matches("20\\d\\d-\\d\\d-\\d\\d")) {\r
+ cal.set(Calendar.YEAR, Integer.parseInt(d.substring(0, 4)));\r
+ cal.set(Calendar.MONTH, Integer.parseInt(d.substring(5, 7)) - 1);\r
+ cal.set(Calendar.DAY_OF_MONTH, Integer.parseInt(d.substring(8, 10)));\r
+ } else if (d.equals("yesterday")) {\r
+ cal.add(Calendar.DAY_OF_YEAR, -1);\r
+ } else\r
+ throw new Exception("wa?");\r
+ cal.set(Calendar.HOUR_OF_DAY, 0);\r
+ cal.set(Calendar.MINUTE, 0);\r
+ cal.set(Calendar.SECOND, 0);\r
+ cal.set(Calendar.MILLISECOND, 0);\r
+ long start = cal.getTimeInMillis();\r
+ long end = start + (24 * 60 * 60 * 1000L) - 1;\r
+ return new String[]{"" + start, "" + end};\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: ReportBase.java,v 1.1 2013/10/28 18:06:53 eby Exp $\r
*/\r
abstract public class ReportBase implements Runnable {\r
- protected long from, to;\r
- protected String outfile;\r
- protected Logger logger;\r
-\r
- public ReportBase() {\r
- this.from = 0;\r
- this.to = System.currentTimeMillis();\r
- this.logger = Logger.getLogger("org.onap.dmaap.datarouter.reports");\r
- }\r
-\r
- public void setFrom(long from) {\r
- this.from = from;\r
- }\r
-\r
- public void setTo(long to) {\r
- this.to = to;\r
- }\r
-\r
- public String getOutfile() {\r
- return outfile;\r
- }\r
-\r
- public void setOutputFile(String s) {\r
- this.outfile = s;\r
- }\r
-\r
- @Override\r
- abstract public void run();\r
+ protected long from, to;\r
+ protected String outfile;\r
+ protected Logger logger;\r
+\r
+ public ReportBase() {\r
+ this.from = 0;\r
+ this.to = System.currentTimeMillis();\r
+ this.logger = Logger.getLogger("org.onap.dmaap.datarouter.reports");\r
+ }\r
+\r
+ public void setFrom(long from) {\r
+ this.from = from;\r
+ }\r
+\r
+ public void setTo(long to) {\r
+ this.to = to;\r
+ }\r
+\r
+ public String getOutfile() {\r
+ return outfile;\r
+ }\r
+\r
+ public void setOutputFile(String s) {\r
+ this.outfile = s;\r
+ }\r
+\r
+ @Override\r
+ abstract public void run();\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: SubscriberReport.java,v 1.2 2013/11/06 16:23:55 eby Exp $\r
*/\r
public class SubscriberReport extends ReportBase {\r
- private static final String SELECT_SQL =\r
- "select date(from_unixtime(EVENT_TIME div 1000)) as DATE, DELIVERY_SUBID, RESULT, COUNT(RESULT) as COUNT" +\r
- " from LOG_RECORDS" +\r
- " where TYPE = 'del' and EVENT_TIME >= ? and EVENT_TIME <= ?" +\r
- " group by DATE, DELIVERY_SUBID, RESULT";\r
- private static final String SELECT_SQL2 =\r
- "select date(from_unixtime(EVENT_TIME div 1000)) as DATE, DELIVERY_SUBID, COUNT(CONTENT_LENGTH_2) as COUNT" +\r
- " from LOG_RECORDS" +\r
- " where TYPE = 'dlx' and CONTENT_LENGTH_2 = -1 and EVENT_TIME >= ? and EVENT_TIME <= ?" +\r
- " group by DATE, DELIVERY_SUBID";\r
+ private static final String SELECT_SQL =\r
+ "select date(from_unixtime(EVENT_TIME div 1000)) as DATE, DELIVERY_SUBID, RESULT, COUNT(RESULT) as COUNT" +\r
+ " from LOG_RECORDS" +\r
+ " where TYPE = 'del' and EVENT_TIME >= ? and EVENT_TIME <= ?" +\r
+ " group by DATE, DELIVERY_SUBID, RESULT";\r
+ private static final String SELECT_SQL2 =\r
+ "select date(from_unixtime(EVENT_TIME div 1000)) as DATE, DELIVERY_SUBID, COUNT(CONTENT_LENGTH_2) as COUNT" +\r
+ " from LOG_RECORDS" +\r
+ " where TYPE = 'dlx' and CONTENT_LENGTH_2 = -1 and EVENT_TIME >= ? and EVENT_TIME <= ?" +\r
+ " group by DATE, DELIVERY_SUBID";\r
+\r
+ private class Counters {\r
+ private String date;\r
+ private int sub;\r
+ private int c100, c200, c300, c400, c500, cm1, cdlx;\r
+\r
+ public Counters(String date, int sub) {\r
+ this.date = date;\r
+ this.sub = sub;\r
+ c100 = c200 = c300 = c400 = c500 = cm1 = cdlx = 0;\r
+ }\r
+\r
+ public void addCounts(int status, int n) {\r
+ if (status < 0) {\r
+ cm1 += n;\r
+ } else if (status >= 100 && status <= 199) {\r
+ c100 += n;\r
+ } else if (status >= 200 && status <= 299) {\r
+ c200 += n;\r
+ } else if (status >= 300 && status <= 399) {\r
+ c300 += n;\r
+ } else if (status >= 400 && status <= 499) {\r
+ c400 += n;\r
+ } else if (status >= 500 && status <= 599) {\r
+ c500 += n;\r
+ }\r
+ }\r
+\r
+ public void addDlxCount(int n) {\r
+ cdlx += n;\r
+ }\r
\r
- private class Counters {\r
- private String date;\r
- private int sub;\r
- private int c100, c200, c300, c400, c500, cm1, cdlx;\r
- public Counters(String date, int sub) {\r
- this.date = date;\r
- this.sub = sub;\r
- c100 = c200 = c300 = c400 = c500 = cm1 = cdlx = 0;\r
- }\r
- public void addCounts(int status, int n) {\r
- if (status < 0) {\r
- cm1 += n;\r
- } else if (status >= 100 && status <= 199) {\r
- c100 += n;\r
- } else if (status >= 200 && status <= 299) {\r
- c200 += n;\r
- } else if (status >= 300 && status <= 399) {\r
- c300 += n;\r
- } else if (status >= 400 && status <= 499) {\r
- c400 += n;\r
- } else if (status >= 500 && status <= 599) {\r
- c500 += n;\r
- }\r
- }\r
- public void addDlxCount(int n) {\r
- cdlx += n;\r
- }\r
- @Override\r
- public String toString() {\r
- return date + "," + sub + "," +\r
- c100 + "," + c200 + "," + c300 + "," + c400 + "," + c500 + "," +\r
- cm1 + "," + cdlx;\r
- }\r
- }\r
+ @Override\r
+ public String toString() {\r
+ return date + "," + sub + "," +\r
+ c100 + "," + c200 + "," + c300 + "," + c400 + "," + c500 + "," +\r
+ cm1 + "," + cdlx;\r
+ }\r
+ }\r
\r
- @Override\r
- public void run() {\r
- Map<String, Counters> map = new HashMap<String, Counters>();\r
- long start = System.currentTimeMillis();\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- PreparedStatement ps = conn.prepareStatement(SELECT_SQL);\r
- ps.setLong(1, from);\r
- ps.setLong(2, to);\r
- ResultSet rs = ps.executeQuery();\r
- while (rs.next()) {\r
- String date = rs.getString("DATE");\r
- int sub = rs.getInt("DELIVERY_SUBID");\r
- int res = rs.getInt("RESULT");\r
- int count = rs.getInt("COUNT");\r
- String key = date + "," + sub;\r
- Counters c = map.get(key);\r
- if (c == null) {\r
- c = new Counters(date, sub);\r
- map.put(key, c);\r
- }\r
- c.addCounts(res, count);\r
- }\r
- rs.close();\r
- ps.close();\r
+ @Override\r
+ public void run() {\r
+ Map<String, Counters> map = new HashMap<String, Counters>();\r
+ long start = System.currentTimeMillis();\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ PreparedStatement ps = conn.prepareStatement(SELECT_SQL);\r
+ ps.setLong(1, from);\r
+ ps.setLong(2, to);\r
+ ResultSet rs = ps.executeQuery();\r
+ while (rs.next()) {\r
+ String date = rs.getString("DATE");\r
+ int sub = rs.getInt("DELIVERY_SUBID");\r
+ int res = rs.getInt("RESULT");\r
+ int count = rs.getInt("COUNT");\r
+ String key = date + "," + sub;\r
+ Counters c = map.get(key);\r
+ if (c == null) {\r
+ c = new Counters(date, sub);\r
+ map.put(key, c);\r
+ }\r
+ c.addCounts(res, count);\r
+ }\r
+ rs.close();\r
+ ps.close();\r
\r
- ps = conn.prepareStatement(SELECT_SQL2);\r
- ps.setLong(1, from);\r
- ps.setLong(2, to);\r
- rs = ps.executeQuery();\r
- while (rs.next()) {\r
- String date = rs.getString("DATE");\r
- int sub = rs.getInt("DELIVERY_SUBID");\r
- int count = rs.getInt("COUNT");\r
- String key = date + "," + sub;\r
- Counters c = map.get(key);\r
- if (c == null) {\r
- c = new Counters(date, sub);\r
- map.put(key, c);\r
- }\r
- c.addDlxCount(count);\r
- }\r
- rs.close();\r
- ps.close();\r
+ ps = conn.prepareStatement(SELECT_SQL2);\r
+ ps.setLong(1, from);\r
+ ps.setLong(2, to);\r
+ rs = ps.executeQuery();\r
+ while (rs.next()) {\r
+ String date = rs.getString("DATE");\r
+ int sub = rs.getInt("DELIVERY_SUBID");\r
+ int count = rs.getInt("COUNT");\r
+ String key = date + "," + sub;\r
+ Counters c = map.get(key);\r
+ if (c == null) {\r
+ c = new Counters(date, sub);\r
+ map.put(key, c);\r
+ }\r
+ c.addDlxCount(count);\r
+ }\r
+ rs.close();\r
+ ps.close();\r
\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- logger.debug("Query time: " + (System.currentTimeMillis()-start) + " ms");\r
- try {\r
- PrintWriter os = new PrintWriter(outfile);\r
- os.println("date,subid,count100,count200,count300,count400,count500,countminus1,countdlx");\r
- for (String key : new TreeSet<String>(map.keySet())) {\r
- Counters c = map.get(key);\r
- os.println(c.toString());\r
- }\r
- os.close();\r
- } catch (FileNotFoundException e) {\r
- System.err.println("File cannot be written: "+outfile);\r
- }\r
- }\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");\r
+ try {\r
+ PrintWriter os = new PrintWriter(outfile);\r
+ os.println("date,subid,count100,count200,count300,count400,count500,countminus1,countdlx");\r
+ for (String key : new TreeSet<String>(map.keySet())) {\r
+ Counters c = map.get(key);\r
+ os.println(c.toString());\r
+ }\r
+ os.close();\r
+ } catch (FileNotFoundException e) {\r
+ System.err.println("File cannot be written: " + outfile);\r
+ }\r
+ }\r
}\r
* * Licensed under the Apache License, Version 2.0 (the "License");\r
* * you may not use this file except in compliance with the License.\r
* * You may obtain a copy of the License at\r
- * * \r
+ * *\r
* * http://www.apache.org/licenses/LICENSE-2.0\r
- * * \r
+ * *\r
* * Unless required by applicable law or agreed to in writing, software\r
* * distributed under the License is distributed on an "AS IS" BASIS,\r
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
* @version $Id: VolumeReport.java,v 1.3 2014/02/28 15:11:13 eby Exp $\r
*/\r
public class VolumeReport extends ReportBase {\r
- private static final String SELECT_SQL = "select EVENT_TIME, TYPE, FEEDID, CONTENT_LENGTH, RESULT" +\r
- " from LOG_RECORDS where EVENT_TIME >= ? and EVENT_TIME <= ? LIMIT ?, ?";\r
+ private static final String SELECT_SQL = "select EVENT_TIME, TYPE, FEEDID, CONTENT_LENGTH, RESULT" +\r
+ " from LOG_RECORDS where EVENT_TIME >= ? and EVENT_TIME <= ? LIMIT ?, ?";\r
+\r
+ private class Counters {\r
+ public int filespublished, filesdelivered, filesexpired;\r
+ public long bytespublished, bytesdelivered, bytesexpired;\r
\r
- private class Counters {\r
- public int filespublished, filesdelivered, filesexpired;\r
- public long bytespublished, bytesdelivered, bytesexpired;\r
- @Override\r
- public String toString() {\r
- return String.format("%d,%d,%d,%d,%d,%d",\r
- filespublished, bytespublished, filesdelivered,\r
- bytesdelivered, filesexpired, bytesexpired);\r
- }\r
- }\r
+ @Override\r
+ public String toString() {\r
+ return String.format("%d,%d,%d,%d,%d,%d",\r
+ filespublished, bytespublished, filesdelivered,\r
+ bytesdelivered, filesexpired, bytesexpired);\r
+ }\r
+ }\r
\r
- @Override\r
- public void run() {\r
- Map<String, Counters> map = new HashMap<String, Counters>();\r
- SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");\r
- long start = System.currentTimeMillis();\r
- try {\r
- DB db = new DB();\r
- @SuppressWarnings("resource")\r
- Connection conn = db.getConnection();\r
- // We need to run this SELECT in stages, because otherwise we run out of memory!\r
- final long stepsize = 6000000L;\r
- boolean go_again = true;\r
- for (long i = 0; go_again; i += stepsize) {\r
- PreparedStatement ps = conn.prepareStatement(SELECT_SQL);\r
- ps.setLong(1, from);\r
- ps.setLong(2, to);\r
- ps.setLong(3, i);\r
- ps.setLong(4, stepsize);\r
- ResultSet rs = ps.executeQuery();\r
- go_again = false;\r
- while (rs.next()) {\r
- go_again = true;\r
- long etime = rs.getLong("EVENT_TIME");\r
- String type = rs.getString("TYPE");\r
- int feed = rs.getInt("FEEDID");\r
- long clen = rs.getLong("CONTENT_LENGTH");\r
- String key = sdf.format(new Date(etime)) + ":" + feed;\r
- Counters c = map.get(key);\r
- if (c == null) {\r
- c = new Counters();\r
- map.put(key, c);\r
- }\r
- if (type.equalsIgnoreCase("pub")) {\r
- c.filespublished++;\r
- c.bytespublished += clen;\r
- } else if (type.equalsIgnoreCase("del")) {\r
- // Only count successful deliveries\r
- int statusCode = rs.getInt("RESULT");\r
- if (statusCode >= 200 && statusCode < 300) {\r
- c.filesdelivered++;\r
- c.bytesdelivered += clen;\r
- }\r
- } else if (type.equalsIgnoreCase("exp")) {\r
- c.filesexpired++;\r
- c.bytesexpired += clen;\r
- }\r
- }\r
- rs.close();\r
- ps.close();\r
- }\r
- db.release(conn);\r
- } catch (SQLException e) {\r
- e.printStackTrace();\r
- }\r
- logger.debug("Query time: " + (System.currentTimeMillis()-start) + " ms");\r
- try {\r
- PrintWriter os = new PrintWriter(outfile);\r
- os.println("date,feedid,filespublished,bytespublished,filesdelivered,bytesdelivered,filesexpired,bytesexpired");\r
- for (String key : new TreeSet<String>(map.keySet())) {\r
- Counters c = map.get(key);\r
- String[] p = key.split(":");\r
- os.println(String.format("%s,%s,%s", p[0], p[1], c.toString()));\r
- }\r
- os.close();\r
- } catch (FileNotFoundException e) {\r
- System.err.println("File cannot be written: "+outfile);\r
- }\r
- }\r
+ @Override\r
+ public void run() {\r
+ Map<String, Counters> map = new HashMap<String, Counters>();\r
+ SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");\r
+ long start = System.currentTimeMillis();\r
+ try {\r
+ DB db = new DB();\r
+ @SuppressWarnings("resource")\r
+ Connection conn = db.getConnection();\r
+ // We need to run this SELECT in stages, because otherwise we run out of memory!\r
+ final long stepsize = 6000000L;\r
+ boolean go_again = true;\r
+ for (long i = 0; go_again; i += stepsize) {\r
+ PreparedStatement ps = conn.prepareStatement(SELECT_SQL);\r
+ ps.setLong(1, from);\r
+ ps.setLong(2, to);\r
+ ps.setLong(3, i);\r
+ ps.setLong(4, stepsize);\r
+ ResultSet rs = ps.executeQuery();\r
+ go_again = false;\r
+ while (rs.next()) {\r
+ go_again = true;\r
+ long etime = rs.getLong("EVENT_TIME");\r
+ String type = rs.getString("TYPE");\r
+ int feed = rs.getInt("FEEDID");\r
+ long clen = rs.getLong("CONTENT_LENGTH");\r
+ String key = sdf.format(new Date(etime)) + ":" + feed;\r
+ Counters c = map.get(key);\r
+ if (c == null) {\r
+ c = new Counters();\r
+ map.put(key, c);\r
+ }\r
+ if (type.equalsIgnoreCase("pub")) {\r
+ c.filespublished++;\r
+ c.bytespublished += clen;\r
+ } else if (type.equalsIgnoreCase("del")) {\r
+ // Only count successful deliveries\r
+ int statusCode = rs.getInt("RESULT");\r
+ if (statusCode >= 200 && statusCode < 300) {\r
+ c.filesdelivered++;\r
+ c.bytesdelivered += clen;\r
+ }\r
+ } else if (type.equalsIgnoreCase("exp")) {\r
+ c.filesexpired++;\r
+ c.bytesexpired += clen;\r
+ }\r
+ }\r
+ rs.close();\r
+ ps.close();\r
+ }\r
+ db.release(conn);\r
+ } catch (SQLException e) {\r
+ e.printStackTrace();\r
+ }\r
+ logger.debug("Query time: " + (System.currentTimeMillis() - start) + " ms");\r
+ try {\r
+ PrintWriter os = new PrintWriter(outfile);\r
+ os.println("date,feedid,filespublished,bytespublished,filesdelivered,bytesdelivered,filesexpired,bytesexpired");\r
+ for (String key : new TreeSet<String>(map.keySet())) {\r
+ Counters c = map.get(key);\r
+ String[] p = key.split(":");\r
+ os.println(String.format("%s,%s,%s", p[0], p[1], c.toString()));\r
+ }\r
+ os.close();\r
+ } catch (FileNotFoundException e) {\r
+ System.err.println("File cannot be written: " + outfile);\r
+ }\r
+ }\r
}\r
-#-------------------------------------------------------------------------------\r
-# ============LICENSE_START==================================================\r
-# * org.onap.dmaap\r
-# * ===========================================================================\r
-# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
-# * ===========================================================================\r
-# * Licensed under the Apache License, Version 2.0 (the "License");\r
-# * you may not use this file except in compliance with the License.\r
-# * You may obtain a copy of the License at\r
-# * \r
-# * http://www.apache.org/licenses/LICENSE-2.0\r
-# * \r
-# * Unless required by applicable law or agreed to in writing, software\r
-# * distributed under the License is distributed on an "AS IS" BASIS,\r
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
-# * See the License for the specific language governing permissions and\r
-# * limitations under the License.\r
-# * ============LICENSE_END====================================================\r
-# *\r
-# * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
-# *\r
-#-------------------------------------------------------------------------------\r
-{\r
- "name": "Jettydemo",\r
- "version": "m1.0",\r
- "description": "Jettydemo",\r
- "business_description": "Jettydemo",\r
- "suspend": false,\r
- "deleted": false,\r
- "changeowner": true,\r
- "authorization": {\r
- "classification": "unclassified",\r
- "endpoint_addrs": [\r
- "172.100.0.3",\r
- ],\r
- "endpoint_ids": [\r
- {\r
- "password": "rs873m",\r
- "id": "rs873m"\r
- }\r
- ]\r
- },\r
-}\r
-\r
+#-------------------------------------------------------------------------------
+# ============LICENSE_START==================================================
+# * org.onap.dmaap
+# * ===========================================================================
+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# * ===========================================================================
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# * ============LICENSE_END====================================================
+# *
+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+# *
+#-------------------------------------------------------------------------------
+{
+ "name": "Jettydemo",
+ "version": "m1.0",
+ "description": "Jettydemo",
+ "business_description": "Jettydemo",
+ "suspend": false,
+ "deleted": false,
+ "changeowner": true,
+ "authorization": {
+ "classification": "unclassified",
+ "endpoint_addrs": [
+ "172.100.0.3",
+ ],
+ "endpoint_ids": [
+ {
+ "password": "rs873m",
+ "id": "rs873m"
+ }
+ ]
+ },
+}
+
-#-------------------------------------------------------------------------------\r
-# ============LICENSE_START==================================================\r
-# * org.onap.dmaap\r
-# * ===========================================================================\r
-# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
-# * ===========================================================================\r
-# * Licensed under the Apache License, Version 2.0 (the "License");\r
-# * you may not use this file except in compliance with the License.\r
-# * You may obtain a copy of the License at\r
-# * \r
-# * http://www.apache.org/licenses/LICENSE-2.0\r
-# * \r
-# * Unless required by applicable law or agreed to in writing, software\r
-# * distributed under the License is distributed on an "AS IS" BASIS,\r
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
-# * See the License for the specific language governing permissions and\r
-# * limitations under the License.\r
-# * ============LICENSE_END====================================================\r
-# *\r
-# * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
-# *\r
-#-------------------------------------------------------------------------------\r
-{ \r
- "delivery" : \r
- \r
- { \r
- "url" : "http://172.100.0.3:7070/",\r
- "user" : "datarouter",\r
- "password" : "datarouter",\r
- "use100" : true \r
- },\r
- "metadataOnly" : false, \r
- "suspend" : false, \r
- "groupid" : 29,\r
- "subscriber" : "sg481n"\r
-}\r
+#-------------------------------------------------------------------------------
+# ============LICENSE_START==================================================
+# * org.onap.dmaap
+# * ===========================================================================
+# * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# * ===========================================================================
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# * ============LICENSE_END====================================================
+# *
+# * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+# *
+#-------------------------------------------------------------------------------
+{
+ "delivery" :
+
+ {
+ "url" : "http://172.100.0.3:7070/",
+ "user" : "datarouter",
+ "password" : "datarouter",
+ "use100" : true
+ },
+ "metadataOnly" : false,
+ "suspend" : false,
+ "groupid" : 29,
+ "subscriber" : "sg481n"
+}
-<!--\r
- ============LICENSE_START==================================================\r
- * org.onap.dmaap\r
- * ===========================================================================\r
- * Copyright © 2017 AT&T Intellectual Property. All rights reserved.\r
- * ===========================================================================\r
- * Licensed under the Apache License, Version 2.0 (the "License");\r
- * you may not use this file except in compliance with the License.\r
- * You may obtain a copy of the License at\r
- * \r
- * http://www.apache.org/licenses/LICENSE-2.0\r
- * \r
- * Unless required by applicable law or agreed to in writing, software\r
- * distributed under the License is distributed on an "AS IS" BASIS,\r
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
- * See the License for the specific language governing permissions and\r
- * limitations under the License.\r
- * ============LICENSE_END====================================================\r
- *\r
- * ECOMP is a trademark and service mark of AT&T Intellectual Property.\r
- *\r
--->\r
-<configuration scan="true" scanPeriod="3 seconds" debug="true">\r
- <!--<jmxConfigurator /> -->\r
- <!-- directory path for all other type logs -->\r
- <!-- property name="logDir" value="/home/eby/dr2/logs" / -->\r
- <property name="logDir" value="/opt/app/datartr/logs" />\r
- \r
- <!-- directory path for debugging type logs -->\r
- <!-- property name="debugDir" value="/home/eby/dr2/debug-logs" /-->\r
- \r
- <!-- specify the component name \r
- <ECOMP-component-name>::= "MSO" | "DCAE" | "ASDC " | "AAI" |"Policy" | "SDNC" | "AC" -->\r
- <!-- This creates the MSO directory in in the LogDir which is not needed, mentioned last directory of the path-->\r
- <!-- property name="componentName" value="logs"></property -->\r
- \r
- <!-- log file names -->\r
- <property name="generalLogName" value="apicalls" />\r
- <!-- name="securityLogName" value="security" -->\r
- <!-- name="performanceLogName" value="performance" -->\r
- <!-- name="serverLogName" value="server" -->\r
- <!-- name="policyLogName" value="policy"-->\r
- <property name="errorLogName" value="errors" />\r
- <!-- name="metricsLogName" value="metrics" -->\r
- <!-- name="auditLogName" value="audit" -->\r
- <!-- name="debugLogName" value="debug" -->\r
- <property name="jettyLogName" value="jetty"></property> \r
- <property name="defaultPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|%msg%n" />\r
- <property name="jettyLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%thread|%.-5level|%msg%n" />\r
- \r
- <property name="debugLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|[%caller{3}]|%msg%n" />\r
- \r
- <property name="logDirectory" value="${logDir}" />\r
- <!-- property name="debugLogDirectory" value="${debugDir}/${componentName}" /-->\r
- \r
- \r
- <!-- Example evaluator filter applied against console appender -->\r
- <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
-\r
- <!-- ============================================================================ -->\r
- <!-- EELF Appenders -->\r
- <!-- ============================================================================ -->\r
-\r
- <!-- The EELFAppender is used to record events to the general application \r
- log -->\r
- \r
- \r
- <appender name="EELF"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${generalLogName}.log</file>\r
- <filter class="ch.qos.logback.classic.filter.LevelFilter">\r
- <level>INFO</level>\r
- <onMatch>ACCEPT</onMatch>\r
- <onMismatch>DENY</onMismatch>\r
- </filter>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${generalLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELF" />\r
- </appender>\r
-\r
- <!-- EELF Security Appender. This appender is used to record security events \r
- to the security log file. Security events are separate from other loggers \r
- in EELF so that security log records can be captured and managed in a secure \r
- way separate from the other logs. This appender is set to never discard any \r
- events. -->\r
- <!--appender name="EELFSecurity"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${securityLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${securityLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELFSecurity" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <discardingThreshold>0</discardingThreshold>\r
- <appender-ref ref="EELFSecurity" />\r
- </appender-->\r
-\r
- <!-- EELF Performance Appender. This appender is used to record performance \r
- records. -->\r
- <!--appender name="EELFPerformance"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${performanceLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${performanceLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <outputPatternAsHeader>true</outputPatternAsHeader>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- <appender name="asyncEELFPerformance" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFPerformance" />\r
- </appender-->\r
-\r
- <!-- EELF Server Appender. This appender is used to record Server related \r
- logging events. The Server logger and appender are specializations of the \r
- EELF application root logger and appender. This can be used to segregate Server \r
- events from other components, or it can be eliminated to record these events \r
- as part of the application root log. -->\r
- <!--appender name="EELFServer"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${serverLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${serverLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- <appender name="asyncEELFServer" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFServer" />\r
- </appender-->\r
-\r
- \r
- <!-- EELF Policy Appender. This appender is used to record Policy engine \r
- related logging events. The Policy logger and appender are specializations \r
- of the EELF application root logger and appender. This can be used to segregate \r
- Policy engine events from other components, or it can be eliminated to record \r
- these events as part of the application root log. -->\r
- <!--appender name="EELFPolicy"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${policyLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${policyLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- <appender name="asyncEELFPolicy" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFPolicy" >\r
- </appender-->\r
- \r
- \r
- <!-- EELF Audit Appender. This appender is used to record audit engine \r
- related logging events. The audit logger and appender are specializations \r
- of the EELF application root logger and appender. This can be used to segregate \r
- Policy engine events from other components, or it can be eliminated to record \r
- these events as part of the application root log. -->\r
- \r
- <!--appender name="EELFAudit"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${auditLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${auditLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFAudit" />\r
- </appender-->\r
-\r
-<!--appender name="EELFMetrics"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${metricsLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${metricsLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder-->\r
- <!-- <pattern>"%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - \r
- %msg%n"</pattern> -->\r
- <!--pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- \r
- <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFMetrics"/>\r
- </appender-->\r
- \r
- <appender name="EELFError"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${errorLogName}.log</file>\r
- <filter class="ch.qos.logback.classic.filter.LevelFilter">\r
- <level>ERROR</level>\r
- <onMatch>ACCEPT</onMatch>\r
- <onMismatch>DENY</onMismatch>\r
- </filter>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${errorLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${defaultPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFError"/>\r
- </appender>\r
- \r
- <!-- ============================================================================ -->\r
- <appender name="jettylog"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${logDirectory}/${jettyLogName}.log</file>\r
- <filter class="org.onap.dmaap.datarouter.provisioning.eelf.JettyFilter" />\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${logDirectory}/${jettyLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${jettyLoggerPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELFjettylog" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="jettylog" />\r
- <includeCallerData>true</includeCallerData>\r
- </appender>\r
- \r
- <!-- ============================================================================ -->\r
-\r
-\r
- <!--appender name="EELFDebug"\r
- class="ch.qos.logback.core.rolling.RollingFileAppender">\r
- <file>${debugLogDirectory}/${debugLogName}.log</file>\r
- <rollingPolicy\r
- class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
- <fileNamePattern>${debugLogDirectory}/${debugLogName}.%i.log.zip\r
- </fileNamePattern>\r
- <minIndex>1</minIndex>\r
- <maxIndex>9</maxIndex>\r
- </rollingPolicy>\r
- <triggeringPolicy\r
- class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
- <maxFileSize>5MB</maxFileSize>\r
- </triggeringPolicy>\r
- <encoder>\r
- <pattern>${debugLoggerPattern}</pattern>\r
- </encoder>\r
- </appender>\r
- \r
- <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">\r
- <queueSize>256</queueSize>\r
- <appender-ref ref="EELFDebug" />\r
- <includeCallerData>true</includeCallerData>\r
- </appender-->\r
- \r
- \r
- <!-- ============================================================================ -->\r
- <!-- EELF loggers -->\r
- <!-- ============================================================================ -->\r
- <logger name="com.att.eelf" level="info" additivity="false">\r
- <appender-ref ref="asyncEELF" />\r
- </logger>\r
- \r
- <logger name="com.att.eelf.error" level="error" additivity="false">\r
- <appender-ref ref="asyncEELFError" />\r
- </logger>\r
- \r
- <logger name="log4j.logger.org.eclipse.jetty" additivity="false" level="info">\r
- <appender-ref ref="asyncEELFjettylog"/>\r
- </logger> \r
- \r
- <!-- logger name="com.att.eelf.security" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFSecurity" /> \r
- </logger>\r
- <logger name="com.att.eelf.perf" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFPerformance" />\r
- </logger>\r
- <logger name="com.att.eelf.server" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFServer" />\r
- </logger>\r
- <logger name="com.att.eelf.policy" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFPolicy" />\r
- </logger>\r
-\r
- <logger name="com.att.eelf.audit" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFAudit" />\r
- </logger>\r
- \r
- <logger name="com.att.eelf.metrics" level="info" additivity="false">\r
- <appender-ref ref="asyncEELFMetrics" />\r
- </logger>\r
- \r
- <logger name="com.att.eelf.debug" level="debug" additivity="false">\r
- <appender-ref ref="asyncEELFDebug" />\r
- </logger-->\r
-\r
- \r
-\r
- \r
- <root level="INFO">\r
- <appender-ref ref="asyncEELF" />\r
- <appender-ref ref="asyncEELFError" />\r
- <appender-ref ref="asyncEELFjettylog" />\r
- </root>\r
-\r
-</configuration>\r
+<!--
+ ============LICENSE_START==================================================
+ * org.onap.dmaap
+ * ===========================================================================
+ * Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ * ===========================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END====================================================
+ *
+ * ECOMP is a trademark and service mark of AT&T Intellectual Property.
+ *
+-->
+<configuration scan="true" scanPeriod="3 seconds" debug="true">
+ <!--<jmxConfigurator /> -->
+ <!-- directory path for all other type logs -->
+ <!-- property name="logDir" value="/home/eby/dr2/logs" / -->
+ <property name="logDir" value="/opt/app/datartr/logs" />
+
+ <!-- directory path for debugging type logs -->
+ <!-- property name="debugDir" value="/home/eby/dr2/debug-logs" /-->
+
+ <!-- specify the component name
+ <ECOMP-component-name>::= "MSO" | "DCAE" | "ASDC " | "AAI" |"Policy" | "SDNC" | "AC" -->
+ <!-- This creates the MSO directory in the LogDir, which is not needed; mention only the last directory of the path-->
+ <!-- property name="componentName" value="logs"></property -->
+
+ <!-- log file names -->
+ <property name="generalLogName" value="apicalls" />
+ <!-- name="securityLogName" value="security" -->
+ <!-- name="performanceLogName" value="performance" -->
+ <!-- name="serverLogName" value="server" -->
+ <!-- name="policyLogName" value="policy"-->
+ <property name="errorLogName" value="errors" />
+ <!-- name="metricsLogName" value="metrics" -->
+ <!-- name="auditLogName" value="audit" -->
+ <!-- name="debugLogName" value="debug" -->
+ <property name="jettyLogName" value="jetty"></property>
+ <property name="defaultPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|%msg%n" />
+ <property name="jettyLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%logger|%thread|%.-5level|%msg%n" />
+
+ <property name="debugLoggerPattern" value="%d{MM/dd-HH:mm:ss.SSS}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{Timer}|[%caller{3}]|%msg%n" />
+
+ <property name="logDirectory" value="${logDir}" />
+ <!-- property name="debugLogDirectory" value="${debugDir}/${componentName}" /-->
+
+
+ <!-- Example evaluator filter applied against console appender -->
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <!-- ============================================================================ -->
+ <!-- EELF Appenders -->
+ <!-- ============================================================================ -->
+
+ <!-- The EELFAppender is used to record events to the general application
+ log -->
+
+
+ <appender name="EELF"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${generalLogName}.log</file>
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>INFO</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${generalLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELF" />
+ </appender>
+
+ <!-- EELF Security Appender. This appender is used to record security events
+ to the security log file. Security events are separate from other loggers
+ in EELF so that security log records can be captured and managed in a secure
+ way separate from the other logs. This appender is set to never discard any
+ events. -->
+ <!--appender name="EELFSecurity"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${securityLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${securityLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFSecurity" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <discardingThreshold>0</discardingThreshold>
+ <appender-ref ref="EELFSecurity" />
+ </appender-->
+
+ <!-- EELF Performance Appender. This appender is used to record performance
+ records. -->
+ <!--appender name="EELFPerformance"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${performanceLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${performanceLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <outputPatternAsHeader>true</outputPatternAsHeader>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFPerformance" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFPerformance" />
+ </appender-->
+
+ <!-- EELF Server Appender. This appender is used to record Server related
+ logging events. The Server logger and appender are specializations of the
+ EELF application root logger and appender. This can be used to segregate Server
+ events from other components, or it can be eliminated to record these events
+ as part of the application root log. -->
+ <!--appender name="EELFServer"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${serverLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${serverLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFServer" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFServer" />
+ </appender-->
+
+
+ <!-- EELF Policy Appender. This appender is used to record Policy engine
+ related logging events. The Policy logger and appender are specializations
+ of the EELF application root logger and appender. This can be used to segregate
+ Policy engine events from other components, or it can be eliminated to record
+ these events as part of the application root log. -->
+ <!--appender name="EELFPolicy"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${policyLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${policyLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFPolicy" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFPolicy" >
+ </appender-->
+
+
+ <!-- EELF Audit Appender. This appender is used to record audit engine
+ related logging events. The audit logger and appender are specializations
+ of the EELF application root logger and appender. This can be used to segregate
+ audit events from other components, or it can be eliminated to record
+ these events as part of the application root log. -->
+
+ <!--appender name="EELFAudit"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${auditLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${auditLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFAudit" />
+ </appender-->
+
+<!--appender name="EELFMetrics"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${metricsLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${metricsLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder-->
+ <!-- <pattern>"%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} -
+ %msg%n"</pattern> -->
+ <!--pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+
+ <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFMetrics"/>
+ </appender-->
+
+ <appender name="EELFError"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${errorLogName}.log</file>
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>ERROR</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${errorLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFError"/>
+ </appender>
+
+ <!-- ============================================================================ -->
+ <appender name="jettylog"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${jettyLogName}.log</file>
+ <filter class="org.onap.dmaap.datarouter.provisioning.eelf.JettyFilter" />
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${jettyLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${jettyLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFjettylog" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="jettylog" />
+ <includeCallerData>true</includeCallerData>
+ </appender>
+
+ <!-- ============================================================================ -->
+
+
+ <!--appender name="EELFDebug"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${debugLogDirectory}/${debugLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${debugLogDirectory}/${debugLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${debugLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFDebug" />
+ <includeCallerData>true</includeCallerData>
+ </appender-->
+
+
+ <!-- ============================================================================ -->
+ <!-- EELF loggers -->
+ <!-- ============================================================================ -->
+ <logger name="com.att.eelf" level="info" additivity="false">
+ <appender-ref ref="asyncEELF" />
+ </logger>
+
+ <logger name="com.att.eelf.error" level="error" additivity="false">
+ <appender-ref ref="asyncEELFError" />
+ </logger>
+
+ <logger name="log4j.logger.org.eclipse.jetty" additivity="false" level="info">
+ <appender-ref ref="asyncEELFjettylog"/>
+ </logger>
+
+ <!-- logger name="com.att.eelf.security" level="info" additivity="false">
+ <appender-ref ref="asyncEELFSecurity" />
+ </logger>
+ <logger name="com.att.eelf.perf" level="info" additivity="false">
+ <appender-ref ref="asyncEELFPerformance" />
+ </logger>
+ <logger name="com.att.eelf.server" level="info" additivity="false">
+ <appender-ref ref="asyncEELFServer" />
+ </logger>
+ <logger name="com.att.eelf.policy" level="info" additivity="false">
+ <appender-ref ref="asyncEELFPolicy" />
+ </logger>
+
+ <logger name="com.att.eelf.audit" level="info" additivity="false">
+ <appender-ref ref="asyncEELFAudit" />
+ </logger>
+
+ <logger name="com.att.eelf.metrics" level="info" additivity="false">
+ <appender-ref ref="asyncEELFMetrics" />
+ </logger>
+
+ <logger name="com.att.eelf.debug" level="debug" additivity="false">
+ <appender-ref ref="asyncEELFDebug" />
+ </logger-->
+
+
+
+
+ <root level="INFO">
+ <appender-ref ref="asyncEELF" />
+ <appender-ref ref="asyncEELFError" />
+ <appender-ref ref="asyncEELFjettylog" />
+ </root>
+
+</configuration>
do
case "$action" in
'stop')
- /opt/app/platform/init.d/drtrprov stop
- ;;
+ /opt/app/platform/init.d/drtrprov stop
+ ;;
'start')
- /opt/app/platform/init.d/drtrprov start || exit 1
- ;;
+ /opt/app/platform/init.d/drtrprov start || exit 1
+ ;;
'backup')
- cp log4j.properties log4j.properties.save 2>/dev/null
- cp provserver.properties provserver.properties.save 2>/dev/null
- cp mail.properties mail.properties.save 2>/dev/null
- cp havecert havecert.save 2>/dev/null
- cp mysql_init_0001 mysql_init_0001.save 2>/dev/null
- ;;
+ cp log4j.properties log4j.properties.save 2>/dev/null
+ cp provserver.properties provserver.properties.save 2>/dev/null
+ cp mail.properties mail.properties.save 2>/dev/null
+ cp havecert havecert.save 2>/dev/null
+ cp mysql_init_0001 mysql_init_0001.save 2>/dev/null
+ ;;
'restore')
- cp log4j.properties.save log4j.properties 2>/dev/null
- cp provserver.properties.save provserver.properties 2>/dev/null
- cp mail.properties.save mail.properties 2>/dev/null
- cp havecert.save havecert 2>/dev/null
- cp mysql_init_0001.save mysql_init_0001 2>/dev/null
- ;;
+ cp log4j.properties.save log4j.properties 2>/dev/null
+ cp provserver.properties.save provserver.properties 2>/dev/null
+ cp mail.properties.save mail.properties 2>/dev/null
+ cp havecert.save havecert 2>/dev/null
+ cp mysql_init_0001.save mysql_init_0001 2>/dev/null
+ ;;
'config')
- /bin/bash log4j.properties.tmpl >log4j.properties
- /bin/bash provserver.properties.tmpl >provserver.properties
- /bin/bash mail.properties.tmpl >mail.properties
- /bin/bash havecert.tmpl >havecert
- /bin/bash mysql_init_0001.tmpl >mysql_init_0001
- echo "$AFTSWM_ACTION_NEW_VERSION" >VERSION.prov
- chmod +x havecert
- rm -f /opt/app/platform/rc.d/K90zdrtrprov /opt/app/platform/rc.d/S99zdrtrprov
- ln -s ../init.d/drtrprov /opt/app/platform/rc.d/K90zdrtrprov
- ln -s ../init.d/drtrprov /opt/app/platform/rc.d/S99zdrtrprov
- ;;
+ /bin/bash log4j.properties.tmpl >log4j.properties
+ /bin/bash provserver.properties.tmpl >provserver.properties
+ /bin/bash mail.properties.tmpl >mail.properties
+ /bin/bash havecert.tmpl >havecert
+ /bin/bash mysql_init_0001.tmpl >mysql_init_0001
+ echo "$AFTSWM_ACTION_NEW_VERSION" >VERSION.prov
+ chmod +x havecert
+ rm -f /opt/app/platform/rc.d/K90zdrtrprov /opt/app/platform/rc.d/S99zdrtrprov
+ ln -s ../init.d/drtrprov /opt/app/platform/rc.d/K90zdrtrprov
+ ln -s ../init.d/drtrprov /opt/app/platform/rc.d/S99zdrtrprov
+ ;;
'clean')
- rm -f log4j.properties log4j.properties.save
- rm -f provserver.properties provserver.properties.save
- rm -f mail.properties mail.properties.save
- rm -f havecert havecert.properties.save
- rm -f mysql_init_0001 mysql_init_0001.save
- rm -f VERSION.prov
- rm -f /opt/app/platform/rc.d/K90zdrtrprov /opt/app/platform/rc.d/S99zdrtrprov
- ;;
+ rm -f log4j.properties log4j.properties.save
+ rm -f provserver.properties provserver.properties.save
+ rm -f mail.properties mail.properties.save
+ rm -f havecert havecert.properties.save
+ rm -f mysql_init_0001 mysql_init_0001.save
+ rm -f VERSION.prov
+ rm -f /opt/app/platform/rc.d/K90zdrtrprov /opt/app/platform/rc.d/S99zdrtrprov
+ ;;
*)
- exit 1
- ;;
+ exit 1
+ ;;
esac
done
exit 0
export CLASSPATH JAVA_HOME JAVA_OPTS TZ PATH
$JAVA_HOME/bin/java \
- -Dlog4j.configuration=file:///opt/app/datartr/etc/log4j.drroute.properties \
- org.onap.dmaap.datarouter.provisioning.utils.DRRouteCLI $*
+ -Dlog4j.configuration=file:///opt/app/datartr/etc/log4j.drroute.properties \
+ org.onap.dmaap.datarouter.provisioning.utils.DRRouteCLI $*
export CLASSPATH JAVA_HOME JAVA_OPTS TZ PATH
pids() {
- pgrep -u datartr -f provisioning.Main
+ pgrep -u datartr -f provisioning.Main
}
start() {
- ID=`id -n -u`
- GRP=`id -n -g`
- if [ "$ID" != "root" ]
- then
- echo drtrprov must be started as user datartr not $ID
- exit 1
- fi
+ ID=`id -n -u`
+ GRP=`id -n -g`
+ if [ "$ID" != "root" ]
+ then
+ echo drtrprov must be started as user datartr not $ID
+ exit 1
+ fi
# if [ "$GRP" != "datartr" ]
-# then
-# echo drtrprov must be started as group datartr not $GRP
-# exit 1
-# fi
-# cd /opt/app/datartr
-# if etc/havecert
-# then
-# echo >/dev/null
-# else
-# echo No certificate file available. Cannot start
-# exit 0
-# fi
- if [ "`pgrep -u mysql mysqld`" = "" ]
- then
- echo MariaDB is not running. It must be started before drtrprov
- exit 0
- fi
- PIDS=`pids`
- if [ "$PIDS" != "" ]
- then
- echo drtrprov already running
- exit 0
- fi
- echo '0 1 * * * /opt/app/datartr/bin/runreports' | crontab
- nohup java $JAVA_OPTS org.onap.dmaap.datarouter.provisioning.Main </dev/null &
- sleep 5
- PIDS=`pids`
- if [ "$PIDS" = "" ]
- then
- echo drtrprov startup failed
- else
- echo drtrprov started
- fi
+# then
+# echo drtrprov must be started as group datartr not $GRP
+# exit 1
+# fi
+# cd /opt/app/datartr
+# if etc/havecert
+# then
+# echo >/dev/null
+# else
+# echo No certificate file available. Cannot start
+# exit 0
+# fi
+ if [ "`pgrep -u mysql mysqld`" = "" ]
+ then
+ echo MariaDB is not running. It must be started before drtrprov
+ exit 0
+ fi
+ PIDS=`pids`
+ if [ "$PIDS" != "" ]
+ then
+ echo drtrprov already running
+ exit 0
+ fi
+ echo '0 1 * * * /opt/app/datartr/bin/runreports' | crontab
+ nohup java $JAVA_OPTS org.onap.dmaap.datarouter.provisioning.Main </dev/null &
+ sleep 5
+ PIDS=`pids`
+ if [ "$PIDS" = "" ]
+ then
+ echo drtrprov startup failed
+ else
+ echo drtrprov started
+ fi
}
stop() {
- ID=`id -n -u`
- GRP=`id -n -g`
- if [ "$ID" != "datartr" ]
- then
- echo drtrprov must be stopped as user datartr not $ID
- exit 1
- fi
- if [ "$GRP" != "datartr" ]
- then
- echo drtrprov must be stopped as group datartr not $GRP
- exit 1
- fi
- /usr/bin/curl http://127.0.0.1:8080/internal/halt
- sleep 5
- PIDS=`pids`
- if [ "$PIDS" != "" ]
- then
- sleep 5
- kill -9 $PIDS
- sleep 5
- echo drtrprov stopped
- else
- echo drtrprov not running
- fi
+ ID=`id -n -u`
+ GRP=`id -n -g`
+ if [ "$ID" != "datartr" ]
+ then
+ echo drtrprov must be stopped as user datartr not $ID
+ exit 1
+ fi
+ if [ "$GRP" != "datartr" ]
+ then
+ echo drtrprov must be stopped as group datartr not $GRP
+ exit 1
+ fi
+ /usr/bin/curl http://127.0.0.1:8080/internal/halt
+ sleep 5
+ PIDS=`pids`
+ if [ "$PIDS" != "" ]
+ then
+ sleep 5
+ kill -9 $PIDS
+ sleep 5
+ echo drtrprov stopped
+ else
+ echo drtrprov not running
+ fi
}
status() {
- PIDS=`pids`
- if [ "$PIDS" != "" ]
- then
- echo drtrprov running
- else
- echo drtrprov not running
- fi
+ PIDS=`pids`
+ if [ "$PIDS" != "" ]
+ then
+ echo drtrprov running
+ else
+ echo drtrprov not running
+ fi
}
case "$1" in
'start')
- start
- ;;
+ start
+ ;;
'stop')
- stop
- ;;
+ stop
+ ;;
'restart')
- stop
- sleep 20
- start
- ;;
+ stop
+ sleep 20
+ start
+ ;;
'status')
- status
- ;;
+ status
+ ;;
*)
- echo "Usage: $0 { start | stop | restart | status }"
- exit 1
- ;;
+ echo "Usage: $0 { start | stop | restart | status }"
+ exit 1
+ ;;
esac
exit 0
cd /opt/app/datartr;
if [ -f ${DRTR_PROV_KSTOREFILE:-etc/keystore} ]
then
- exit 0
+ exit 0
fi
echo `date '+%F %T,000'` WARN Certificate file "${DRTR_PROV_KSTOREFILE:-etc/keystore}" is missing >>${DRTR_PROV_LOGS:-logs}/provint.log
exit 1
This component is for the Data Router Provisioning Server software.
The following pre-requisite components should already be present:
- com.att.aft.swm:swm-cli
- com.att.aft.swm:swm-node
- - SWM Variables: AFTSWM_AUTOLINK_PARENTS=/opt/app:/opt/app/workload,/opt/app/aft:/opt/app/workload/aft
- com.att.platform:uam-auto
- com.att.java:jdk8lin
- com.att.platform:initd
- com.att.platform:port-fwd
- - SWM Variables: PLATFORM_PORT_FWD=80,8080|443,8443
- mysql:mysql
- mysql:mysql-config
- - SWM Variables: MYSQL_CONFIG_SIZE=small
- MYSQL_DB_DATABASES=datarouter
- MYSQL_DB_datarouter_USERS=datarouter,tier2
- MYSQL_DB_datarouter_USERS_datarouter_LEVEL=RW
- MYSQL_DB_datarouter_USERS_datarouter_PASSWORD=datarouter
- MYSQL_DB_datarouter_USERS_tier2_LEVEL=RO
- MYSQL_DB_datarouter_USERS_tier2_PASSWORD=<password>
- MYSQL_MAX_ALLOWED_PACKET=32M
- MYSQL_MAX_CONNECTIONS=300
- MYSQL_PASSWORD=datarouter
- MYSQL_PORT=3306
+ com.att.aft.swm:swm-cli
+ com.att.aft.swm:swm-node
+ - SWM Variables: AFTSWM_AUTOLINK_PARENTS=/opt/app:/opt/app/workload,/opt/app/aft:/opt/app/workload/aft
+ com.att.platform:uam-auto
+ com.att.java:jdk8lin
+ com.att.platform:initd
+ com.att.platform:port-fwd
+ - SWM Variables: PLATFORM_PORT_FWD=80,8080|443,8443
+ mysql:mysql
+ mysql:mysql-config
+ - SWM Variables: MYSQL_CONFIG_SIZE=small
+ MYSQL_DB_DATABASES=datarouter
+ MYSQL_DB_datarouter_USERS=datarouter,tier2
+ MYSQL_DB_datarouter_USERS_datarouter_LEVEL=RW
+ MYSQL_DB_datarouter_USERS_datarouter_PASSWORD=datarouter
+ MYSQL_DB_datarouter_USERS_tier2_LEVEL=RO
+ MYSQL_DB_datarouter_USERS_tier2_PASSWORD=<password>
+ MYSQL_MAX_ALLOWED_PACKET=32M
+ MYSQL_MAX_CONNECTIONS=300
+ MYSQL_PASSWORD=datarouter
+ MYSQL_PORT=3306
In a production environment, the SWM variables that MUST be overridden are:
- DRTR_PROV_ACTIVEPOD, DRTR_PROV_STANDBYPOD, DRTR_PROV_NODES
+ DRTR_PROV_ACTIVEPOD, DRTR_PROV_STANDBYPOD, DRTR_PROV_NODES
In addition, in a non-production environment, the DRTR_PROV_CNAME SWM variable
must also be overridden.
The SWM variables that can be set to control the provisioning server are:
DRTR_PROV_ACTIVEPOD
- The FQDN of the active POD
+ The FQDN of the active POD
DRTR_PROV_STANDBYPOD
- The FQDN of the standby POD
+ The FQDN of the standby POD
DRTR_PROV_CNAME (default feeds-drtr.web.att.com)
- The DNS CNAME used for the prov server in this environment.
+ The DNS CNAME used for the prov server in this environment.
DRTR_PROV_NODES
- Pipe-delimited list of DR nodes to init the DB with.
+ Pipe-delimited list of DR nodes to init the DB with.
DRTR_PROV_DOMAIN (default web.att.com)
- Domain to use for non-FQDN node names
+ Domain to use for non-FQDN node names
DRTR_PROV_INTHTTPPORT (default 8080)
- The TCP/IP port number the component should listen on for "go fetch"
- requests from the provisioning server
+ The TCP/IP port number the component should listen on for "go fetch"
+ requests from the provisioning server
DRTR_PROV_INTHTTPSPORT (default 8443)
- The TCP/IP port number the component should listen on for publish
- requests from feed publishers and other nodes
+ The TCP/IP port number the component should listen on for publish
+ requests from feed publishers and other nodes
DRTR_PROV_LOGS (default /opt/app/datartr/logs)
- The directory where log files should be kept
+ The directory where log files should be kept
DRTR_PROV_SPOOL (default /opt/app/datartr/spool)
- The directory where logfiles from the DR nodes are spooled before being
- imported into the DB.
+ The directory where logfiles from the DR nodes are spooled before being
+ imported into the DB.
DRTR_PROV_KEYMGRPASS (default changeit)
- The password for the key manager
+ The password for the key manager
DRTR_PROV_KSTOREFILE (default /opt/app/datartr/etc/keystore)
- The java keystore file containing the server certificate and private key
- for this server
+ The java keystore file containing the server certificate and private key
+ for this server
DRTR_PROV_KSTOREPASS (default changeit)
- The password for the keystore file
+ The password for the keystore file
DRTR_PROV_TSTOREFILE (by default, use the truststore from the Java JDK)
- The java keystore file containing the trusted certificate authority
- certificates
+ The java keystore file containing the trusted certificate authority
+ certificates
DRTR_PROV_TSTOREPASS (default changeit)
- The password for the trust store file. Only applies if a trust store
- file is specified.
+ The password for the trust store file. Only applies if a trust store
+ file is specified.
DRTR_PROV_DBLOGIN (default datarouter)
- The login used to access MariaDB
+ The login used to access MariaDB
DRTR_PROV_DBPASS (default datarouter)
- The password used to access MariaDB
+ The password used to access MariaDB
DRTR_PROV_DBSCRIPTS (default /opt/app/datartr/etc)
- The directory containing DB initialization scripts
+ The directory containing DB initialization scripts
if [ ! -x /usr/bin/curl ]
then
- echo provcmd: curl is required for this tool.
- exit 1
+ echo provcmd: curl is required for this tool.
+ exit 1
fi
optloop=
while [ -z "$optloop" ]
do
- if [ "$1" == '-s' ]
- then
- shift
- PROVSRVR="$1"
- shift
- elif [ "$1" == '-v' ]
- then
- shift
- VERBOSE=x
- elif [ "$1" == '-N' ]
- then
- shift
- NOPROXY='?noproxy=1'
- else
- optloop=1
- fi
+ if [ "$1" == '-s' ]
+ then
+ shift
+ PROVSRVR="$1"
+ shift
+ elif [ "$1" == '-v' ]
+ then
+ shift
+ VERBOSE=x
+ elif [ "$1" == '-N' ]
+ then
+ shift
+ NOPROXY='?noproxy=1'
+ else
+ optloop=1
+ fi
done
if [ -z "$PROVSRVR" ]
then
- echo "provcmd: you need to specify the server, either via the -s option"
- echo " or by setting and exporting PROVSRVR"
- exit 1
+ echo "provcmd: you need to specify the server, either via the -s option"
+ echo " or by setting and exporting PROVSRVR"
+ exit 1
fi
CMD="$1"
shift
if [ "$CMD" == 'delete' ]
then
- if [ $# -gt 0 ]
- then
- for i
- do
- [ -n "$VERBOSE" ] && echo curl -4 -k -X DELETE "https://$PROVSRVR/internal/api/$1$NOPROXY"
- curl -4 -k -X DELETE "https://$PROVSRVR/internal/api/$1$NOPROXY"
- done
- exit 0
- fi
+ if [ $# -gt 0 ]
+ then
+ for i
+ do
+ [ -n "$VERBOSE" ] && echo curl -4 -k -X DELETE "https://$PROVSRVR/internal/api/$1$NOPROXY"
+ curl -4 -k -X DELETE "https://$PROVSRVR/internal/api/$1$NOPROXY"
+ done
+ exit 0
+ fi
elif [ "$CMD" == 'create' ]
then
- if [ $# -eq 2 ]
- then
- # create (with POST), then set the value
- [ -n "$VERBOSE" ] && echo curl -4 -k -X POST --data '' "https://$PROVSRVR/internal/api/$1$NOPROXY"
- curl -4 -k -X POST --data '' "https://$PROVSRVR/internal/api/$1$NOPROXY"
- $PROVCMD set "$1" "$2"
- exit 0
- fi
+ if [ $# -eq 2 ]
+ then
+ # create (with POST), then set the value
+ [ -n "$VERBOSE" ] && echo curl -4 -k -X POST --data '' "https://$PROVSRVR/internal/api/$1$NOPROXY"
+ curl -4 -k -X POST --data '' "https://$PROVSRVR/internal/api/$1$NOPROXY"
+ $PROVCMD set "$1" "$2"
+ exit 0
+ fi
elif [ "$CMD" == 'get' ]
then
- if [ $# -eq 1 ]
- then
- # get
- [ -n "$VERBOSE" ] && echo curl -4 -k "https://$PROVSRVR/internal/api/$1$NOPROXY"
- curl -4 -k "https://$PROVSRVR/internal/api/$1$NOPROXY" 2>/dev/null | tr '|' '\012' | sort
- exit 0
- fi
+ if [ $# -eq 1 ]
+ then
+ # get
+ [ -n "$VERBOSE" ] && echo curl -4 -k "https://$PROVSRVR/internal/api/$1$NOPROXY"
+ curl -4 -k "https://$PROVSRVR/internal/api/$1$NOPROXY" 2>/dev/null | tr '|' '\012' | sort
+ exit 0
+ fi
elif [ "$CMD" == 'set' ]
then
- if [ $# -ge 2 ]
- then
- p="$1"
- shift
- v=""
- for i; do [ -n "$v" ] && v="$v|"; v="$v$i"; done
- # set (with PUT)
- ue=`urlencode "$v"`
- NOPROXY=`echo $NOPROXY | tr '?' '&'`
- [ -n "$VERBOSE" ] && echo curl -4 -k -X PUT "https://$PROVSRVR/internal/api/$p?val=$ue$NOPROXY"
- curl -4 -k -X PUT "https://$PROVSRVR/internal/api/$p?val=$ue$NOPROXY"
- exit 0
- fi
+ if [ $# -ge 2 ]
+ then
+ p="$1"
+ shift
+ v=""
+ for i; do [ -n "$v" ] && v="$v|"; v="$v$i"; done
+ # set (with PUT)
+ ue=`urlencode "$v"`
+ NOPROXY=`echo $NOPROXY | tr '?' '&'`
+ [ -n "$VERBOSE" ] && echo curl -4 -k -X PUT "https://$PROVSRVR/internal/api/$p?val=$ue$NOPROXY"
+ curl -4 -k -X PUT "https://$PROVSRVR/internal/api/$p?val=$ue$NOPROXY"
+ exit 0
+ fi
elif [ "$CMD" == 'append' ]
then
- if [ $# -ge 2 ]
- then
- p="$1"
- shift
- tmp=`curl -4 -k "https://$PROVSRVR/internal/api/$p$NOPROXY" 2>/dev/null`
- $PROVCMD set "$p" "$tmp" "$@"
- exit 0
- fi
+ if [ $# -ge 2 ]
+ then
+ p="$1"
+ shift
+ tmp=`curl -4 -k "https://$PROVSRVR/internal/api/$p$NOPROXY" 2>/dev/null`
+ $PROVCMD set "$p" "$tmp" "$@"
+ exit 0
+ fi
elif [ "$CMD" == 'remove' ]
then
- if [ $# -eq 2 ]
- then
- p="$1"
- rm="$2"
- $PROVCMD get "$p" | grep -v "^$rm\$" > /tmp/pc$$
- IFS=$'\r\n'
- $PROVCMD set "$p" `cat /tmp/pc$$`
- rm /tmp/pc$$
- exit 0
- fi
+ if [ $# -eq 2 ]
+ then
+ p="$1"
+ rm="$2"
+ $PROVCMD get "$p" | grep -v "^$rm\$" > /tmp/pc$$
+ IFS=$'\r\n'
+ $PROVCMD set "$p" `cat /tmp/pc$$`
+ rm /tmp/pc$$
+ exit 0
+ fi
fi
# Some error somewhere - display usage
GRP=`id -n -g`
if [ "$ID" != "datartr" ]
then
- echo runreports must be started as user datartr not $ID
- exit 1
+ echo runreports must be started as user datartr not $ID
+ exit 1
fi
if [ "$GRP" != "datartr" ]
then
- echo runreports must be started as group datartr not $GRP
- exit 1
+ echo runreports must be started as group datartr not $GRP
+ exit 1
fi
if [ "`pgrep -u mysql mysqld`" = "" ]
then
- echo MariaDB is not running. It must be started before runreports
- exit 1
+ echo MariaDB is not running. It must be started before runreports
+ exit 1
fi
# Volume report