Ingest onap-lab-ci jjb's 92/93992/2
author    Brian Freeman <bf1936@att.com>
          Tue, 20 Aug 2019 18:13:58 +0000 (13:13 -0500)
committer Daniel Rose <dr695h@att.com>
          Wed, 21 Aug 2019 17:12:16 +0000 (17:12 +0000)
Issue-ID: INT-1215
Change-Id: I448fb7a147daa26d760df6c83fef75aa69f05879
Signed-off-by: Brian Freeman <bf1936@att.com>
28 files changed:
deployment/onap-lab-ci/LICENSE [new file with mode: 0644]
deployment/onap-lab-ci/README.md [new file with mode: 0644]
deployment/onap-lab-ci/heat/grafana/grafana.yaml [new file with mode: 0644]
deployment/onap-lab-ci/heat/grafana/grafana_vm_config.yaml [new file with mode: 0644]
deployment/onap-lab-ci/heat/jenkins/jenkins_vm_config.yaml [new file with mode: 0644]
deployment/onap-lab-ci/heat/jenkins/lab-jenkins.yaml [new file with mode: 0644]
deployment/onap-lab-ci/jjb/jobs.yaml [new file with mode: 0644]
deployment/onap-lab-ci/jjb/macros.yaml [new file with mode: 0644]
deployment/onap-lab-ci/jjb/oom-template.yaml [new file with mode: 0644]
deployment/onap-lab-ci/jjb/rke-template.yaml [new file with mode: 0644]
deployment/onap-lab-ci/jobs/casablanca/windriver-sb00-oom-resilience-instantiateDemoVFWCL/pods_to_delete.txt [new file with mode: 0644]
deployment/onap-lab-ci/jobs/casablanca/windriver-sb00-oom-resilience-instantiateDemoVFWCL/queue_all_jobs.sh [new file with mode: 0755]
deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-healthdist/pods_to_delete.txt [new file with mode: 0644]
deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-healthdist/queue_all_jobs.sh [new file with mode: 0755]
deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-instantiateDemoVFWCL/pods_to_delete.txt [new file with mode: 0644]
deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-instantiateDemoVFWCL/queue_all_jobs.sh [new file with mode: 0755]
deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-vfwclosedloop/pods_to_delete.txt [new file with mode: 0644]
deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-vfwclosedloop/queue_all_jobs.sh [new file with mode: 0755]
deployment/onap-lab-ci/jobs/dublin/queue_all_jobs.sh [new file with mode: 0755]
deployment/onap-lab-ci/jobs/dublin/windriver-sb02-resilience-healthdist/pods_to_delete.txt [new file with mode: 0644]
deployment/onap-lab-ci/jobs/dublin/windriver-sb02-resilience-instantiateDemoVFWCL/pods_to_delete.txt [new file with mode: 0644]
deployment/onap-lab-ci/jobs/dublin/windriver-sb02-resilience-vfwclosedloop/pods_to_delete.txt [new file with mode: 0644]
deployment/onap-lab-ci/jobs/get-result.sh [new file with mode: 0755]
deployment/onap-lab-ci/scripts/load_all.sh [new file with mode: 0755]
deployment/onap-lab-ci/scripts/load_influx.sh [new file with mode: 0755]
deployment/onap-lab-ci/scripts/mirror-nexus.sh [new file with mode: 0755]
deployment/onap-lab-ci/scripts/process-pods.sh [new file with mode: 0755]
deployment/onap-lab-ci/scripts/process-robot.sh [new file with mode: 0755]
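
Note: this change imports the onap-lab-ci tooling under deployment/onap-lab-ci: Heat templates for a Jenkins VM and a Grafana/InfluxDB VM, Jenkins Job Builder (JJB) definitions for the lab jobs, and helper scripts for archiving and post-processing results. The JJB definitions are loaded onto the lab Jenkins with jenkins-jobs (the Jenkins VM cloud-config below runs "jenkins-jobs update -r jjb"). A minimal local sketch, assuming a jenkins_jobs.ini that points at the lab Jenkins (config path, URL, and credentials are placeholders, not part of this change):

    cd deployment/onap-lab-ci
    # render the job XML locally without touching any Jenkins instance
    jenkins-jobs test -o /tmp/jjb-out -r jjb
    # push the rendered jobs to the Jenkins configured in jenkins_jobs.ini
    jenkins-jobs --conf ~/.config/jenkins_jobs/jenkins_jobs.ini update -r jjb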

diff --git a/deployment/onap-lab-ci/LICENSE b/deployment/onap-lab-ci/LICENSE
new file mode 100644 (file)
index 0000000..261eeb9
--- /dev/null
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/deployment/onap-lab-ci/README.md b/deployment/onap-lab-ci/README.md
new file mode 100644 (file)
index 0000000..d7efc7b
--- /dev/null
@@ -0,0 +1 @@
+# onap-lab-ci
\ No newline at end of file
diff --git a/deployment/onap-lab-ci/heat/grafana/grafana.yaml b/deployment/onap-lab-ci/heat/grafana/grafana.yaml
new file mode 100644 (file)
index 0000000..c32342b
--- /dev/null
@@ -0,0 +1,36 @@
+heat_template_version: 2015-10-15
+
+resources:
+  influxdb_data:
+    type: OS::Cinder::Volume
+    properties:
+      size: 100
+  grafana:
+    type: OS::Nova::Server
+    properties:
+      name: grafana
+      image: bionic
+      flavor: m1.large
+      key_name: mykey
+      networks:
+        - network: c5ef4668-8a11-441d-8ce8-4a211a94885f
+      user_data_format: RAW
+      user_data:
+        str_replace:
+          params:
+            "%voldata_id%": { get_resource: influxdb_data }
+          template:
+            get_file: grafana_vm_config.yaml
+  influxdb_data_att:
+    type: OS::Cinder::VolumeAttachment
+    properties:
+      instance_uuid: { get_resource: grafana }
+      volume_id: { get_resource: influxdb_data }
+
+outputs:
+  instance_name:
+    description: Name of the instance.
+    value: { get_attr: [ grafana, name ] }
+  instance_ip:
+    description: IP address of the instance.
+    value: { get_attr: [ grafana, first_address ] }
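
Note: the template above creates a 100 GB Cinder volume, boots a "bionic" Nova server, and substitutes the volume ID into the cloud-config via str_replace; the get_file reference is resolved client-side, so the stack must be created from a checkout that has grafana_vm_config.yaml next to the template. A hedged launch sketch, shown only to illustrate how the outputs defined above are consumed (stack name is an example; image, flavor, key, and network are hard-coded in the template):

    # assumes the lab's OpenStack RC file has already been sourced
    openstack stack create -t deployment/onap-lab-ci/heat/grafana/grafana.yaml grafana
    # read back the IP published by the outputs section
    openstack stack output show grafana instance_ip -c output_value -f value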
diff --git a/deployment/onap-lab-ci/heat/grafana/grafana_vm_config.yaml b/deployment/onap-lab-ci/heat/grafana/grafana_vm_config.yaml
new file mode 100644 (file)
index 0000000..1151d70
--- /dev/null
@@ -0,0 +1,61 @@
+#cloud-config
+# vim: syntax=yaml
+write_files:
+- path: /opt/format-disks
+  permissions: '0700'
+  content: |
+    #!/bin/bash
+    voldata_id="%voldata_id%"
+    voldata_dev="/dev/disk/by-id/virtio-$(echo ${voldata_id} | cut -c -20)"
+    mkfs.ext4 ${voldata_dev}
+    mkdir -pv /var/lib/influxdb
+    echo "${voldata_dev} /var/lib/influxdb ext4 defaults 1 2" >> /etc/fstab
+    mount /var/lib/influxdb
+- path: /opt/grafana_vm_entrypoint.sh
+  permissions: '0755'
+  content: |
+    #!/bin/bash -x
+
+    printenv
+
+    cp ~ubuntu/.ssh/authorized_keys /root/.ssh
+
+    cat > /etc/apt/apt.conf.d/90curtin-aptproxy<<EOF
+    Acquire::http::Proxy "http://10.145.122.117:8000/";
+    EOF
+
+    source /etc/lsb-release
+    curl -sL https://repos.influxdata.com/influxdb.key | sudo apt-key add -
+    echo "deb https://repos.influxdata.com/${DISTRIB_ID,,} ${DISTRIB_CODENAME} stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
+    curl https://packagecloud.io/gpg.key | sudo apt-key add -
+    echo "deb https://packagecloud.io/grafana/stable/debian/ stretch main" | sudo tee /etc/apt/sources.list.d/grafana.list
+
+    apt-get update
+
+    cat >> /etc/inputrc <<EOF
+    set show-all-if-ambiguous on
+    set show-all-if-unmodified on
+    set match-hidden-files off
+    set mark-symlinked-directories on
+    EOF
+
+    export HOME=/root
+    apt-get -y install git
+    git config --global user.email "grafana@localhost"
+    git config --global user.name "grafana"
+    apt-get -y install etckeeper
+
+    apt-get -y install influxdb
+    apt-get -y install grafana
+
+    systemctl daemon-reload
+
+    systemctl enable influxdb
+    systemctl start influxdb
+
+    systemctl enable grafana-server.service
+    systemctl start grafana-server
+
+runcmd:
+- /opt/format-disks
+- /opt/grafana_vm_entrypoint.sh
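
Note: the format-disks script above locates the attached Cinder volume through its virtio serial, which appears under /dev/disk/by-id as the volume UUID truncated to 20 characters, then formats it and mounts it at /var/lib/influxdb. A purely illustrative expansion of that mapping (the UUID is made up):

    voldata_id="2f0113c3-8a50-4f1b-a347-5d5c7b4b9d1a"   # example volume UUID
    echo "/dev/disk/by-id/virtio-$(echo ${voldata_id} | cut -c -20)"
    # -> /dev/disk/by-id/virtio-2f0113c3-8a50-4f1b-a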
diff --git a/deployment/onap-lab-ci/heat/jenkins/jenkins_vm_config.yaml b/deployment/onap-lab-ci/heat/jenkins/jenkins_vm_config.yaml
new file mode 100644 (file)
index 0000000..0bf6347
--- /dev/null
@@ -0,0 +1,176 @@
+#cloud-config
+# vim: syntax=yaml
+write_files:
+- path: /opt/format-disks
+  permissions: '0700'
+  content: |
+    #!/bin/bash
+    voldata_id="%voldata_id%"
+    voldata_dev="/dev/disk/by-id/virtio-$(echo ${voldata_id} | cut -c -20)"
+    mkfs.ext4 ${voldata_dev}
+    mkdir -pv /var/lib/jenkins
+    echo "${voldata_dev} /var/lib/jenkins ext4 defaults 1 2" >> /etc/fstab
+    mount /var/lib/jenkins
+- path: /opt/jenkins_vm_entrypoint.sh
+  permissions: '0755'
+  content: |
+    #!/bin/bash -x
+
+    printenv
+
+    echo `hostname -I` `hostname` >> /etc/hosts
+
+    function restart_jenkins() {
+      sudo systemctl restart jenkins
+      sleep 1
+      echo -n "Restarting jenkins"
+      until $(curl --output /dev/null --silent --head --fail http://localhost:8080/login); do
+        printf '.'
+        sleep 3
+      done
+      echo
+      sleep 1
+    }
+
+    cp ~ubuntu/.ssh/authorized_keys /root/.ssh
+
+    cat > /etc/apt/apt.conf.d/90curtin-aptproxy<<EOF
+    Acquire::http::Proxy "http://10.145.122.117:8000/";
+    EOF
+
+    apt-get update
+
+    cat >> /etc/inputrc <<EOF
+    set show-all-if-ambiguous on
+    set show-all-if-unmodified on
+    set match-hidden-files off
+    set mark-symlinked-directories on
+    EOF
+
+    export HOME=/root
+    apt-get -y install git
+    git config --global user.email "jenkins@localhost"
+    git config --global user.name "jenkins"
+    apt-get -y install etckeeper
+    apt-get -y install curl openjdk-8-jre unzip python3-openstackclient python3-heatclient python3-jenkins-job-builder jq openvpn python3-pip xmlstarlet tree
+    pip3 install robotframework
+
+    # install Jenkins
+    wget -q -O - https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo apt-key add -
+    sh -c 'echo deb http://pkg.jenkins.io/debian-stable binary/ > /etc/apt/sources.list.d/jenkins.list'
+    apt-get update
+    apt-get -y install jenkins
+
+    mkdir -p ~jenkins/.ssh
+    cp ~ubuntu/.ssh/authorized_keys ~jenkins/.ssh
+    chown -R jenkins:jenkins ~jenkins/.ssh
+
+    su -l jenkins -c "/opt/jenkins-init-1.sh"
+
+    restart_jenkins
+
+    su -l jenkins -c "/opt/jenkins-init-2.sh"
+
+    restart_jenkins
+
+- path: /opt/jenkins-init-1.sh
+  permissions: '0755'
+  content: |
+    #!/bin/bash -x
+
+    git config --global user.email "jenkins@localhost"
+    git config --global user.name "jenkins"
+
+    cd ~jenkins
+
+    cp /etc/skel/.profile .
+    cat > .bashrc <<EOF
+    alias ls='ls --color -F'
+    EOF
+
+    git init
+
+    git add -A
+    git commit -m 'Initial installation config' > /dev/null
+
+    rm -f secrets/initialAdminPassword
+    rm -rf users/admin
+    rsync -avP /opt/jenkins/ .
+
+    git add -A
+    git commit -m 'Set up jenkins user' > /dev/null
+
+- path: /opt/jenkins-init-2.sh
+  permissions: '0755'
+  content: |
+    #!/bin/bash -x
+
+    cd ~jenkins
+    ln -s /var/cache/jenkins/war/WEB-INF/jenkins-cli.jar
+
+    sleep 20
+    java -jar jenkins-cli.jar -s http://localhost:8080/ -auth jenkins:jenkins install-plugin git
+    java -jar jenkins-cli.jar -s http://localhost:8080/ -auth jenkins:jenkins install-plugin ws-cleanup
+    java -jar jenkins-cli.jar -s http://localhost:8080/ -auth jenkins:jenkins install-plugin envinject
+    java -jar jenkins-cli.jar -s http://localhost:8080/ -auth jenkins:jenkins install-plugin gerrit-trigger
+    java -jar jenkins-cli.jar -s http://localhost:8080/ -auth jenkins:jenkins install-plugin robot
+    java -jar jenkins-cli.jar -s http://localhost:8080/ -auth jenkins:jenkins install-plugin postbuildscript
+    java -jar jenkins-cli.jar -s http://localhost:8080/ -auth jenkins:jenkins install-plugin timestamper
+    java -jar jenkins-cli.jar -s http://localhost:8080/ -auth jenkins:jenkins install-plugin build-blocker-plugin
+    java -jar jenkins-cli.jar -s http://localhost:8080/ -auth jenkins:jenkins install-plugin build-timeout
+    java -jar jenkins-cli.jar -s http://localhost:8080/ -auth jenkins:jenkins install-plugin influxdb
+
+    git add -A
+    git commit -m 'Install initial plugins' > /dev/null
+
+    git clone https://github.com/garyiwu/onap-lab-ci.git
+    cd onap-lab-ci
+    jenkins-jobs update -r jjb
+
+- path: /opt/jenkins/jenkins.install.InstallUtil.lastExecVersion
+  content: |
+    2.46.3
+- path: /opt/jenkins/users/jenkins/config.xml
+  content: |
+    <?xml version='1.0' encoding='UTF-8'?>
+    <user>
+      <fullName>jenkins</fullName>
+      <properties>
+        <jenkins.security.ApiTokenProperty>
+          <apiToken>{AQAAABAAAAAwQAGpldGajxw//dhxd53gZGv4w0JnZYDETTLBQdpotQXt02s0lq13YrhyaytbLFMflb98hzWY9YBlDIThZt7u+Q==}</apiToken>
+        </jenkins.security.ApiTokenProperty>
+        <com.cloudbees.plugins.credentials.UserCredentialsProvider_-UserCredentialsProperty plugin="credentials@2.1.13">
+          <domainCredentialsMap class="hudson.util.CopyOnWriteMap$Hash"/>
+        </com.cloudbees.plugins.credentials.UserCredentialsProvider_-UserCredentialsProperty>
+        <hudson.model.MyViewsProperty>
+          <views>
+            <hudson.model.AllView>
+              <owner class="hudson.model.MyViewsProperty" reference="../../.."/>
+              <name>all</name>
+              <filterExecutors>false</filterExecutors>
+              <filterQueue>false</filterQueue>
+              <properties class="hudson.model.View$PropertyList"/>
+            </hudson.model.AllView>
+          </views>
+        </hudson.model.MyViewsProperty>
+        <org.jenkinsci.plugins.displayurlapi.user.PreferredProviderUserProperty plugin="display-url-api@2.0">
+          <providerId>default</providerId>
+        </org.jenkinsci.plugins.displayurlapi.user.PreferredProviderUserProperty>
+        <hudson.model.PaneStatusProperties>
+          <collapsed/>
+        </hudson.model.PaneStatusProperties>
+        <hudson.search.UserSearchProperty>
+          <insensitiveSearch>false</insensitiveSearch>
+        </hudson.search.UserSearchProperty>
+        <hudson.security.HudsonPrivateSecurityRealm_-Details>
+          <passwordHash>#jbcrypt:$2a$10$Esc9z/mnK/CQ8crgFbE3/eP1EI6pvzIHRBe3SSik7rrNt.DDftON2</passwordHash>
+        </hudson.security.HudsonPrivateSecurityRealm_-Details>
+        <hudson.tasks.Mailer_-UserProperty plugin="mailer@1.20">
+          <emailAddress>jenkins@localhost</emailAddress>
+        </hudson.tasks.Mailer_-UserProperty>
+      </properties>
+    </user>
+
+runcmd:
+- /opt/format-disks
+- /opt/jenkins_vm_entrypoint.sh
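
Note: the entrypoint above installs Jenkins from pkg.jenkins.io, pre-seeds the jenkins user and the plugin set through jenkins-cli.jar, and then loads the JJB definitions with "jenkins-jobs update -r jjb". Later plugin additions would presumably follow the same CLI pattern; a small sketch (the plugin name is only an example, not one this change installs):

    java -jar jenkins-cli.jar -s http://localhost:8080/ -auth jenkins:jenkins install-plugin ssh-agent
    java -jar jenkins-cli.jar -s http://localhost:8080/ -auth jenkins:jenkins safe-restart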
diff --git a/deployment/onap-lab-ci/heat/jenkins/lab-jenkins.yaml b/deployment/onap-lab-ci/heat/jenkins/lab-jenkins.yaml
new file mode 100644 (file)
index 0000000..94fb79e
--- /dev/null
@@ -0,0 +1,36 @@
+heat_template_version: 2015-10-15
+
+resources:
+  jenkins_data:
+    type: OS::Cinder::Volume
+    properties:
+      size: 400
+  jenkins:
+    type: OS::Nova::Server
+    properties:
+      name: jenkins
+      image: bionic
+      flavor: m1.medium
+      key_name: mykey
+      networks:
+        - network: c5ef4668-8a11-441d-8ce8-4a211a94885f
+      user_data_format: RAW
+      user_data:
+        str_replace:
+          params:
+            "%voldata_id%": { get_resource: jenkins_data }
+          template:
+            get_file: jenkins_vm_config.yaml
+  jenkins_data_att:
+    type: OS::Cinder::VolumeAttachment
+    properties:
+      instance_uuid: { get_resource: jenkins }
+      volume_id: { get_resource: jenkins_data }
+
+outputs:
+  instance_name:
+    description: Name of the instance.
+    value: { get_attr: [ jenkins, name ] }
+  instance_ip:
+    description: IP address of the instance.
+    value: { get_attr: [ jenkins, first_address ] }
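
Note: this stack mirrors the Grafana one, with a 400 GB Cinder volume that the cloud-config mounts at /var/lib/jenkins, so job history and workspaces live on the volume rather than the root disk. A hedged spot check after the stack is up (the stack name "jenkins" and the key path are examples; the job scripts later in this change use ~/.ssh/onap_key):

    JENKINS_IP=$(openstack stack output show jenkins instance_ip -c output_value -f value)
    # confirm the data volume is the filesystem behind /var/lib/jenkins
    ssh -i ~/.ssh/onap_key ubuntu@$JENKINS_IP 'df -h /var/lib/jenkins'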
diff --git a/deployment/onap-lab-ci/jjb/jobs.yaml b/deployment/onap-lab-ci/jjb/jobs.yaml
new file mode 100644 (file)
index 0000000..83166e0
--- /dev/null
@@ -0,0 +1,143 @@
+- project:
+    name: staging
+    jobs:
+     - '{env}-staging-{frequency}'
+    integration-branch: 'master'
+    oom-branch: 'staging'
+    env:
+     - 'windriver':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-Staging-Daily'
+         stack-name: 'staging'
+         frequency: 'daily'
+         disabled_var: false
+         triggers_var:
+          - timed: 'H 4 * * *'
+     - 'windriver-sb00':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-SB-00'
+         stack-name: 'sb00'
+         frequency: 'manual'
+         disabled_var: false
+     - 'windriver-sb01':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-SB-01'
+         stack-name: 'sb01'
+         frequency: 'manual'
+         disabled_var: false
+     - 'windriver-sb02':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-SB-02'
+         stack-name: 'sb02'
+         frequency: 'manual'
+         disabled_var: false
+     - 'windriver-sb03':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-SB-03'
+         stack-name: 'sb03'
+         frequency: 'manual'
+         disabled_var: false
+     - 'windriver-sb04':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-SB-04'
+         stack-name: 'sb04'
+         frequency: 'manual'
+         disabled_var: false
+     - 'windriver-integration-design':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-Design'
+         stack-name: 'design'
+         frequency: 'manual'
+         disabled_var: false
+     - 'windriver-integration-instantiation':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-Instantiation'
+         stack-name: 'instantiation'
+         frequency: 'manual'
+         disabled_var: false
+     - 'windriver-integration-closedloop':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-ClosedLoop'
+         stack-name: 'closedloop'
+         frequency: 'manual'
+         disabled_var: false
+
+- project:
+    name: release
+    jobs:
+     - '{env}-release-{frequency}'
+    integration-branch: 'master'
+    oom-branch: 'master'
+    env:
+     - 'windriver':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-Release-Daily'
+         stack-name: 'release'
+         frequency: 'daily'
+         disabled_var: false
+         triggers_var:
+          - timed: 'H 6 * * *'
+     - 'windriver-longevity':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-Longevity'
+         stack-name: 'long'
+         frequency: 'manual'
+         disabled_var: false
+     - 'windriver-sb00':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-SB-00'
+         stack-name: 'sb00'
+         frequency: 'manual'
+         disabled_var: false
+     - 'windriver-sb01':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-SB-01'
+         stack-name: 'sb01'
+         frequency: 'manual'
+         disabled_var: false
+     - 'windriver-sb02':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-SB-02'
+         stack-name: 'sb02'
+         frequency: 'manual'
+         disabled_var: false
+     - 'windriver-sb03':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-SB-03'
+         stack-name: 'sb03'
+         frequency: 'manual'
+         disabled_var: false
+     - 'windriver-sb04':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-SB-04'
+         stack-name: 'sb04'
+         frequency: 'manual'
+         disabled_var: false
+
+- project:
+    name: dublin
+    jobs:
+     - '{env}-release-{frequency}'
+    integration-branch: 'master'
+    oom-branch: 'dublin'
+    env:
+     - 'windriver-dublin-sb04':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-SB-04'
+         stack-name: 'dublin'
+         frequency: 'manual'
+         disabled_var: false
+
+- project:
+    name: stability
+    jobs:
+     - '{env}-stability72hr'
+     - '{env}-vfwclosedloop'
+    integration-branch: 'master'
+    env:
+     - 'windriver-longevity':
+         lab-name: 'windriver'
+         tenant-name: 'Integration-Longevity'
+         stack-name: 'long'
+         disabled_var: false
+
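
Note: JJB expands each env entry against the listed job-template names, so, for example, the staging project's 'windriver' entry (frequency: daily) becomes a job named windriver-staging-daily and 'windriver-sb00' becomes windriver-sb00-staging-manual; the corresponding job-templates are defined in the accompanying template files in this directory. One way to see the full expansion without a Jenkins instance (output directory is arbitrary):

    jenkins-jobs test -o /tmp/jjb-out -r deployment/onap-lab-ci/jjb
    ls /tmp/jjb-out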
diff --git a/deployment/onap-lab-ci/jjb/macros.yaml b/deployment/onap-lab-ci/jjb/macros.yaml
new file mode 100644 (file)
index 0000000..47d4241
--- /dev/null
@@ -0,0 +1,163 @@
+---
+- scm:
+    name: git-integration
+    scm:
+      - git:
+          url: 'http://gerrit.onap.org/r/integration'
+          branches:
+            - 'origin/{branch}'
+          wipe-workspace: false
+          skip-tag: true
+          timeout: 30
+
+- scm:
+    name: gerrit-trigger-scm
+    scm:
+      - git:
+          url: 'http://gerrit.onap.org/r/$GERRIT_PROJECT'
+          refspec: '$GERRIT_REFSPEC'
+          branches:
+            - 'origin/$GERRIT_BRANCH'
+          skip-tag: true
+          choosing-strategy: 'gerrit'
+          submodule:
+            recursive: true
+
+- publisher:
+    name: integration-robot
+    publishers:
+      - robot:
+          output-path: 'archives'
+          other-files: ''
+          unstable-threshold: 60
+          pass-threshold: 100
+          only-critical: false
+      - postbuildscript:
+          builders:
+            - role: BOTH
+              build-on:
+                - ABORTED
+                - FAILURE
+                - NOT_BUILT
+                - SUCCESS
+                - UNSTABLE
+              build-steps:
+                - robot-influxdb
+          mark-unstable-if-failed: true
+
+- publisher:
+    name: pods-influxdb
+    publishers:
+      - postbuildscript:
+          builders:
+            - role: BOTH
+              build-on:
+                - ABORTED
+                - FAILURE
+                - NOT_BUILT
+                - SUCCESS
+                - UNSTABLE
+              build-steps:
+                - pods-influxdb
+          mark-unstable-if-failed: true
+
+- publisher:
+    name: archive-logs
+    publishers:
+      - postbuildscript:
+          builders:
+            - role: BOTH
+              build-on:
+                - ABORTED
+                - FAILURE
+                - NOT_BUILT
+                - SUCCESS
+                - UNSTABLE
+              build-steps:
+                - archive-logs
+          mark-unstable-if-failed: true
+
+- builder:
+    name: archive-logs
+    builders:
+      - shell: |
+          #!/bin/bash -x
+          mkdir -p /var/www/html/logs/$JOB_NAME/$BUILD_NUMBER
+          curl -s -f "http://localhost:8080/jenkins/job/$JOB_NAME/$BUILD_NUMBER/consoleText" > $WORKSPACE/archives/console.log
+          curl -s -f "http://localhost:8080/jenkins/job/$JOB_NAME/$BUILD_NUMBER/timestamps/?time=HH:mm:ssZ&appendLog" >  $WORKSPACE/archives/console-source-timestamp.log
+          rsync -avt $WORKSPACE/archives/ /var/www/html/logs/$JOB_NAME/$BUILD_NUMBER
+          echo
+          echo "Browse logs at http://onapci.org/logs/$JOB_NAME/$BUILD_NUMBER/"
+          echo
+
+- builder:
+    name: robot-influxdb
+    builders:
+      - shell: |
+          #!/bin/bash -x
+          # $JENKINS_HOME/onap-lab-ci/scripts/process-robot.sh $WORKSPACE/archives/output.xml $JOB_NAME $BUILD_NUMBER
+
+- builder:
+    name: pods-influxdb
+    builders:
+      - shell: |
+          #!/bin/bash -x
+          # $JENKINS_HOME/onap-lab-ci/scripts/process-pods.sh $WORKSPACE/archives/onap-pods.json $JOB_NAME $BUILD_NUMBER
+
+- publisher:
+    name: trigger-lf-lab-job
+    publishers:
+      - postbuildscript:
+          builders:
+            - role: BOTH
+              build-on:
+                - ABORTED
+                - FAILURE
+                - NOT_BUILT
+                - SUCCESS
+                - UNSTABLE
+              build-steps:
+                - trigger-lf-lab-job:
+                    lab-name: '{lab-name}'
+          mark-unstable-if-failed: false
+
+- builder:
+    name: trigger-lf-lab-job
+    builders:
+      - shell: |
+          #!/bin/bash
+          set +x
+          LF_JOBS="tlab-oom-daily tlab-oom-staging-daily windriver-oom-daily windriver-oom-staging-daily"
+          echo $LF_JOBS | grep -q $JOB_NAME
+          if [ $? -ne 0 ]; then
+            exit 0
+          fi
+
+          . $JENKINS_HOME/onap-lab-ci/labs/{lab-name}-openrc
+
+          set -v
+          CRUMB=$(curl -s -u "$LF_USERNAME:$LF_PASSWORD" 'https://jenkins.onap.org/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)')
+          curl -s -u "$LF_USERNAME:$LF_PASSWORD" -H "$CRUMB" -X POST "https://jenkins.onap.org/job/lab-$JOB_NAME/buildWithParameters?SRC_BUILD_URL=$BUILD_URL&LOG_DIR_URL=http://onapci.org/logs/$JOB_NAME/$BUILD_NUMBER/"
+
+- trigger:
+    name: gerrit-trigger-patch-submitted
+    triggers:
+      - gerrit:
+          server-name: 'gerrit.onap.org'
+          trigger-on:
+            - patchset-created-event:
+                exclude-drafts: 'false'
+                exclude-trivial-rebase: 'false'
+                exclude-no-code-change: 'false'
+            - draft-published-event
+            - comment-added-contains-event:
+                comment-contains-value: 'recheck'
+          projects:
+            - project-compare-type: 'ANT'
+              project-pattern: '{project}'
+              branches:
+                - branch-compare-type: 'ANT'
+                  branch-pattern: '**/{branch}'
+              file-paths:
+                - compare-type: 'ANT'
+                  pattern: '{files}'
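
Note on trigger-lf-lab-job above: the "echo $LF_JOBS | grep -q $JOB_NAME" gate is a substring match against the allow list, so any job whose name is contained in one of the listed names passes. A stricter exact-token variant, purely illustrative and not part of this change:

    case " $LF_JOBS " in
      *" $JOB_NAME "*) ;;   # job is on the allow list; continue to the remote trigger
      *) exit 0 ;;          # anything else skips the LF Jenkins trigger
    esac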
diff --git a/deployment/onap-lab-ci/jjb/oom-template.yaml b/deployment/onap-lab-ci/jjb/oom-template.yaml
new file mode 100644 (file)
index 0000000..af8c67a
--- /dev/null
@@ -0,0 +1,523 @@
+---
+
+- job-template:
+    name: '{env}-resilience-healthdist'
+    disabled: '{obj:disabled_var}'
+    project-type: freestyle
+    lab-name: ''
+    tenant-name: ''
+    scm:
+      - git-integration:
+          branch: "{integration-branch}"
+    wrappers:
+      - timestamps
+    parameters:
+      - string:
+          name: POD_TO_DELETE
+          description: 'Name of pod to delete.  This string will be grepped against the pod list and used to delete the pod.'
+    properties:
+      - build-blocker:
+          blocking-jobs:
+            - "{env}-deploy"
+            - "{env}-healthcheck"
+    builders:
+      - shell: |
+          #!/bin/bash
+          set +x
+          . $WORKSPACE/deployment/heat/onap-rke/env/{lab-name}/{tenant-name}-openrc
+          . $JENKINS_HOME/onap-lab-ci/labs/{lab-name}-openrc
+          source $WORKSPACE/test/ete/scripts/install_openstack_cli.sh
+
+          set -x
+          rm -rf $WORKSPACE/archives
+          mkdir -p $WORKSPACE/archives
+
+          SSH_KEY=~/.ssh/onap_key
+          STACK_NAME="{stack-name}"
+          NFS_IP=$(openstack stack output show $STACK_NAME nfs_vm_ip -c output_value -f value)
+          K8S_IP=$(openstack stack output show $STACK_NAME k8s_01_vm_ip -c output_value -f value)
+          ssh-keygen -R $NFS_IP
+
+          echo "kubectl top nodes" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          echo "kubectl get pods -n onap | grep -vE 'Completed|Error|1/1|2/2|3/3'" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+
+          ROBOT_POD=$(echo "kubectl -n onap get pods | grep robot | sed 's/ .*//'" | ssh -i $SSH_KEY -o StrictHostKeychecking=no ubuntu@$NFS_IP sudo su)
+          if [ "$ROBOT_POD" == "" ]; then
+            exit 1
+          fi
+
+          POD_TO_KILL=$(echo "kubectl -n onap get pods | grep $POD_TO_DELETE | sed 's/ .*//' | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          echo "kubectl -n onap delete pod $POD_TO_KILL" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          for n in $(seq 1 180); do
+            sleep 30
+            RESULT=$(ssh -i $SSH_KEY ubuntu@$NFS_IP 'sudo su -c "kubectl -n onap get pods"' | grep -vE 'NAME|Completed|Error|1/1|2/2|3/3' | wc -l)
+            if [[ $? -eq 0 && $RESULT -eq 0 ]]; then
+              break
+            fi
+          done
+
+          ssh -i $SSH_KEY ubuntu@$NFS_IP  'sudo su -l root -c "/root/oom/kubernetes/robot/ete-k8s.sh onap healthdist"'
+          retval=$?
+
+          LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep healthdist | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          if [ "$LOG_DIR" == "" ]; then
+            exit 1
+          fi
+
+          rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$NFS_IP:/dockerdata-nfs/dev-robot/robot/logs/$LOG_DIR/ $WORKSPACE/archives/
+
+          echo "kubectl top nodes" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          echo "kubectl get pods -n onap | grep -vE 'Completed|Error|1/1|2/2|3/3'" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+
+          echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+
+          exit 0
+    publishers:
+      - integration-robot
+      - archive-logs
+
+- job-template:
+    name: '{env}-resilience-instantiateDemoVFWCL'
+    disabled: '{obj:disabled_var}'
+    project-type: freestyle
+    lab-name: ''
+    tenant-name: ''
+    scm:
+      - git-integration:
+          branch: "{integration-branch}"
+    wrappers:
+      - timestamps
+    parameters:
+      - string:
+          name: POD_TO_DELETE
+          description: 'Name of pod to delete.  This string will be grepped against the pod list and used to delete the pod.'
+    properties:
+      - build-blocker:
+          blocking-jobs:
+            - "{env}-deploy"
+            - "{env}-healthcheck"
+    builders:
+      - shell: |
+          #!/bin/bash
+          set +x
+          . $WORKSPACE/deployment/heat/onap-rke/env/{lab-name}/{tenant-name}-openrc
+          . $JENKINS_HOME/onap-lab-ci/labs/{lab-name}-openrc
+          source $WORKSPACE/test/ete/scripts/install_openstack_cli.sh
+
+          set -x
+          rm -rf $WORKSPACE/archives
+          mkdir -p $WORKSPACE/archives
+
+          SSH_KEY=~/.ssh/onap_key
+          STACK_NAME="{stack-name}"
+          NFS_IP=$(openstack stack output show $STACK_NAME nfs_vm_ip -c output_value -f value)
+          K8S_IP=$(openstack stack output show $STACK_NAME k8s_01_vm_ip -c output_value -f value)
+          ssh-keygen -R $NFS_IP
+
+          echo "kubectl top nodes" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          echo "kubectl get pods -n onap | grep -vE 'Completed|Error|1/1|2/2|3/3'" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+
+          ROBOT_POD=$(echo "kubectl -n onap get pods | grep robot | sed 's/ .*//'" | ssh -i $SSH_KEY -o StrictHostKeychecking=no ubuntu@$NFS_IP sudo su)
+          if [ "$ROBOT_POD" == "" ]; then
+            exit 1
+          fi
+
+          POD_TO_KILL=$(echo "kubectl -n onap get pods | grep $POD_TO_DELETE | sed 's/ .*//' | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          echo "kubectl -n onap delete pod $POD_TO_KILL" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          for n in $(seq 1 180); do
+            sleep 30
+            RESULT=$(ssh -i $SSH_KEY ubuntu@$NFS_IP 'sudo su -c "kubectl -n onap get pods"' | grep -vE 'NAME|Completed|Error|1/1|2/2|3/3' | wc -l)
+            if [[ $? -eq 0 && $RESULT -eq 0 ]]; then
+              break
+            fi
+          done
+
+          ssh -i $SSH_KEY ubuntu@$NFS_IP  'sudo su -l root -c "/root/oom/kubernetes/robot/ete-k8s.sh onap instantiateDemoVFWCL"'
+          retval=$?
+
+          LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep instantiateDemoVFWCL | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          if [ "$LOG_DIR" == "" ]; then
+            exit 1
+          fi
+
+          rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$NFS_IP:/dockerdata-nfs/dev-robot/robot/logs/$LOG_DIR/ $WORKSPACE/archives/
+
+          echo "kubectl top nodes" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          echo "kubectl get pods -n onap | grep -vE 'Completed|Error|1/1|2/2|3/3'" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+
+          echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+
+          # clean up demo stacks
+          DEMO_STACKS=$(openstack stack list -f value -c "Stack Name" --sort creation_time:desc | grep Vfmodule_Demo_ )
+          if [ ! -z "$DEMO_STACKS" ]; then
+            openstack stack delete -y $DEMO_STACKS
+          fi
+
+          # clean up vVG VNFs
+          VVG_SERVERS=$(openstack server list -f value -c Name  --sort creation_time:desc | grep vVG_)
+          if [ ! -z "$VVG_SERVERS" ]; then
+            openstack server delete $VVG_SERVERS
+          fi
+
+          exit 0
+    publishers:
+      - integration-robot
+      - archive-logs
+
+- job-template:
+    name: '{env}-resilience-vfwclosedloop'
+    disabled: '{obj:disabled_var}'
+    project-type: freestyle
+    lab-name: ''
+    tenant-name: ''
+    scm:
+      - git-integration:
+          branch: "{integration-branch}"
+    wrappers:
+      - timestamps
+    parameters:
+      - string:
+          name: POD_TO_DELETE
+          description: 'Name of pod to delete.  This string will be grepped against the pod list and used to delete the pod.'
+    properties:
+      - build-blocker:
+          blocking-jobs:
+            - "{env}-deploy"
+            - "{env}-healthcheck"
+    builders:
+      - shell: |
+          #!/bin/bash
+          set +x
+          . $WORKSPACE/deployment/heat/onap-rke/env/{lab-name}/{tenant-name}-openrc
+          . $JENKINS_HOME/onap-lab-ci/labs/{lab-name}-openrc
+          source $WORKSPACE/test/ete/scripts/install_openstack_cli.sh
+
+          set -x
+          rm -rf $WORKSPACE/archives
+          mkdir -p $WORKSPACE/archives
+
+          SSH_KEY=~/.ssh/onap_key
+          STACK_NAME="{stack-name}"
+          NFS_IP=$(openstack stack output show $STACK_NAME nfs_vm_ip -c output_value -f value)
+          K8S_IP=$(openstack stack output show $STACK_NAME k8s_01_vm_ip -c output_value -f value)
+          ssh-keygen -R $NFS_IP
+
+          echo "kubectl top nodes" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          echo "kubectl get pods -n onap | grep -vE 'Completed|Error|1/1|2/2|3/3'" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+
+          ROBOT_POD=$(echo "kubectl -n onap get pods | grep robot | sed 's/ .*//'" | ssh -i $SSH_KEY -o StrictHostKeychecking=no ubuntu@$NFS_IP sudo su)
+          if [ "$ROBOT_POD" == "" ]; then
+            exit 1
+          fi
+
+          POD_TO_KILL=$(echo "kubectl -n onap get pods | grep $POD_TO_DELETE | sed 's/ .*//' | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          echo "kubectl -n onap delete pod $POD_TO_KILL" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          for n in $(seq 1 180); do
+            sleep 30
+            RESULT=$(ssh -i $SSH_KEY ubuntu@$NFS_IP 'sudo su -c "kubectl -n onap get pods"' | grep -vE 'NAME|Completed|Error|1/1|2/2|3/3' | wc -l)
+            if [[ $? -eq 0 && $RESULT -eq 0 ]]; then
+              break
+            fi
+          done
+
+          PKG_STACK=$(openstack stack list -f value -c "Stack Name" --sort creation_time:desc | grep Vfmodule_Demo_vFWCLvPKG | head -1)
+          PUBLIC_NET_ID=$(openstack stack show $STACK_NAME -f json | jq -r '.parameters.public_net_id')
+          PUBLIC_NET_NAME=$(openstack network show $PUBLIC_NET_ID -f value -c name)
+          PKG_IP=$(openstack stack resource show $PKG_STACK vpg_server_0 -f json | jq -r ".attributes.addresses.$PUBLIC_NET_NAME[0].addr")
+
+          for n in $(seq 1 10); do
+            echo "Wait for vfwclosedloop count $n of 10"
+            echo "/root/oom/kubernetes/robot/demo-k8s.sh onap vfwclosedloop $PKG_IP" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+            RESULT=$?
+            if [[ $RESULT -eq 0 ]]; then
+              break
+            fi
+          done
+
+          LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep vfwclosedloop | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          if [ "$LOG_DIR" == "" ]; then
+            exit 1
+          fi
+
+          rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$NFS_IP:/dockerdata-nfs/dev-robot/robot/logs/$LOG_DIR/ $WORKSPACE/archives/
+
+          echo "kubectl top nodes" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          echo "kubectl get pods -n onap | grep -vE 'Completed|Error|1/1|2/2|3/3'" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+
+          echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+
+          exit 0
+    publishers:
+      - integration-robot
+      - archive-logs
+
+
+- job-template:
+    disabled_var:
+    name: '{env}-stability72hr'
+    disabled: '{obj:disabled_var}'
+    project-type: freestyle
+    lab-name: ''
+    tenant-name: ''
+    scm:
+      - git-integration:
+          branch: "{integration-branch}"
+    wrappers:
+      - timestamps
+    triggers:
+      - timed: '0 * * * *'
+    properties:
+      - build-blocker:
+          blocking-jobs:
+            - "{env}-deploy"
+            - "{env}-healthcheck"
+            - "{env}-instantiate"
+            - "{env}-manual"
+            - "{env}-staging-manual"
+    builders:
+      - shell: |
+          #!/bin/bash
+          set +x
+          . $WORKSPACE/deployment/heat/onap-rke/env/{lab-name}/{tenant-name}-openrc
+          . $JENKINS_HOME/onap-lab-ci/labs/{lab-name}-openrc
+          source $WORKSPACE/test/ete/scripts/install_openstack_cli.sh
+
+          set -x
+          rm -rf $WORKSPACE/archives
+          mkdir -p $WORKSPACE/archives
+
+          SSH_KEY=~/.ssh/onap_key
+          STACK_NAME="{stack-name}"
+          NFS_IP=$(openstack stack output show $STACK_NAME nfs_vm_ip -c output_value -f value)
+          K8S_IP=$(openstack stack output show $STACK_NAME k8s_01_vm_ip -c output_value -f value)
+          ssh-keygen -R $NFS_IP
+
+          echo "kubectl top nodes" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          echo "kubectl get pods -n onap | grep -vE 'Completed|Error|1/1|2/2|3/3'" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+
+          ROBOT_POD=$(echo "kubectl -n onap get pods | grep robot | sed 's/ .*//'" | ssh -i $SSH_KEY -o StrictHostKeychecking=no ubuntu@$NFS_IP sudo su)
+          if [ "$ROBOT_POD" == "" ]; then
+            exit 1
+          fi
+
+          ssh -i $SSH_KEY ubuntu@$NFS_IP  'sudo su -l root -c "/root/oom/kubernetes/robot/ete-k8s.sh onap stability72hr"'
+          retval=$?
+
+          LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep stability72hr | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          if [ "$LOG_DIR" == "" ]; then
+            exit 1
+          fi
+
+          rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$NFS_IP:/dockerdata-nfs/dev-robot/robot/logs/$LOG_DIR/ $WORKSPACE/archives/
+
+          echo "kubectl top nodes" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          echo "kubectl get pods -n onap | grep -vE 'Completed|Error|1/1|2/2|3/3'" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+
+          echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+
+          # clean up demo stacks
+          DEMO_STACKS=$(openstack stack list -f value -c "Stack Name" --sort creation_time:desc | grep Vfmodule_Ete_ )
+          if [ ! -z "$DEMO_STACKS" ]; then
+            openstack stack delete -y $DEMO_STACKS
+          fi
+
+          # clean up vVG VNFs
+          VVG_SERVERS=$(openstack server list -f value -c Name  --sort creation_time:desc | grep vVG_)
+          if [ ! -z "$VVG_SERVERS" ]; then
+            openstack server delete $VVG_SERVERS
+          fi
+
+          exit 0
+    publishers:
+      - integration-robot
+      - archive-logs
+
+- job-template:
+    disabled_var:
+    name: '{env}-vfwclosedloop'
+    disabled: '{obj:disabled_var}'
+    project-type: freestyle
+    lab-name: ''
+    tenant-name: ''
+    scm:
+      - git-integration:
+          branch: "{integration-branch}"
+    wrappers:
+      - timestamps
+    triggers:
+      - timed: '0 * * * *'
+    properties:
+      - build-blocker:
+          blocking-jobs:
+            - "{env}-deploy"
+            - "{env}-healthcheck"
+            - "{env}-instantiate"
+            - "{env}-manual"
+            - "{env}-staging-manual"
+    builders:
+      - shell: |
+          #!/bin/bash
+          set +x
+          . $WORKSPACE/deployment/heat/onap-rke/env/{lab-name}/{tenant-name}-openrc
+          . $JENKINS_HOME/onap-lab-ci/labs/{lab-name}-openrc
+          source $WORKSPACE/test/ete/scripts/install_openstack_cli.sh
+
+          set -x
+          rm -rf $WORKSPACE/archives
+          mkdir -p $WORKSPACE/archives
+
+          SSH_KEY=~/.ssh/onap_key
+          STACK_NAME="{stack-name}"
+          NFS_IP=$(openstack stack output show $STACK_NAME nfs_vm_ip -c output_value -f value)
+          K8S_IP=$(openstack stack output show $STACK_NAME k8s_01_vm_ip -c output_value -f value)
+          ssh-keygen -R $NFS_IP
+
+          echo "kubectl top nodes" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          echo "kubectl get pods -n onap | grep -vE 'Completed|Error|1/1|2/2|3/3'" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+
+          ROBOT_POD=$(echo "kubectl -n onap get pods | grep robot | sed 's/ .*//'" | ssh -i $SSH_KEY -o StrictHostKeychecking=no ubuntu@$NFS_IP sudo su)
+          if [ "$ROBOT_POD" == "" ]; then
+            exit 1
+          fi
+
+          PKG_STACK=$(openstack stack list -f value -c "Stack Name" --sort creation_time:desc | grep Vfmodule_Demo_vFWCLvPKG | head -1)
+          PUBLIC_NET_ID=$(openstack stack show $STACK_NAME -f json | jq -r '.parameters.public_net_id')
+          PUBLIC_NET_NAME=$(openstack network show $PUBLIC_NET_ID -f value -c name)
+          PKG_IP=$(openstack stack resource show $PKG_STACK vpg_server_0 -f json | jq -r ".attributes.addresses.$PUBLIC_NET_NAME[0].addr")
+
+
+          echo "/root/oom/kubernetes/robot/demo-k8s.sh onap vfwclosedloop $PKG_IP" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          retval=$?
+
+          LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep vfwclosedloop | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          if [ "$LOG_DIR" == "" ]; then
+            exit 1
+          fi
+
+          rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$NFS_IP:/dockerdata-nfs/dev-robot/robot/logs/$LOG_DIR/ $WORKSPACE/archives/
+
+          echo "kubectl top nodes" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          echo "kubectl get pods -n onap | grep -vE 'Completed|Error|1/1|2/2|3/3'" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+
+          echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+
+          exit 0
+    publishers:
+      - integration-robot
+      - archive-logs
+
+
+- job-template:
+    name: 'oom-verify'
+    project-type: freestyle
+    parameters:
+      - string:
+          name: GERRIT_PROJECT
+          default: 'oom'
+          description: 'GERRIT_PROJECT parameter if not given by trigger'
+      - string:
+          name: GERRIT_BRANCH
+          default: '{branch}'
+          description: 'JJB configured GERRIT_BRANCH parameter'
+      - string:
+          name: GERRIT_REFSPEC
+          default: 'refs/heads/{branch}'
+          description: 'GERRIT_REFSPEC parameter if not given by trigger'
+    scm:
+      - gerrit-trigger-scm
+    triggers:
+      - gerrit:
+          server-name: 'gerrit.onap.org'
+          trigger-on:
+            - patchset-created-event:
+                exclude-drafts: 'false'
+                exclude-trivial-rebase: 'false'
+                exclude-no-code-change: 'false'
+            - draft-published-event
+            - comment-added-contains-event:
+                comment-contains-value: 'recheck'
+          projects:
+            - project-compare-type: 'ANT'
+              project-pattern: 'oom'
+              branches:
+                - branch-compare-type: 'ANT'
+                  branch-pattern: '**/{branch}'
+              file-paths:
+                - compare-type: 'ANT'
+                  pattern: 'kubernetes/**/*.yaml'
+            - project-compare-type: 'ANT'
+              project-pattern: '*/oom'
+              branches:
+                - branch-compare-type: 'ANT'
+                  branch-pattern: '**/{branch}'
+              file-paths:
+                - compare-type: 'ANT'
+                  pattern: '**/*.yaml'
+    wrappers:
+      - timestamps
+      - timeout:
+          timeout: 720
+          fail: true
+    builders:
+      - shell: |
+          #!/bin/bash
+
+          set +e
+          set -x
+          rm -rf $WORKSPACE/archives
+          mkdir -p $WORKSPACE/archives
+
+          NEXUS_RELEASE_PREFIX="https://nexus3.onap.org/repository/docker.release/v2"
+          RELEASE_TAGS_PATH="/tmp/onap-docker-release"
+          err=0
+
+          # if no files changed, will scan all files
+          CHANGED_FILES=""
+          if [ $(git rev-parse HEAD) != $(git rev-parse origin/master) ]; then
+            CHANGED_FILES=$(git diff-tree --no-commit-id --name-only -r HEAD)
+          fi
+          IMAGES_FILE=$WORKSPACE/archives/images.txt
+          rgrep --exclude-dir=pnda -n -E ':\s*onap/.*:.*' $CHANGED_FILES | awk '{{$1=$1}};1' | sort > $IMAGES_FILE
+
+          set +x
+          while read line; do
+            location=$(echo $line | cut -d: -f-2)
+            image_tag=$(echo $line | cut -d: -f3- | awk '{{$1=$1}};1' | cut -d' ' -f2)
+            image=$(echo $image_tag | cut -d : -f 1)
+            tag=$(echo $image_tag | cut -d : -f 2)
+
+            case $tag in
+            *STAGING*)
+              echo "[ERROR] $location: $image:$tag not released"
+              (( err++ ))
+              ;;
+            *SNAPSHOT*)
+              echo "[ERROR] $location: $image:$tag not released"
+              (( err++ ))
+              ;;
+            *latest*)
+              echo "[ERROR] $location: $image:$tag not released"
+              (( err++ ))
+              ;;
+            *)
+              TAGS_FILE=$RELEASE_TAGS_PATH/$image/tags.txt
+              mkdir -p $RELEASE_TAGS_PATH/$image
+              touch $TAGS_FILE
+              grep -q "^$tag\$" $TAGS_FILE
+              if [ $? -ne 0 ]; then
+                # not found; download latest
+                curl -s $NEXUS_RELEASE_PREFIX/$image/tags/list | jq -r '.tags[]' > $TAGS_FILE 2> /dev/null
+                grep -q "^$tag\$" $TAGS_FILE
+                if [ $? -ne 0 ]; then
+                  echo "[ERROR] $location: $image:$tag not released"
+                  (( err++ ))
+                fi
+              fi
+              ;;
+            esac
+          done < $IMAGES_FILE
+          echo $err unreleased images found.
+          exit $err
+
+    publishers:
+      - archive-logs
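
Note: the oom-verify builder above flags any onap/* image tagged STAGING, SNAPSHOT, or latest, and checks every other tag against the docker.release registry on Nexus, caching tag lists under /tmp/onap-docker-release. Checking a single image by hand uses the same endpoint; a sketch (image name and tag here are examples only):

    curl -s https://nexus3.onap.org/repository/docker.release/v2/onap/aai-resources/tags/list \
      | jq -r '.tags[]' | grep -qx '1.4.0' && echo released || echo "not released"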
diff --git a/deployment/onap-lab-ci/jjb/rke-template.yaml b/deployment/onap-lab-ci/jjb/rke-template.yaml
new file mode 100644 (file)
index 0000000..d00fdd6
--- /dev/null
@@ -0,0 +1,241 @@
+---
+- builder:
+    name: run-rke-ete
+    builders:
+      - shell: |
+          #!/bin/bash
+          set +x
+          . $WORKSPACE/deployment/heat/onap-rke/env/{lab-name}/{tenant-name}-openrc
+          . $JENKINS_HOME/onap-lab-ci/labs/{lab-name}-openrc
+          source $WORKSPACE/test/ete/scripts/install_openstack_cli.sh
+
+          set -x
+          rm -rf $WORKSPACE/archives
+          mkdir -p $WORKSPACE/archives
+
+          SSH_KEY=~/.ssh/onap_key
+          ENV_FILE=./env/{lab-name}/onap-oom.env
+          STACK_NAME={stack-name}
+
+          cd $WORKSPACE/deployment/heat/onap-rke/
+          ./scripts/deploy.sh -s $STACK_NAME -d {stack-name}.{lab-name}.onapci.org -i {integration-branch} -o {oom-branch} -q $ENV_FILE
+
+          NFS_IP=$(openstack stack output show $STACK_NAME nfs_vm_ip -c output_value -f value)
+          K8S_IP=$(openstack stack output show $STACK_NAME k8s_01_vm_ip -c output_value -f value)
+
+          set +x
+          ~/onap-lab-ci/labs/set-dns-record.sh "{stack-name}.{lab-name}" $K8S_IP
+          set -x
+
+          # deploy log and pomba last, since they are disabled by default in integration-override
+          ssh -i $SSH_KEY ubuntu@$NFS_IP 'sudo su -c "helm deploy dev local/onap -f ~/oom/kubernetes/onap/resources/environments/public-cloud.yaml -f ~/integration-override.yaml --namespace onap --verbose --set log.enabled=true --set pomba.enabled=true"'
+
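+          # wait up to 8 x 15 minutes for the pods to come up; stop early once nothing is pending or the pending count stops shrinking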
+          PREV_RESULT=999
+          for n in $(seq 1 8); do
+            echo "Wait for pods to be up, $n of 8"
+            RESULT=$(ssh -i $SSH_KEY ubuntu@$NFS_IP 'sudo su -c "kubectl -n onap get pods"' | grep -vE 'NAME|Completed|Error|1/1|2/2' | wc -l)
+            if [[ $? -eq 0 && ( $RESULT -eq 0 || $RESULT -ge $PREV_RESULT ) ]]; then
+              break
+            fi
+            sleep 15m
+            PREV_RESULT=$RESULT
+          done
+
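+          # retry the Robot healthcheck up to 8 times; the ete-k8s.sh exit code reflects the number of failing tests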
+          PREV_RESULT=127
+          for n in $(seq 1 8); do
+            echo "Wait for HEALTHCHECK, $n of 8"
+            ROBOT_POD=$(ssh -i $SSH_KEY ubuntu@$NFS_IP 'sudo su -c "kubectl --namespace onap get pods"' | grep robot | sed 's/ .*//')
+            ssh -i $SSH_KEY ubuntu@$NFS_IP  'sudo su -l root -c "/root/oom/kubernetes/robot/ete-k8s.sh onap health"'
+            RESULT=$?
+            if [[ $RESULT -lt 20 && ( $RESULT -eq 0 || $RESULT -ge $PREV_RESULT ) ]]; then
+              break
+            fi
+            sleep 15m
+            PREV_RESULT=$RESULT
+          done
+          if [ "$ROBOT_POD" == "" ]; then
+            exit 1
+          fi
+
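+          # locate the newest healthcheck log directory inside the Robot pod and pull it back into the Jenkins workspace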
+          LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep health | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+          mkdir -p $WORKSPACE/archives/healthcheck
+          rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$NFS_IP:/dockerdata-nfs/dev-robot/robot/logs/$LOG_DIR/ $WORKSPACE/archives/healthcheck
+
+          # record states of pods and containers
+          ssh -i $SSH_KEY root@$NFS_IP 'kubectl get pods -n onap'
+          ssh -i $SSH_KEY root@$NFS_IP "kubectl get pods -n onap -o json" > $WORKSPACE/archives/onap-pods.json
+          ssh -i $SSH_KEY root@$NFS_IP "/root/integration/deployment/heat/onap-rke/scripts/get-image-tags.sh | tee image-tags.log" > $WORKSPACE/archives/image-tags.log
+
+          # demo init
+          ssh -i $SSH_KEY ubuntu@$NFS_IP  'sudo su -l root -c "/root/oom/kubernetes/robot/demo-k8s.sh onap init"'
+          LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep demo_init | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+          mkdir -p $WORKSPACE/archives/demo-init
+          rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$NFS_IP:/dockerdata-nfs/dev-robot/robot/logs/$LOG_DIR/ $WORKSPACE/archives/demo-init
+
+          # ete ete
+          ssh -i $SSH_KEY ubuntu@$NFS_IP  'sudo su -l root -c "/root/oom/kubernetes/robot/ete-k8s.sh onap ete execscript"'
+          LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep ete_ete | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+          mkdir -p $WORKSPACE/archives/ete
+          rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$NFS_IP:/dockerdata-nfs/dev-robot/robot/logs/$LOG_DIR/ $WORKSPACE/archives/ete
+
+          # ete instantiate
+          ssh -i $SSH_KEY ubuntu@$NFS_IP  'sudo su -l root -c "/root/oom/kubernetes/robot/ete-k8s.sh onap instantiate"'
+          LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep instantiate | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+          mkdir -p $WORKSPACE/archives/instantiate
+          rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$NFS_IP:/dockerdata-nfs/dev-robot/robot/logs/$LOG_DIR/ $WORKSPACE/archives/instantiate
+
+          # ete portal
+          ssh -i $SSH_KEY ubuntu@$NFS_IP  'sudo su -l root -c "/root/oom/kubernetes/robot/ete-k8s.sh onap portal"'
+          LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep portal | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+          mkdir -p $WORKSPACE/archives/portal
+          rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$NFS_IP:/dockerdata-nfs/dev-robot/robot/logs/$LOG_DIR/ $WORKSPACE/archives/portal
+
+          # ete instantiateDemoVFWCL
+          ssh -i $SSH_KEY ubuntu@$NFS_IP  'sudo su -l root -c "/root/oom/kubernetes/robot/ete-k8s.sh onap instantiateDemoVFWCL"'
+          LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep instantiateDemoVFWCL | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+          mkdir -p $WORKSPACE/archives/instantiateDemoVFWCL
+          rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$NFS_IP:/dockerdata-nfs/dev-robot/robot/logs/$LOG_DIR/ $WORKSPACE/archives/instantiateDemoVFWCL
+
+          # ete sdc-dcae-d
+          ssh -i $SSH_KEY ubuntu@$NFS_IP  'sudo su -l root -c "/root/oom/kubernetes/robot/ete-k8s.sh onap sdc-dcae-d"'
+          LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep sdc-dcae-d | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+          mkdir -p $WORKSPACE/archives/sdc-dcae-d
+          rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$NFS_IP:/dockerdata-nfs/dev-robot/robot/logs/$LOG_DIR/ $WORKSPACE/archives/sdc-dcae-d
+
+
+          # demo vfwclosedloop
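+          # look up the vPKG VM's public IP from the most recent Vfmodule_Demo_vFWCLvPKG stack so the closed loop can target it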
+          PKG_STACK=$(openstack stack list -f value -c "Stack Name" --sort creation_time:desc | grep Vfmodule_Demo_vFWCLvPKG | head -1)
+          PUBLIC_NET_ID=$(openstack stack show $STACK_NAME -f json | jq -r '.parameters.public_net_id')
+          PUBLIC_NET_NAME=$(openstack network show $PUBLIC_NET_ID -f value -c name)
+          PKG_IP=$(openstack stack resource show $PKG_STACK vpg_server_0 -f json | jq -r ".attributes.addresses.$PUBLIC_NET_NAME[0].addr")
+          echo "/root/oom/kubernetes/robot/demo-k8s.sh onap vfwclosedloop $PKG_IP" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su
+          LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep vfwclosedloop | head -1" | ssh -i $SSH_KEY ubuntu@$NFS_IP sudo su)
+          echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+          mkdir -p $WORKSPACE/archives/vfwclosedloop
+          rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$NFS_IP:/dockerdata-nfs/dev-robot/robot/logs/$LOG_DIR/ $WORKSPACE/archives/vfwclosedloop
+
+
+          cd $WORKSPACE/archives
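+          # merge the per-suite Robot outputs into a single combined report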
+          rebot -N "ONAP CI" --removekeywords wuks --output output.xml --merge $(ls -rt */output.xml)
+
+          exit 0
+
+- job-template:
+    disabled_var:
+    triggers_var:
+    name: '{env}-staging-{frequency}'
+    description: 'Staging deployment to {lab-name} {tenant-name}'
+    disabled: '{obj:disabled_var}'
+    project-type: freestyle
+    lab-name: ''
+    tenant-name: ''
+    scm:
+      - git-integration:
+          branch: "{integration-branch}"
+    wrappers:
+      - timestamps
+      - timeout:
+          timeout: 720
+          fail: true
+    triggers: '{obj:triggers_var}'
+    builders:
+      - run-rke-ete:
+          stack-name: '{stack-name}'
+          lab-name: '{lab-name}'
+          tenant-name: '{tenant-name}'
+          integration-branch: '{integration-branch}'
+          oom-branch: '{oom-branch}'
+    publishers:
+      - integration-robot
+      - pods-influxdb
+      - archive-logs
+      - trigger-lf-lab-job:
+          lab-name: '{lab-name}'
+
+- job-template:
+    disabled_var:
+    triggers_var:
+    name: '{env}-release-{frequency}'
+    description: 'Release deployment to {lab-name} {tenant-name}'
+    disabled: '{obj:disabled_var}'
+    project-type: freestyle
+    lab-name: ''
+    tenant-name: ''
+    scm:
+      - git-integration:
+          branch: "{integration-branch}"
+    wrappers:
+      - timestamps
+      - timeout:
+          timeout: 720
+          fail: true
+    triggers: '{obj:triggers_var}'
+    builders:
+      - run-rke-ete:
+          stack-name: '{stack-name}'
+          lab-name: '{lab-name}'
+          tenant-name: '{tenant-name}'
+          integration-branch: '{integration-branch}'
+          oom-branch: '{oom-branch}'
+    publishers:
+      - integration-robot
+      - pods-influxdb
+      - archive-logs
+      - trigger-lf-lab-job:
+          lab-name: '{lab-name}'
+
+- builder:
+    name: run-cleanup
+    builders:
+      - shell: |
+          #!/bin/bash
+          set +x
+          . $WORKSPACE/deployment/heat/onap-rke/env/{lab-name}/{tenant-name}-openrc
+          . $JENKINS_HOME/onap-lab-ci/labs/{lab-name}-openrc
+
+          set -x
+          rm -rf $WORKSPACE/archives
+          mkdir -p $WORKSPACE/archives
+
+          SSH_KEY=~/.ssh/onap_key
+
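+          # iterate over every project and stack and log the 20 busiest pods by CPU; the helm delete calls are currently commented out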
+          openstack project list -f value | while read line; do
+            export OS_PROJECT_ID=$(echo $line | cut -d' ' -f1)
+            export OS_PROJECT_NAME=$(echo $line | cut -d' ' -f2-)
+
+            openstack stack list -f value -c 'Stack Name' | while read STACK_NAME; do
+              NFS_IP=$(openstack stack output show $STACK_NAME nfs_vm_ip -c output_value -f value)
+              if [ ! -z "$NFS_IP" ]; then
+                # timeout 20 ssh -i $SSH_KEY root@$NFS_IP "helm delete dev-log"
+                # timeout 20 ssh -i $SSH_KEY root@$NFS_IP "helm delete dev-pomba"
+                timeout 20 ssh -i $SSH_KEY root@$NFS_IP "kubectl top pod | sort --reverse --key 2 --numeric | head -20"
+              fi
+            done
+          done
+
+          exit 0
+
+- job-template:
+    name: '{env}-cleanup'
+    description: 'Clean up {lab-name} tenants'
+    project-type: freestyle
+    lab-name: ''
+    tenant-name: ''
+    scm:
+      - git-integration:
+          branch: "{integration-branch}"
+    wrappers:
+      - timestamps
+    builders:
+      - run-cleanup:
+          lab-name: '{lab-name}'
+          tenant-name: '{tenant-name}'
+    publishers:
+      - archive-logs
diff --git a/deployment/onap-lab-ci/jobs/casablanca/windriver-sb00-oom-resilience-instantiateDemoVFWCL/pods_to_delete.txt b/deployment/onap-lab-ci/jobs/casablanca/windriver-sb00-oom-resilience-instantiateDemoVFWCL/pods_to_delete.txt
new file mode 100644 (file)
index 0000000..30cbe22
--- /dev/null
@@ -0,0 +1,54 @@
+dev-aai-aai-babel-7cb986869d-pr4xm
+dev-aai-aai-cassandra-0
+dev-aai-aai-data-router-6b4776bfc4-lssst
+dev-aai-aai-elasticsearch-669f4664c-h5fh4
+dev-aai-aai-ff69fc688-r5hc9
+dev-aai-aai-gizmo-9df94f898-64j9t
+dev-aai-aai-graphadmin-5d6d4d86dc-7wvgs
+dev-aai-aai-modelloader-54f7444dcd-xw54c
+dev-aai-aai-resources-8579748958-jqg9x
+dev-aai-aai-search-data-695b878655-wfr5l
+dev-aai-aai-sparky-be-9bcc99756-76j5p
+dev-aai-aai-spike-59c6db6574-nj465
+dev-aai-aai-traversal-7f5669d56-xwvlr
+dev-multicloud-multicloud-7cd96f75fb-vzmq7
+dev-multicloud-multicloud-azure-85548b6469-d6kz9
+dev-multicloud-multicloud-ocata-85d796889b-bppmv
+dev-multicloud-multicloud-pike-78fb779f6c-vn789
+dev-multicloud-multicloud-vio-794f8b7d85-hwnjg
+dev-multicloud-multicloud-windriver-6ffb4dcd-dhnlf
+dev-sdc-sdc-be-6d8f57d5c4-dqs8w
+dev-sdc-sdc-cs-7768fcf944-cg75x
+dev-sdc-sdc-dcae-be-5bb587d95c-dgtwj
+dev-sdc-sdc-dcae-dt-68d8d77578-plvjn
+dev-sdc-sdc-dcae-fe-79675b4bc4-78cgd
+dev-sdc-sdc-dcae-tosca-lab-6d9f59cbf6-fj7jp
+dev-sdc-sdc-es-6d9566f787-dvgx6
+dev-sdc-sdc-fe-7c7c4577d9-bpdhh
+dev-sdc-sdc-kb-85f8df694-4cl7j
+dev-sdc-sdc-onboarding-be-576864dfbc-x8pt8
+dev-sdc-sdc-wfd-be-7b84b4bf97-stlxs
+dev-sdc-sdc-wfd-fe-86597867d6-hw725
+dev-sdnc-controller-blueprints-684f7d865f-4pglc
+dev-sdnc-controller-blueprints-db-0
+dev-sdnc-nengdb-0
+dev-sdnc-network-name-gen-7f54b5649c-w4j8x
+dev-sdnc-sdnc-0
+dev-sdnc-sdnc-ansible-server-8b58d86f5-d6hz2
+dev-sdnc-sdnc-db-0
+dev-sdnc-sdnc-dgbuilder-77866dbfb5-44jds
+dev-sdnc-sdnc-dmaap-listener-6f67f97dd9-dv5r7
+dev-sdnc-sdnc-portal-79f65cfcf8-4jh8b
+dev-sdnc-sdnc-ueb-listener-557c57577c-knssk
+dev-so-so-575dcc7fc5-8g626
+dev-so-so-bpmn-infra-74875c7c-l6pl9
+dev-so-so-catalog-db-adapter-67878b4fc8-mzwg2
+dev-so-so-mariadb-576ccb887f-vh2f2
+dev-so-so-monitoring-7b4fdbf895-szgtp
+dev-so-so-openstack-adapter-85576544b6-xmd62
+dev-so-so-request-db-adapter-658db4b75d-9vlck
+dev-so-so-sdc-controller-679f5dff6d-rxgf5
+dev-so-so-sdnc-adapter-54d99774c9-qgp6w
+dev-so-so-vfc-adapter-5bb49bc97f-7rvzj
+dev-vid-vid-54f8c6d78-jj8vp
+dev-vid-vid-mariadb-galera-0
diff --git a/deployment/onap-lab-ci/jobs/casablanca/windriver-sb00-oom-resilience-instantiateDemoVFWCL/queue_all_jobs.sh b/deployment/onap-lab-ci/jobs/casablanca/windriver-sb00-oom-resilience-instantiateDemoVFWCL/queue_all_jobs.sh
new file mode 100755 (executable)
index 0000000..d9636ba
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/bash
+JOB=windriver-sb00-oom-resilience-instantiateDemoVFWCL
+for POD_TO_DELETE in $(cat ~/onap-lab-ci/jobs/$JOB/pods_to_delete.txt); do
+    echo build "$JOB $POD_TO_DELETE"
+    java -jar ~/jenkins-cli.jar  -s http://localhost:8080/jenkins -auth jenkins:g2jenkins build $JOB -p POD_TO_DELETE=$POD_TO_DELETE
+done
diff --git a/deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-healthdist/pods_to_delete.txt b/deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-healthdist/pods_to_delete.txt
new file mode 100644 (file)
index 0000000..7f0aa28
--- /dev/null
@@ -0,0 +1,48 @@
+dev-aai-aai-5696cbd55-csck6
+dev-aai-aai-babel-85c85878d5-cdww7
+dev-aai-aai-cassandra-0
+dev-aai-aai-cassandra-1
+dev-aai-aai-cassandra-2
+dev-aai-aai-champ-8598c7bf4f-l5vts
+dev-aai-aai-data-router-7d6b484764-qgcv4
+dev-aai-aai-elasticsearch-669f4664c-75hxk
+dev-aai-aai-gizmo-784bf59bb8-tmdts
+dev-aai-aai-graphadmin-5f9b7645d6-7bg8r
+dev-aai-aai-modelloader-7bb76855b5-7qck6
+dev-aai-aai-resources-55bd894dfc-snsw7
+dev-aai-aai-search-data-8d77c9d-s5g4l
+dev-aai-aai-sparky-be-577b95c76d-9dspz
+dev-aai-aai-spike-59c6db6574-c5nck
+dev-aai-aai-traversal-86f575c7f7-5mgnk
+dev-sdc-sdc-be-694b9b557b-ckk9h
+dev-sdc-sdc-dcae-be-5bb587d95c-9gk2h
+dev-sdc-sdc-dcae-dt-68d8d77578-vsstx
+dev-sdc-sdc-dcae-fe-79675b4bc4-nckl5
+dev-sdc-sdc-dcae-tosca-lab-6d9f59cbf6-k7rpz
+dev-sdc-sdc-es-768f76f64f-kbtgn
+dev-sdc-sdc-fe-6d98d5b7-57rmp
+dev-sdc-sdc-kb-7466db5dd-r2xr4
+dev-sdc-sdc-onboarding-be-76c6779fbf-gv8c9
+dev-sdc-sdc-wfd-be-7b84b4bf97-d9ngz
+dev-sdc-sdc-wfd-fe-86597867d6-bgm7q
+dev-sdnc-controller-blueprints-d9974c9fb-hdbb6
+dev-sdnc-controller-blueprints-db-0
+dev-sdnc-nengdb-0
+dev-sdnc-network-name-gen-d89496744-lrj88
+dev-sdnc-sdnc-0
+dev-sdnc-sdnc-ansible-server-5f8b66b9b4-qfqgj
+dev-sdnc-sdnc-db-0
+dev-sdnc-sdnc-dgbuilder-865fbf655d-2zbw7
+dev-sdnc-sdnc-dmaap-listener-7c9f456bf6-kt49n
+dev-sdnc-sdnc-portal-597cb97887-b4t42
+dev-sdnc-sdnc-ueb-listener-859d9d8b64-g5kkv
+dev-so-so-7c78d4c6cd-g5k7k
+dev-so-so-bpmn-infra-856bb6dd54-nkv55
+dev-so-so-catalog-db-adapter-768cf4c795-s7t5t
+dev-so-so-mariadb-7d548967c8-sbn56
+dev-so-so-monitoring-68b8b96b6b-pzzml
+dev-so-so-openstack-adapter-54f6cdd667-vj2ff
+dev-so-so-request-db-adapter-7758c846f9-6hg4j
+dev-so-so-sdc-controller-6cd84f4797-cbjbn
+dev-so-so-sdnc-adapter-6b7c558f56-hbh9n
+dev-so-so-vfc-adapter-8664b4db56-gmp5p
diff --git a/deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-healthdist/queue_all_jobs.sh b/deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-healthdist/queue_all_jobs.sh
new file mode 100755 (executable)
index 0000000..e8a65bb
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/bash
+JOB=windriver-sb01-oom-resilience-healthdist
+for POD_TO_DELETE in $(cat ~/onap-lab-ci/jobs/$JOB/pods_to_delete.txt); do
+    echo build "$JOB $POD_TO_DELETE"
+    java -jar ~/jenkins-cli.jar  -s http://localhost:8080/jenkins -auth jenkins:g2jenkins build $JOB -p POD_TO_DELETE=$POD_TO_DELETE
+done
diff --git a/deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-instantiateDemoVFWCL/pods_to_delete.txt b/deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-instantiateDemoVFWCL/pods_to_delete.txt
new file mode 100644 (file)
index 0000000..176d4e0
--- /dev/null
@@ -0,0 +1,55 @@
+dev-aai-aai-5696cbd55-sdslw
+dev-aai-aai-babel-85c85878d5-vv6ks
+dev-aai-aai-cassandra-0
+dev-aai-aai-cassandra-1
+dev-aai-aai-cassandra-2
+dev-aai-aai-champ-8598c7bf4f-swbcl
+dev-aai-aai-data-router-7d6b484764-6c6wt
+dev-aai-aai-elasticsearch-669f4664c-q84kf
+dev-aai-aai-gizmo-784bf59bb8-vj8hp
+dev-aai-aai-graphadmin-5f9b7645d6-nl6xp
+dev-aai-aai-modelloader-7bb76855b5-khhws
+dev-aai-aai-resources-55bd894dfc-bq6qm
+dev-aai-aai-search-data-8d77c9d-462bx
+dev-aai-aai-sparky-be-577b95c76d-dqfn6
+dev-aai-aai-spike-59c6db6574-wrlq2
+dev-aai-aai-traversal-86f575c7f7-q5gvh
+dev-multicloud-multicloud-7cd96f75fb-hmgwp
+dev-multicloud-multicloud-azure-85548b6469-m4tqt
+dev-multicloud-multicloud-ocata-cfbc7dd4b-nxqnv
+dev-multicloud-multicloud-pike-57778fb4fc-q8gt2
+dev-multicloud-multicloud-vio-794f8b7d85-7h8qt
+dev-multicloud-multicloud-windriver-75b7659f96-m7kh9
+dev-sdc-sdc-be-694b9b557b-49bk7
+dev-sdc-sdc-cs-76f6c49f5d-r9lng
+dev-sdc-sdc-dcae-be-5bb587d95c-bnbp9
+dev-sdc-sdc-dcae-dt-68d8d77578-9hfrw
+dev-sdc-sdc-dcae-fe-79675b4bc4-lgqql
+dev-sdc-sdc-dcae-tosca-lab-6d9f59cbf6-ltbqg
+dev-sdc-sdc-es-768f76f64f-dqxjb
+dev-sdc-sdc-fe-6d98d5b7-nzc6g
+dev-sdc-sdc-kb-7466db5dd-sdkqk
+dev-sdc-sdc-onboarding-be-76c6779fbf-n82mh
+dev-sdc-sdc-wfd-be-7b84b4bf97-bsr7n
+dev-sdc-sdc-wfd-fe-86597867d6-kkhnt
+dev-sdnc-controller-blueprints-d9974c9fb-xdnnb
+dev-sdnc-controller-blueprints-db-0
+dev-sdnc-nengdb-0
+dev-sdnc-network-name-gen-d89496744-fskhr
+dev-sdnc-sdnc-0
+dev-sdnc-sdnc-ansible-server-5f8b66b9b4-hch2v
+dev-sdnc-sdnc-dgbuilder-865fbf655d-dzg5h
+dev-sdnc-sdnc-dmaap-listener-7c9f456bf6-7hs9x
+dev-sdnc-sdnc-portal-597cb97887-mpp4v
+dev-sdnc-sdnc-ueb-listener-859d9d8b64-7mzr5
+dev-so-so-7c78d4c6cd-8m5mf
+dev-so-so-bpmn-infra-856bb6dd54-zxr8d
+dev-so-so-catalog-db-adapter-768cf4c795-2c97j
+dev-so-so-mariadb-7d548967c8-g6bbr
+dev-so-so-monitoring-68b8b96b6b-7b22z
+dev-so-so-openstack-adapter-54f6cdd667-9lhpq
+dev-so-so-request-db-adapter-7758c846f9-52gwj
+dev-so-so-sdc-controller-6cd84f4797-r6smq
+dev-so-so-sdnc-adapter-6b7c558f56-85j5t
+dev-so-so-vfc-adapter-8664b4db56-2bcd5
+dev-vid-vid-78bc8969d9-blqpj
diff --git a/deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-instantiateDemoVFWCL/queue_all_jobs.sh b/deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-instantiateDemoVFWCL/queue_all_jobs.sh
new file mode 100755 (executable)
index 0000000..1a50b8d
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/bash
+JOB=windriver-sb01-oom-resilience-instantiateDemoVFWCL
+for POD_TO_DELETE in $(cat ~/onap-lab-ci/jobs/$JOB/pods_to_delete.txt); do
+    echo build "$JOB $POD_TO_DELETE"
+    java -jar ~/jenkins-cli.jar  -s http://localhost:8080/jenkins -auth jenkins:g2jenkins build $JOB -p POD_TO_DELETE=$POD_TO_DELETE
+done
diff --git a/deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-vfwclosedloop/pods_to_delete.txt b/deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-vfwclosedloop/pods_to_delete.txt
new file mode 100644 (file)
index 0000000..00574db
--- /dev/null
@@ -0,0 +1,67 @@
+dep-config-binding-service-65986446f4-k7ndb
+dep-dcae-datafile-collector-b58756f44-twlsj
+dep-dcae-hv-ves-collector-689bdfb74-xlqnq
+dep-dcae-prh-7bcfc57dc-v9slh
+dep-dcae-snmptrap-collector-85c97f9db9-lzq4k
+dep-dcae-tca-analytics-f5f879f6f-2mdpp
+dep-dcae-ves-collector-77fdf87d88-z22k5
+dep-deployment-handler-674c8b8449-lzsqz
+dep-holmes-engine-mgmt-5558cb44b8-k7rsr
+dep-holmes-rule-mgmt-876f65647-b9874
+dep-inventory-654547f4-vnsw7
+dep-policy-handler-7cc8db9958-6vqzr
+dep-pstg-write-f67b57947-nz4s8
+dep-service-change-handler-679f589846-dsvn8
+dev-aai-aai-5696cbd55-csck6
+dev-aai-aai-babel-85c85878d5-cdww7
+dev-aai-aai-cassandra-0
+dev-aai-aai-cassandra-1
+dev-aai-aai-cassandra-2
+dev-aai-aai-champ-8598c7bf4f-l5vts
+dev-aai-aai-data-router-7d6b484764-qgcv4
+dev-aai-aai-elasticsearch-669f4664c-75hxk
+dev-aai-aai-gizmo-784bf59bb8-tmdts
+dev-aai-aai-graphadmin-5f9b7645d6-7bg8r
+dev-aai-aai-modelloader-7bb76855b5-7qck6
+dev-aai-aai-resources-55bd894dfc-snsw7
+dev-aai-aai-search-data-8d77c9d-s5g4l
+dev-aai-aai-sparky-be-577b95c76d-9dspz
+dev-aai-aai-spike-59c6db6574-c5nck
+dev-aai-aai-traversal-86f575c7f7-5mgnk
+dev-appc-appc-ansible-server-7fb64488c6-nv4g2
+dev-appc-appc-cdt-6f6784d579-7m69z
+dev-appc-appc-db-0
+dev-appc-appc-db-1
+dev-appc-appc-db-2
+dev-appc-appc-dgbuilder-687d76975d-k4gpx
+dev-dcaegen2-dcae-bootstrap-7c5c8d7d76-rk9lq
+dev-dcaegen2-dcae-cloudify-manager-67b576dd66-p9vj4
+dev-dcaegen2-dcae-db-0
+dev-dcaegen2-dcae-db-1
+dev-dcaegen2-dcae-healthcheck-65c6548689-jtpcr
+dev-dcaegen2-dcae-pgpool-78fbd864cd-nwfr9
+dev-dcaegen2-dcae-pgpool-78fbd864cd-tbn49
+dev-dcaegen2-dcae-redis-0
+dev-dcaegen2-dcae-redis-1
+dev-dcaegen2-dcae-redis-2
+dev-dcaegen2-dcae-redis-3
+dev-dcaegen2-dcae-redis-4
+dev-dcaegen2-dcae-redis-5
+dev-dmaap-dbc-pg-0
+dev-dmaap-dbc-pg-1
+dev-dmaap-dbc-pgpool-7b748d5894-r2fqq
+dev-dmaap-dbc-pgpool-7b748d5894-rxdj9
+dev-dmaap-dmaap-bus-controller-765495b674-c4nlr
+dev-dmaap-dmaap-dr-db-56b956df8d-qlqnm
+dev-dmaap-dmaap-dr-node-64864d5cc-4nzq7
+dev-dmaap-dmaap-dr-prov-69bd7c6665-kb45b
+dev-dmaap-message-router-647bbfc54d-4x7xt
+dev-dmaap-message-router-kafka-64465d9ff4-hgg8w
+dev-policy-brmsgw-d98d8dc5-zxjs5
+dev-policy-drools-0
+dev-policy-nexus-7d5f84d8b6-tq6pd
+dev-policy-pap-cd84c4fff-4ktpw
+dev-policy-pdp-0
+dev-policy-policy-apex-pdp-0
+dev-policy-policy-distribution-669bc685d4-rc2bx
+dev-policy-policydb-74d46689cb-5nvtw
diff --git a/deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-vfwclosedloop/queue_all_jobs.sh b/deployment/onap-lab-ci/jobs/casablanca/windriver-sb01-oom-resilience-vfwclosedloop/queue_all_jobs.sh
new file mode 100755 (executable)
index 0000000..fc3aa49
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/bash
+JOB=windriver-sb01-oom-resilience-vfwclosedloop
+for POD_TO_DELETE in $(cat ~/onap-lab-ci/jobs/$JOB/pods_to_delete.txt); do
+    echo build "$JOB $POD_TO_DELETE"
+    java -jar ~/jenkins-cli.jar  -s http://localhost:8080/jenkins -auth jenkins:g2jenkins build $JOB -p POD_TO_DELETE=$POD_TO_DELETE
+done
diff --git a/deployment/onap-lab-ci/jobs/dublin/queue_all_jobs.sh b/deployment/onap-lab-ci/jobs/dublin/queue_all_jobs.sh
new file mode 100755 (executable)
index 0000000..e993d54
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+if [ $# -ne 1 ] || [ ! -f "$1" ]; then
+    echo "usage: $0 <pods_to_delete.txt>"
+    exit 1
+fi
+
+PODS_FILE=$1
+DIR=$(dirname "$(readlink -f "$PODS_FILE")")
+JOB=$(basename $DIR)
+echo $JOB
+for POD_TO_DELETE in $(cat $PODS_FILE); do
+    echo build "$JOB $POD_TO_DELETE"
+    java -jar ~/jenkins-cli.jar  -s http://localhost:8080/jenkins -auth jenkins:g2jenkins build $JOB -p POD_TO_DELETE=$POD_TO_DELETE
+done
diff --git a/deployment/onap-lab-ci/jobs/dublin/windriver-sb02-resilience-healthdist/pods_to_delete.txt b/deployment/onap-lab-ci/jobs/dublin/windriver-sb02-resilience-healthdist/pods_to_delete.txt
new file mode 100644 (file)
index 0000000..602dd21
--- /dev/null
@@ -0,0 +1,49 @@
+dev-aai-aai-77d8c648b4-qzsqp
+dev-aai-aai-babel-6c4f8878bd-2qcn7
+dev-aai-aai-champ-5777c455dd-n24fn
+dev-aai-aai-data-router-d4cc5584d-5rfzl
+dev-aai-aai-elasticsearch-75bcf895c5-jhlxv
+dev-aai-aai-gizmo-78f58874d7-g7qqj
+dev-aai-aai-graphadmin-85b977c7df-4rwg5
+dev-aai-aai-modelloader-7cf5648d8f-kkcpz
+dev-aai-aai-resources-6964945c8f-7fr69
+dev-aai-aai-schema-service-6c644cdcf6-wrn8z
+dev-aai-aai-search-data-66b7d59877-4t4sl
+dev-aai-aai-sparky-be-56df767478-5r6sl
+dev-aai-aai-spike-f958fdf6-k5qg8
+dev-aai-aai-traversal-cd7c55df8-hqkgq
+dev-cassandra-cassandra-0
+dev-sdc-sdc-be-56d8f658-t8hlt
+dev-sdc-sdc-dcae-be-6dd4df6cf6-bg2kv
+dev-sdc-sdc-dcae-dt-6f669b4d4c-l4h8l
+dev-sdc-sdc-dcae-fe-684f4495b7-9d5qv
+dev-sdc-sdc-dcae-tosca-lab-74cf68f5ff-drrh6
+dev-sdc-sdc-es-76c4b9f57b-ltdtm
+dev-sdc-sdc-fe-6577f8fb4f-rc8hl
+dev-sdc-sdc-kb-559976fd47-v7m5l
+dev-sdc-sdc-onboarding-be-84cc8bb944-9jv6m
+dev-sdc-sdc-wfd-be-56c7747ffc-7j5wl
+dev-sdc-sdc-wfd-fe-c487c8b75-gjrbj
+dev-sdnc-cds-blueprints-processor-7b4bfdb55f-745nh
+dev-sdnc-cds-command-executor-fbb9857b-vmbww
+dev-sdnc-cds-controller-blueprints-6649d85898-sb8wr
+dev-sdnc-cds-db-0
+dev-sdnc-cds-sdc-listener-695c8fdc75-6kt4f
+dev-sdnc-cds-ui-794bbf9fb5-s7jpb
+dev-sdnc-nengdb-0
+dev-sdnc-network-name-gen-5bd65b8f99-85vwm
+dev-sdnc-sdnc-0
+dev-sdnc-sdnc-ansible-server-57bdc47678-nztcz
+dev-sdnc-sdnc-dgbuilder-86bd576dc9-j7b2s
+dev-sdnc-sdnc-dmaap-listener-858d6568d4-grd25
+dev-sdnc-sdnc-ueb-listener-6756b97f98-hmpmx
+dev-so-so-bpmn-infra-5bfc99c6d8-qkkkp
+dev-so-so-catalog-db-adapter-767dd889f5-647pc
+dev-so-so-df69f5bb7-sfw54
+dev-so-so-monitoring-848c945fb6-qm9cj
+dev-so-so-openstack-adapter-5cfb9d7cc9-fkjcq
+dev-so-so-request-db-adapter-579dccb955-79msn
+dev-so-so-sdc-controller-58f5cbbb9f-4llzm
+dev-so-so-sdnc-adapter-75545c8b6b-mxkwq
+dev-so-so-vfc-adapter-6b7d95f564-xncbj
+dev-so-so-vnfm-adapter-796b97b6b7-pgzwt
diff --git a/deployment/onap-lab-ci/jobs/dublin/windriver-sb02-resilience-instantiateDemoVFWCL/pods_to_delete.txt b/deployment/onap-lab-ci/jobs/dublin/windriver-sb02-resilience-instantiateDemoVFWCL/pods_to_delete.txt
new file mode 100644 (file)
index 0000000..7873d2e
--- /dev/null
@@ -0,0 +1,75 @@
+dev-aai-aai-77d8c648b4-rg924
+dev-aai-aai-babel-6c4f8878bd-rjc2x
+dev-aai-aai-champ-5777c455dd-7lrd5
+dev-aai-aai-data-router-d4cc5584d-4q5s6
+dev-aai-aai-elasticsearch-75bcf895c5-dpstp
+dev-aai-aai-gizmo-78f58874d7-mz2ns
+dev-aai-aai-graphadmin-85b977c7df-7pdl7
+dev-aai-aai-modelloader-7cf5648d8f-frbgz
+dev-aai-aai-resources-6964945c8f-ctpln
+dev-aai-aai-schema-service-6c644cdcf6-hbl2d
+dev-aai-aai-search-data-66b7d59877-7nwpg
+dev-aai-aai-sparky-be-56df767478-5x9vp
+dev-aai-aai-spike-f958fdf6-sgr57
+dev-aai-aai-traversal-cd7c55df8-fjfxx
+dev-cassandra-cassandra-0
+dev-mariadb-galera-mariadb-galera-0
+dev-multicloud-multicloud-976ff7b8-gl4fd
+dev-multicloud-multicloud-azure-589669c4b5-bs7fx
+dev-multicloud-multicloud-fcaps-59779c855-hv7wc
+dev-multicloud-multicloud-k8s-6dfb987cc6-wwqvq
+dev-multicloud-multicloud-k8s-etcd-0
+dev-multicloud-multicloud-k8s-mongo-0
+dev-multicloud-multicloud-lenovo-db4fd57f5-kdksx
+dev-multicloud-multicloud-ocata-88cb4c8c7-n2n4l
+dev-multicloud-multicloud-pike-6b899d5d-zddct
+dev-multicloud-multicloud-starlingx-779f94d5cf-tcj2t
+dev-multicloud-multicloud-vio-76db6c7fbf-nm6x2
+dev-multicloud-multicloud-windriver-5cd7574896-g55t8
+dev-policy-brmsgw-6d4f9c6567-xgfjq
+dev-policy-drools-0
+dev-policy-nexus-6f6fcb6675-dkg6x
+dev-policy-pap-654966b459-td565
+dev-policy-pdp-0
+dev-policy-policy-apex-pdp-0
+dev-policy-policy-api-8bbff857d-lm99r
+dev-policy-policy-distribution-8497d87554-zhqtd
+dev-policy-policy-pap-6d65cb4f99-7ww7k
+dev-policy-policy-xacml-pdp-65bbc9697f-z88c5
+dev-policy-policydb-7f56989bf-47vxc
+dev-sdc-sdc-be-56d8f658-q7rst
+dev-sdc-sdc-dcae-be-6dd4df6cf6-nhtxj
+dev-sdc-sdc-dcae-dt-6f669b4d4c-vsswr
+dev-sdc-sdc-dcae-fe-684f4495b7-g4t47
+dev-sdc-sdc-dcae-tosca-lab-74cf68f5ff-mq4c2
+dev-sdc-sdc-es-76c4b9f57b-gdgq8
+dev-sdc-sdc-fe-6577f8fb4f-97g9c
+dev-sdc-sdc-kb-559976fd47-pwfzb
+dev-sdc-sdc-onboarding-be-84cc8bb944-62z4z
+dev-sdc-sdc-wfd-be-56c7747ffc-5td7j
+dev-sdc-sdc-wfd-fe-c487c8b75-mjnfc
+dev-sdnc-cds-blueprints-processor-7b4bfdb55f-gdrqv
+dev-sdnc-cds-command-executor-fbb9857b-qnd6w
+dev-sdnc-cds-controller-blueprints-6649d85898-lbxsk
+dev-sdnc-cds-db-0
+dev-sdnc-cds-sdc-listener-695c8fdc75-24hmd
+dev-sdnc-cds-ui-794bbf9fb5-fxj6j
+dev-sdnc-nengdb-0
+dev-sdnc-network-name-gen-5bd65b8f99-8z26h
+dev-sdnc-sdnc-0
+dev-sdnc-sdnc-ansible-server-57bdc47678-cdw5l
+dev-sdnc-sdnc-dgbuilder-86bd576dc9-g46l6
+dev-sdnc-sdnc-dmaap-listener-858d6568d4-2fbfl
+dev-sdnc-sdnc-ueb-listener-6756b97f98-vjtwl
+dev-so-so-bpmn-infra-5bfc99c6d8-2nzp5
+dev-so-so-catalog-db-adapter-767dd889f5-rffbc
+dev-so-so-df69f5bb7-d95lx
+dev-so-so-monitoring-848c945fb6-zbpjw
+dev-so-so-openstack-adapter-5cfb9d7cc9-8gmh8
+dev-so-so-request-db-adapter-579dccb955-2jxhb
+dev-so-so-sdc-controller-58f5cbbb9f-hjltw
+dev-so-so-sdnc-adapter-75545c8b6b-hlqcz
+dev-so-so-vfc-adapter-6b7d95f564-6gf8s
+dev-so-so-vnfm-adapter-796b97b6b7-xtkrc
+dev-vid-vid-5547b8d577-lvggt
+dev-vid-vid-mariadb-galera-0
diff --git a/deployment/onap-lab-ci/jobs/dublin/windriver-sb02-resilience-vfwclosedloop/pods_to_delete.txt b/deployment/onap-lab-ci/jobs/dublin/windriver-sb02-resilience-vfwclosedloop/pods_to_delete.txt
new file mode 100644 (file)
index 0000000..0bdcea2
--- /dev/null
@@ -0,0 +1,66 @@
+dep-dcae-dashboard-5c9ddbd7d5-bwbhk
+dep-dcae-hv-ves-collector-7f8bcddc4b-mnzv2
+dep-dcae-prh-5695c8b78c-rnfx5
+dep-dcae-snmptrap-collector-96d5858f6-tnk27
+dep-dcae-tca-analytics-7868c85b85-mtn45
+dep-dcae-ves-collector-66b74dffbf-knt4j
+dep-holmes-engine-mgmt-54cf76654f-rsslm
+dep-holmes-rule-mgmt-7b7f94b59-9hjls
+dev-aai-aai-77d8c648b4-p4b4w
+dev-aai-aai-babel-6c4f8878bd-vg2d2
+dev-aai-aai-champ-5777c455dd-xmbrb
+dev-aai-aai-data-router-d4cc5584d-4mgkd
+dev-aai-aai-elasticsearch-75bcf895c5-v82w8
+dev-aai-aai-gizmo-78f58874d7-gdgc8
+dev-aai-aai-graphadmin-85b977c7df-2jkfl
+dev-aai-aai-modelloader-7cf5648d8f-xj9rr
+dev-aai-aai-resources-6964945c8f-brdfs
+dev-aai-aai-schema-service-6c644cdcf6-7lr2n
+dev-aai-aai-search-data-66b7d59877-gc69g
+dev-aai-aai-sparky-be-56df767478-wzs5g
+dev-aai-aai-spike-f958fdf6-c7hqh
+dev-aai-aai-traversal-cd7c55df8-9tkg5
+dev-appc-appc-0
+dev-appc-appc-ansible-server-0
+dev-appc-appc-cdt-856b465f4-4pq2p
+dev-appc-appc-db-0
+dev-appc-appc-dgbuilder-5fbfccb55-snf2t
+dev-cassandra-cassandra-0
+dev-dcaegen2-dcae-bootstrap-56bf5f5b77-hk9dv
+dev-dcaegen2-dcae-cloudify-manager-5d5cd6d667-66lcn
+dev-dcaegen2-dcae-config-binding-service-7c9466d94b-n6f6d
+dev-dcaegen2-dcae-db-0
+dev-dcaegen2-dcae-deployment-handler-76bc887fcd-czxzg
+dev-dcaegen2-dcae-healthcheck-7f5d5586f5-bl5bm
+dev-dcaegen2-dcae-inv-pg-0
+dev-dcaegen2-dcae-inv-pgpool-75cf498b5f-hzcl4
+dev-dcaegen2-dcae-inv-pgpool-75cf498b5f-xpnh7
+dev-dcaegen2-dcae-inventory-api-6bd7f59547-72cfl
+dev-dcaegen2-dcae-pgpool-7f64667b9-5sjhr
+dev-dcaegen2-dcae-pgpool-7f64667b9-9fdq2
+dev-dcaegen2-dcae-policy-handler-768579866d-qkn9j
+dev-dcaegen2-dcae-redis-0
+dev-dcaegen2-dcae-servicechange-handler-687664985d-n52mv
+dev-dmaap-dbc-pg-0
+dev-dmaap-dbc-pgpool-5fb9d89d5c-c4fbn
+dev-dmaap-dbc-pgpool-5fb9d89d5c-tgrp4
+dev-dmaap-dmaap-bc-8654446979-bst95
+dev-dmaap-dmaap-dr-db-0
+dev-dmaap-dmaap-dr-node-0
+dev-dmaap-dmaap-dr-prov-84b6f85d98-8hv5l
+dev-dmaap-message-router-0
+dev-dmaap-message-router-kafka-0
+dev-dmaap-message-router-mirrormaker-748b75fc48-5llpm
+dev-dmaap-message-router-zookeeper-0
+dev-mariadb-galera-mariadb-galera-0
+dev-policy-brmsgw-6d4f9c6567-mt4g4
+dev-policy-drools-0
+dev-policy-nexus-6f6fcb6675-h7dnf
+dev-policy-pap-654966b459-w4bsd
+dev-policy-pdp-0
+dev-policy-policy-apex-pdp-0
+dev-policy-policy-api-8bbff857d-qx5xm
+dev-policy-policy-distribution-8497d87554-nhl5d
+dev-policy-policy-pap-6d65cb4f99-lqhzj
+dev-policy-policy-xacml-pdp-65bbc9697f-s8c5s
+dev-policy-policydb-7f56989bf-llqtm
diff --git a/deployment/onap-lab-ci/jobs/get-result.sh b/deployment/onap-lab-ci/jobs/get-result.sh
new file mode 100755 (executable)
index 0000000..7ec39be
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+JOB=$1
+BUILD=$2
+
+mkdir -p $JOB
+JSON=$JOB/$BUILD.json
+if [ ! -f $JSON ]; then
+    curl -s "http://localhost:8080/jenkins/job/$JOB/$BUILD/api/json" > $JSON
+fi
+
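+# extract the POD_TO_DELETE build parameter from the Jenkins build JSON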
+POD_TO_DELETE=$(jq -r '.actions[] | select(._class == "hudson.model.ParametersAction") | .parameters[] | select(._class == "hudson.model.StringParameterValue") | .value' < $JSON)
+
+TIMESTAMP=$(jq '.timestamp' < $JSON)
+START_TIME=$(date -d @$(($TIMESTAMP/1000)) +%H:%M:%S)
+
+DURATION=$(jq '.duration' < $JSON)
+DURATION_TIME=$(date -ud @$(($DURATION/1000)) +%M:%S)
+
+RESULT=$(jq -r '.result' < $JSON)
+
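+# print one wiki-markup table row: pod, start time, duration, result, and a link to the archived build logs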
+echo "|$POD_TO_DELETE|$START_TIME|$DURATION_TIME|$RESULT|[$BUILD|http://onapci.org/logs/$JOB/$BUILD/]|"
diff --git a/deployment/onap-lab-ci/scripts/load_all.sh b/deployment/onap-lab-ci/scripts/load_all.sh
new file mode 100755 (executable)
index 0000000..230fa8b
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/bash -x
+./load_influx.sh tlab-heat-daily 1 104
+./load_influx.sh tlab-oom-daily 1 110
+./load_influx.sh windriver-heat-daily 1 106
+./load_influx.sh windriver-oom-daily 1 110
diff --git a/deployment/onap-lab-ci/scripts/load_influx.sh b/deployment/onap-lab-ci/scripts/load_influx.sh
new file mode 100755 (executable)
index 0000000..7194880
--- /dev/null
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+if [ "$#" -ne 3 ]; then
+    echo "$0 <job> <start_build> <end_build>"
+    exit 1
+fi
+JOB_NAME=$1
+START_BUILD=$2
+END_BUILD=$3
+
+set -x
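+# replay archived Robot output.xml files for the given build range into InfluxDB via process-robot.sh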
+for BUILD_NUMBER in $(seq $START_BUILD $END_BUILD); do
+    ./process-robot.sh ~/jobs/$JOB_NAME/builds/$BUILD_NUMBER/robot-plugin/output.xml $JOB_NAME $BUILD_NUMBER
+done
diff --git a/deployment/onap-lab-ci/scripts/mirror-nexus.sh b/deployment/onap-lab-ci/scripts/mirror-nexus.sh
new file mode 100755 (executable)
index 0000000..94efaa1
--- /dev/null
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+
+if [ "$#" -ne 1 ]; then
+    echo "$0 <repo>"
+    echo "   where <repo> is releases or staging"
+    exit 1
+fi
+
+set -x
+REPO=$1
+
+LOG_DIR=/var/www/html/logs/mirror-nexus/$REPO/
+mkdir -p $LOG_DIR
+
+LOG_FILE=$LOG_DIR/$(date +%FT%TZ).log
+TAR_FILE=$REPO-$(date +%F).tar
+
+MIRRORS_DIR=/var/www/html/mirrors/nexus.onap.org
+REPO_DIR=$MIRRORS_DIR/$REPO
+mkdir -p $REPO_DIR
+cd $REPO_DIR
+
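+# mirror the Nexus repository tree, logging only the fetched paths (the URL prefix and ' -> file' suffix are stripped by sed)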
+wget -nv --mirror --random-wait --no-if-modified-since --no-parent -e robots=off --reject "index.html*" -nH --cut-dirs=3 "https://nexus.onap.org/content/repositories/$REPO/" -o /dev/stdout | sed -u "s|URL:https://nexus.onap.org/content/repositories/$REPO/||g" | sed -u 's| ->.*||g' > $LOG_FILE
+
+cd $MIRRORS_DIR
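+# tar into a .part file first, then rename into place; mv -b keeps the previous tarball as a backup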
+tar cvf $TAR_FILE.part $REPO/
+mv -b $TAR_FILE.part $TAR_FILE
diff --git a/deployment/onap-lab-ci/scripts/process-pods.sh b/deployment/onap-lab-ci/scripts/process-pods.sh
new file mode 100755 (executable)
index 0000000..fada454
--- /dev/null
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+if [ "$#" -ne 3 ]; then
+    echo "$0 <onap-pods.json> <job> <build>"
+    exit 1
+fi
+JSON_OUTPUT=$1
+JOB=$2
+BUILD=$3
+
+INFLUX_ENDPOINT='http://10.145.123.16:8086/write?db=robot'
+
+
+TIME=$(date -r $JSON_OUTPUT +%s%N)
+
+POINTS_FILE=/tmp/points-$JOB-$BUILD-pods.txt
+rm -f $POINTS_FILE
+
+cat $JSON_OUTPUT | jq -r  '.items[] | ( (.status.containerStatuses[] | ( " "+.image + " " + (.restartCount | tostring) + " " + (.ready | tostring) ) ) ) + " " + .metadata.name ' | grep -e 'onap/' -e 'openecomp/' | sort | while read CONTAINER; do
+    IMAGE=$(echo $CONTAINER | cut -d ' ' -f 1 | sed -r 's#.*/(onap|openecomp)/##g')
+    RESTART_COUNT=$(echo $CONTAINER | cut -d ' ' -f 2)
+    READY=$(echo $CONTAINER | cut -d ' ' -f 3)
+    POD=$(echo $CONTAINER | cut -d ' ' -f 4)
+
+    if [ "$READY" = "true" ] && [ "$RESTART_COUNT" -eq 0 ]; then
+        PASS=1
+        FAIL=0
+    else
+        PASS=0
+        FAIL=1
+    fi
+
+    # currently assumes that no onap pod contains multiple containers with the same image
+    echo container,job=$JOB,image=$IMAGE,pod=$POD build=$BUILD,restartCount=$RESTART_COUNT,ready=$READY,pass=$PASS,fail=$FAIL $TIME | tee -a $POINTS_FILE
+done
+
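+# post the accumulated line-protocol points to InfluxDB in a single request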
+curl -i $INFLUX_ENDPOINT --data-binary @$POINTS_FILE
diff --git a/deployment/onap-lab-ci/scripts/process-robot.sh b/deployment/onap-lab-ci/scripts/process-robot.sh
new file mode 100755 (executable)
index 0000000..e902bf3
--- /dev/null
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+if [ "$#" -ne 3 ]; then
+    echo "$0 <output.xml> <job> <build>"
+    exit 1
+fi
+ROBOT_OUTPUT=$1
+JOB=$2
+BUILD=$3
+
+INFLUX_ENDPOINT='http://10.145.123.16:8086/write?db=robot'
+
+TMP_XML=/tmp/output-$JOB-$BUILD.xml
+
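+# drop keyword, timeout and tag elements and flatten the XML to a single line before querying it with xmlstarlet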
+if [ ! -f $TMP_XML ]; then
+    xmlstarlet ed -d '//kw' -d '//timeout' -d '//tags' $ROBOT_OUTPUT | tr -d '\n' > $TMP_XML
+
+    # Canonicalize Robot suite names
+    sed -i 's/ONAP.Verify/ONAP_CI/g' $TMP_XML
+    sed -i 's/ONAP.Daily/ONAP_CI/g' $TMP_XML
+    sed -i 's/OpenECOMP.ETE/ONAP_CI/g' $TMP_XML
+fi
+
+
+TIMESTR=$(xmlstarlet sel -t -v "/robot/@generated" $TMP_XML)
+TIME=$(date -d "${TIMESTR}Z" +%s%N)
+
+POINTS_FILE=/tmp/points-$JOB-$BUILD.txt
+rm -f $POINTS_FILE
+
+# test
+xmlstarlet sel -t -m "//test" -c "." -n $TMP_XML | while read test; do
+    NAME=$(echo "$test" | xmlstarlet sel -t -v "/test/@name" | tr ' ' '_' | xmlstarlet unesc)
+    if [ "PASS" = $(echo "$test" | xmlstarlet sel -t -v "/test/status/@status" ) ]; then
+        PASS=1
+        FAIL=0
+    else
+        PASS=0
+        FAIL=1
+    fi
+    STARTTIME=$(date -d "$(echo $test | xmlstarlet sel -t -v "/test/status/@starttime")Z" +%s%N)
+    ENDTIME=$(date -d "$(echo $test | xmlstarlet sel -t -v "/test/status/@endtime")Z" +%s%N)
+    echo test,job=$JOB,name=$NAME build=$BUILD,pass=$PASS,fail=$FAIL,starttime=$STARTTIME,endtime=$ENDTIME $TIME | tee -a $POINTS_FILE
+done
+
+# suite
+xmlstarlet sel -t -m "/robot/statistics/suite/stat" -c "." -n $TMP_XML | while read suite; do
+    ID=$(echo "$suite" | xmlstarlet sel -t -v "/stat/@id" )
+    STATUS=$(xmlstarlet sel -t -m "//suite[@id=\"$ID\"]/status" -c "." -n $TMP_XML)
+    STARTTIMESTR=$(echo $STATUS | xmlstarlet sel -t -v "/status/@starttime")
+    ENDTIMESTR=$(echo $STATUS | xmlstarlet sel -t -v "/status/@endtime")
+    NAME=$(echo "$suite" | xmlstarlet sel -t -m "/stat" -v . | tr ' ' '_' | xmlstarlet unesc)
+    PASS=$(echo "$suite" | xmlstarlet sel -t -v "/stat/@pass" )
+    FAIL=$(echo "$suite" | xmlstarlet sel -t -v "/stat/@fail" )
+    if [ "$STARTTIMESTR" != "N/A" ] && [ "$ENDTIMESTR" != "N/A" ]; then
+       STARTTIME=$(date -d "${STARTTIMESTR}Z" +%s%N)
+       ENDTIME=$(date -d "${ENDTIMESTR}Z" +%s%N)
+       echo suite,job=$JOB,name=$NAME build=$BUILD,pass=$PASS,fail=$FAIL,starttime=$STARTTIME,endtime=$ENDTIME $TIME | tee -a $POINTS_FILE
+    else
+       echo suite,job=$JOB,name=$NAME build=$BUILD,pass=$PASS,fail=$FAIL $TIME | tee -a $POINTS_FILE
+    fi
+done
+
+# tag
+xmlstarlet sel -t -m "/robot/statistics/tag/stat" -c "." -n $TMP_XML | while read tag; do
+    NAME=$(echo "$tag" | xmlstarlet sel -t -m "/stat" -v . | tr ' ' '_' | xmlstarlet unesc)
+    PASS=$(echo "$tag" | xmlstarlet sel -t -v "/stat/@pass" )
+    FAIL=$(echo "$tag" | xmlstarlet sel -t -v "/stat/@fail" )
+    echo tag,job=$JOB,name=$NAME build=$BUILD,pass=$PASS,fail=$FAIL $TIME | tee -a $POINTS_FILE
+done
+
+curl -i $INFLUX_ENDPOINT --data-binary @$POINTS_FILE