First draft of k8s deployment using RKE (Gerrit change 17/83417/1)
author    Gary Wu <gary.i.wu@huawei.com>  Tue, 26 Mar 2019 20:08:29 +0000 (13:08 -0700)
committer Gary Wu <gary.i.wu@huawei.com>  Tue, 26 Mar 2019 21:55:16 +0000 (14:55 -0700)
Change-Id: Ifa0eb52b64438df64692aaf58b9ef8e5dd7fd32c
Issue-ID: INT-993
Signed-off-by: Gary Wu <gary.i.wu@huawei.com>
33 files changed:
.gitignore
deployment/heat/onap-rke/env/windriver/Integration-HEAT-Daily-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/Integration-HEAT-Staging-Daily-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/Integration-HEAT-Verify-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/Integration-OOM-Daily-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/Integration-OOM-Staging-Daily-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/Integration-OOM-Verify-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/Integration-SB-00-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/Integration-SB-01-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/Integration-SB-02-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/Integration-SB-03-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/Integration-SB-04-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/Integration-SB-05-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/Integration-SB-06-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/Integration-SB-07-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/OOF-openrc [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/onap-oom-light.env [new file with mode: 0644]
deployment/heat/onap-rke/env/windriver/onap-oom.env [new file with mode: 0644]
deployment/heat/onap-rke/k8s_vm_init.sh [new file with mode: 0644]
deployment/heat/onap-rke/k8s_vm_init_serv.sh [new file with mode: 0644]
deployment/heat/onap-rke/k8s_vm_install.sh [new file with mode: 0644]
deployment/heat/onap-rke/onap-oom.yaml [new file with mode: 0644]
deployment/heat/onap-rke/parts/onap-oom-1.yaml [new file with mode: 0644]
deployment/heat/onap-rke/parts/onap-oom-2.yaml [new file with mode: 0644]
deployment/heat/onap-rke/parts/onap-oom-3.yaml [new file with mode: 0644]
deployment/heat/onap-rke/rancher_vm_entrypoint.sh [new file with mode: 0644]
deployment/heat/onap-rke/scripts/Crypto.java [new file with mode: 0644]
deployment/heat/onap-rke/scripts/cleanup.sh [new file with mode: 0755]
deployment/heat/onap-rke/scripts/deploy.sh [new file with mode: 0755]
deployment/heat/onap-rke/scripts/gen-onap-oom-yaml.sh [new file with mode: 0755]
deployment/heat/onap-rke/scripts/prepull-docker.sh [new file with mode: 0755]
deployment/heat/onap-rke/scripts/redeploy-module.sh [new file with mode: 0644]
deployment/heat/onap-rke/scripts/redeploy.sh [new file with mode: 0755]
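
The templates and environments below are meant to be launched as an OpenStack Heat stack against one of the Wind River lab tenants. A minimal manual sketch with the stock OpenStack CLI is shown here for orientation only; the scripts/deploy.sh wrapper added by this change presumably automates these steps, including filling in the ${OS_*} placeholders in the env file first.

    # sketch only; assumes python-openstackclient/heatclient and a fully substituted env file
    source deployment/heat/onap-rke/env/windriver/Integration-SB-00-openrc
    openstack stack create \
        -t deployment/heat/onap-rke/onap-oom.yaml \
        -e deployment/heat/onap-rke/env/windriver/onap-oom.env \
        onap-oom
    openstack stack show onap-oom -c stack_status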

diff --git a/.gitignore b/.gitignore
index e6ef23d..6c1ca2a 100644
--- a/.gitignore
@@ -26,3 +26,6 @@ csit/
 *.iml
 /.pydevproject
 /bin/
+*.class
+*.csar
+benchmark/
diff --git a/deployment/heat/onap-rke/env/windriver/Integration-HEAT-Daily-openrc b/deployment/heat/onap-rke/env/windriver/Integration-HEAT-Daily-openrc
new file mode 100644
index 0000000..3a3bb32
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=56d245b3c668419380d3b0f912497e29
+export OS_PROJECT_NAME=Integration-HEAT-Daily
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
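
All of the openrc files below follow this same nine-line pattern and differ only in OS_PROJECT_ID and OS_PROJECT_NAME. After sourcing one, the credentials can be sanity-checked with the standard OpenStack CLI (not part of this change):

    source deployment/heat/onap-rke/env/windriver/Integration-HEAT-Daily-openrc
    openstack token issue     # should return a token scoped to the selected project
    openstack server list     # confirms compute access in that tenant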
diff --git a/deployment/heat/onap-rke/env/windriver/Integration-HEAT-Staging-Daily-openrc b/deployment/heat/onap-rke/env/windriver/Integration-HEAT-Staging-Daily-openrc
new file mode 100644
index 0000000..fa3825c
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=3583253e932845a09cd4c8ca2f31d095
+export OS_PROJECT_NAME=Integration-HEAT-Staging-Daily
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
diff --git a/deployment/heat/onap-rke/env/windriver/Integration-HEAT-Verify-openrc b/deployment/heat/onap-rke/env/windriver/Integration-HEAT-Verify-openrc
new file mode 100644
index 0000000..624db1c
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=28481f6939614cfd83e6767a0e039bcc
+export OS_PROJECT_NAME=Integration-HEAT-Verify
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
diff --git a/deployment/heat/onap-rke/env/windriver/Integration-OOM-Daily-openrc b/deployment/heat/onap-rke/env/windriver/Integration-OOM-Daily-openrc
new file mode 100644
index 0000000..b0eddb6
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=712b6016580e410b9abfec9ca34953ce
+export OS_PROJECT_NAME=Integration-OOM-Daily
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
diff --git a/deployment/heat/onap-rke/env/windriver/Integration-OOM-Staging-Daily-openrc b/deployment/heat/onap-rke/env/windriver/Integration-OOM-Staging-Daily-openrc
new file mode 100644
index 0000000..062f3d1
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=09d8566ea45e43aa974cf447ed591d77
+export OS_PROJECT_NAME=Integration-OOM-Staging-Daily
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
diff --git a/deployment/heat/onap-rke/env/windriver/Integration-OOM-Verify-openrc b/deployment/heat/onap-rke/env/windriver/Integration-OOM-Verify-openrc
new file mode 100644
index 0000000..1b48e4a
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=c27d16b88a4141ec8abcc07e731c0f24
+export OS_PROJECT_NAME=Integration-OOM-Verify
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
diff --git a/deployment/heat/onap-rke/env/windriver/Integration-SB-00-openrc b/deployment/heat/onap-rke/env/windriver/Integration-SB-00-openrc
new file mode 100644
index 0000000..8b55dbd
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=41d6d38489bd40b09ea8a6b6b852dcbd
+export OS_PROJECT_NAME=Integration-SB-00
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
diff --git a/deployment/heat/onap-rke/env/windriver/Integration-SB-01-openrc b/deployment/heat/onap-rke/env/windriver/Integration-SB-01-openrc
new file mode 100644
index 0000000..78469b2
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=087050388b204c73a3e418dd2c1fe30b
+export OS_PROJECT_NAME=Integration-SB-01
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
diff --git a/deployment/heat/onap-rke/env/windriver/Integration-SB-02-openrc b/deployment/heat/onap-rke/env/windriver/Integration-SB-02-openrc
new file mode 100644
index 0000000..3531c11
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=ebb0ea7144004bacac1e39ff23105fa7
+export OS_PROJECT_NAME=Integration-SB-02
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
diff --git a/deployment/heat/onap-rke/env/windriver/Integration-SB-03-openrc b/deployment/heat/onap-rke/env/windriver/Integration-SB-03-openrc
new file mode 100644
index 0000000..f6dbb7a
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=bc43d50ffcb84750bac0c1707a9a765b
+export OS_PROJECT_NAME=Integration-SB-03
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
diff --git a/deployment/heat/onap-rke/env/windriver/Integration-SB-04-openrc b/deployment/heat/onap-rke/env/windriver/Integration-SB-04-openrc
new file mode 100644
index 0000000..0a27051
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=d570c718cbc545029f40e50b75eb13df
+export OS_PROJECT_NAME=Integration-SB-04
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
diff --git a/deployment/heat/onap-rke/env/windriver/Integration-SB-05-openrc b/deployment/heat/onap-rke/env/windriver/Integration-SB-05-openrc
new file mode 100644
index 0000000..9bc0846
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=b8ad3842ab3642f7bf3fbe4e4d3b9f86
+export OS_PROJECT_NAME=Integration-SB-05
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
diff --git a/deployment/heat/onap-rke/env/windriver/Integration-SB-06-openrc b/deployment/heat/onap-rke/env/windriver/Integration-SB-06-openrc
new file mode 100644
index 0000000..c7b650e
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=7fad299815104c0a8f90a8df80343f03
+export OS_PROJECT_NAME=Integration-SB-06
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
diff --git a/deployment/heat/onap-rke/env/windriver/Integration-SB-07-openrc b/deployment/heat/onap-rke/env/windriver/Integration-SB-07-openrc
new file mode 100644
index 0000000..faf6b4b
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=1e097c6713e74fd7ac8e4295e605ee1e
+export OS_PROJECT_NAME=Integration-SB-07
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
diff --git a/deployment/heat/onap-rke/env/windriver/OOF-openrc b/deployment/heat/onap-rke/env/windriver/OOF-openrc
new file mode 100644
index 0000000..dec995f
--- /dev/null
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=6bbd2981b210461dbc8fe846df1a7808
+export OS_PROJECT_NAME=OOF
+export OS_USER_DOMAIN_NAME=Default
+export OS_USERNAME=demo
+export OS_PASSWORD=onapdemo
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_REGION_NAME=RegionOne
+export OS_INTERFACE=public
diff --git a/deployment/heat/onap-rke/env/windriver/onap-oom-light.env b/deployment/heat/onap-rke/env/windriver/onap-oom-light.env
new file mode 100644
index 0000000..c3f6159
--- /dev/null
@@ -0,0 +1,87 @@
+parameters:
+
+  ubuntu_1604_image: ubuntu-16-04-cloud-amd64
+
+  apt_proxy: 10.12.5.2:8000
+  docker_proxy: 10.12.5.2:5000
+
+  rancher_vm_flavor: m1.large
+  # use a smaller flavor for k8s hosts than in the full onap-oom.env deployment
+  k8s_vm_flavor: m1.xlarge
+
+  public_net_id: 971040b2-7059-49dc-b220-4fab50cb2ad4
+
+  oam_network_cidr: 10.0.0.0/16
+
+  integration_override_yaml: >
+    global:
+      repository: __docker_proxy__
+      pullPolicy: IfNotPresent
+    robot:
+      enabled: true
+      openStackKeyStoneUrl: "http://10.12.25.2:5000"
+      openStackPublicNetId: "__public_net_id__"
+      openStackTenantId: "${OS_PROJECT_ID}"
+      openStackUserName: "${OS_USERNAME}"
+      ubuntu14Image: "ubuntu-14-04-cloud-amd64"
+      ubuntu16Image: "ubuntu-16-04-cloud-amd64"
+      openStackPrivateNetId: "__oam_network_id__"
+      openStackPrivateSubnetId: "__oam_subnet_id__"
+      openStackPrivateNetCidr: "__oam_network_cidr__"
+      openStackOamNetworkCidrPrefix: "10.0"
+      dcaeCollectorIp: "__k8s_01_vm_ip__"
+      vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
+      demoArtifactsVersion: "1.2.2"
+      scriptVersion: "1.2.1"
+      rancherIpAddress: "__rancher_ip_addr__"
+      config:
+        openStackEncryptedPasswordHere: "${OS_PASSWORD_ENCRYPTED}"
+    so:
+      enabled: true
+      so-catalog-db-adapter:
+        config:
+          openStackUserName: "${OS_USERNAME}"
+          openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
+          openStackEncryptedPasswordHere: "${OS_PASSWORD_ENCRYPTED}"
+    appc:
+      enabled: true
+      replicaCount: 1
+      config:
+        enableClustering: false
+    sdnc:
+      enabled: true
+      replicaCount: 1
+      config:
+        enableClustering: false
+    clamp:
+      enabled: false
+    pomba:
+      enabled: false
+    cli:
+      enabled: false
+    consul:
+      enabled: false
+    dcaegen2:
+      enabled: false
+    esr:
+      enabled: false
+    log:
+      enabled: false
+    mock:
+      enabled: false
+    msb:
+      enabled: false
+    multicloud:
+      enabled: false
+    nbi:
+      enabled: false
+    oof:
+      enabled: false
+    policy:
+      enabled: false
+    uui:
+      enabled: false
+    vfc:
+      enabled: false
+    vnfsdk:
+      enabled: false
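
The ${OS_PROJECT_ID}, ${OS_USERNAME}, ${OS_PASSWORD} and ${OS_PASSWORD_ENCRYPTED} strings inside integration_override_yaml are shell-style placeholders, not Heat parameters, so they have to be expanded from a sourced openrc before the env file is handed to Heat. A rough illustration using envsubst is below; the actual substitution mechanism used by the deploy scripts in this change may differ, and OS_PASSWORD_ENCRYPTED is assumed to be produced separately (scripts/Crypto.java appears intended for that).

    cd deployment/heat/onap-rke
    source env/windriver/Integration-OOM-Daily-openrc
    export OS_PASSWORD_ENCRYPTED=<encrypted password>    # produced out of band
    envsubst < env/windriver/onap-oom-light.env > /tmp/onap-oom-light.env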
diff --git a/deployment/heat/onap-rke/env/windriver/onap-oom.env b/deployment/heat/onap-rke/env/windriver/onap-oom.env
new file mode 100644
index 0000000..bf98171
--- /dev/null
@@ -0,0 +1,146 @@
+parameters:
+
+  ubuntu_1804_image: ubuntu-18.04
+
+  apt_proxy: 10.12.5.2:8000
+  docker_proxy: 10.12.5.2:5000
+
+  rancher_vm_flavor: m1.large
+  k8s_vm_flavor: m2.xlarge
+  etcd_vm_flavor: m1.medium
+  orch_vm_flavor: m1.large
+
+  public_net_id: 971040b2-7059-49dc-b220-4fab50cb2ad4
+
+  oam_network_cidr: 10.0.0.0/16
+
+  integration_gerrit_branch: master
+
+  helm_deploy_delay: 2.5m
+
+  integration_override_yaml: >
+    global:
+      repository: __docker_proxy__
+      pullPolicy: IfNotPresent
+    robot:
+      enabled: true
+      flavor: large
+      appcUsername: "appc@appc.onap.org"
+      appcPassword: "demo123456!"
+      openStackKeyStoneUrl: "http://10.12.25.2:5000"
+      openStackPublicNetId: "__public_net_id__"
+      openStackTenantId: "${OS_PROJECT_ID}"
+      openStackUserName: "${OS_USERNAME}"
+      ubuntu14Image: "ubuntu-14-04-cloud-amd64"
+      ubuntu16Image: "ubuntu-16-04-cloud-amd64"
+      openStackPrivateNetId: "__oam_network_id__"
+      openStackPrivateSubnetId: "__oam_subnet_id__"
+      openStackPrivateNetCidr: "__oam_network_cidr__"
+      openStackSecurityGroup: "__sec_group__"
+      openStackOamNetworkCidrPrefix: "10.0"
+      dcaeCollectorIp: "__k8s_01_vm_ip__"
+      vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
+      demoArtifactsVersion: "1.3.0"
+      demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases"
+      scriptVersion: "1.3.0"
+      rancherIpAddress: "__rancher_ip_addr__"
+      config:
+        openStackEncryptedPasswordHere: "${OS_PASSWORD_ENCRYPTED}"
+    so:
+      enabled: true
+      so-catalog-db-adapter:
+        config:
+          openStackUserName: "${OS_USERNAME}"
+          openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
+          openStackEncryptedPasswordHere: "${OS_PASSWORD_ENCRYPTED}"
+    appc:
+      enabled: true
+      replicaCount: 3
+      config:
+        enableClustering: true
+        openStackType: "OpenStackProvider"
+        openStackName: "OpenStack"
+        openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
+        openStackServiceTenantName: "${OS_PROJECT_NAME}"
+        openStackDomain: "${OS_USER_DOMAIN_NAME}"
+        openStackUserName: "${OS_USERNAME}"
+        openStackEncryptedPassword: "${OS_PASSWORD}"
+    sdnc:
+      enabled: true
+      replicaCount: 3
+      config:
+        enableClustering: true
+    aai:
+      enabled: true
+      liveness:
+        initialDelaySeconds: 120
+      aai-data-router:
+        liveness:
+          initialDelaySeconds: 120
+      aai-sparky-be:
+        liveness:
+          initialDelaySeconds: 120
+      aai-spike:
+        liveness:
+          initialDelaySeconds: 120
+    portal:
+      enabled: true
+      global:
+        portalHostName: "__portal_hostname__"
+      portal-mariadb:
+        config:
+          sdcFeHostName: "__portal_hostname__"
+          papHostName: "__portal_hostname__"
+          vidHostName: "__portal_hostname__"
+          aaiSparkyHostName: "__portal_hostname__"
+          cliHostName: "__portal_hostname__"
+          portalSdkHostName: "__portal_hostname__"
+          dmaapBcHostName: "__portal_hostname__"
+          msbHostName: "__portal_hostname__"
+    vid:
+      enabled: true
+      config:
+        portalhost: "__portal_hostname__"
+
+    aaf:
+      enabled: true
+    cassandra:
+      enabled: true
+    clamp:
+      enabled: true
+    cli:
+      enabled: true
+    consul:
+      enabled: true
+    contrib:
+      enabled: true
+    dcaegen2:
+      enabled: true
+    dmaap:
+      enabled: true
+    esr:
+      enabled: true
+    log:
+      enabled: true
+    sniro-emulator:
+      enabled: true
+    oof:
+      enabled: true
+    msb:
+      enabled: true
+    multicloud:
+      enabled: true
+    nbi:
+      enabled: true
+    policy:
+      enabled: true
+    pomba:
+      enabled: true
+    sdc:
+      enabled: true
+    uui:
+      enabled: true
+    vfc:
+      enabled: true
+    vnfsdk:
+      enabled: true
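
Compared with onap-oom-light.env above, this full profile enables every ONAP component and clusters APPC and SDNC (replicaCount: 3, enableClustering: true). A quick way to see what the light profile switches off:

    cd deployment/heat/onap-rke/env/windriver
    diff onap-oom-light.env onap-oom.env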
diff --git a/deployment/heat/onap-rke/k8s_vm_init.sh b/deployment/heat/onap-rke/k8s_vm_init.sh
new file mode 100644
index 0000000..aceb6a9
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/bash -x
+# Copyright 2018 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+while [ ! -e /dockerdata-nfs/.git ]; do
+    mount /dockerdata-nfs
+    sleep 10
+done
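
This init script simply keeps retrying the /dockerdata-nfs mount (the fstab entry is written by k8s_vm_install.sh below) until the share is available and populated, using the presence of /dockerdata-nfs/.git as the marker that the rancher VM has seeded it. On a running k8s or orch VM the mount can be checked by hand:

    mount | grep dockerdata-nfs || sudo mount /dockerdata-nfs
    ls /dockerdata-nfs     # should show the content exported by the rancher VM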
diff --git a/deployment/heat/onap-rke/k8s_vm_init_serv.sh b/deployment/heat/onap-rke/k8s_vm_init_serv.sh
new file mode 100644
index 0000000..1536077
--- /dev/null
@@ -0,0 +1,98 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides:          k8s_vm_init.sh
+# Required-Start:    $remote_fs $syslog
+# Required-Stop:     $remote_fs $syslog
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: Start daemon at boot time
+# Description:       Enable service provided by daemon.
+### END INIT INFO
+
+dir="/opt"
+cmd="./k8s_vm_init.sh"
+user="root"
+
+name=`basename $0`
+pid_file="/var/run/$name.pid"
+stdout_log="/var/log/$name.log"
+stderr_log="/var/log/$name.err"
+
+get_pid() {
+    cat "$pid_file"
+}
+
+is_running() {
+    [ -f "$pid_file" ] && ps `get_pid` > /dev/null 2>&1
+}
+
+case "$1" in
+    start)
+    if is_running; then
+        echo "Already started"
+    else
+        echo "Starting $name"
+        cd "$dir"
+        if [ -z "$user" ]; then
+            sudo $cmd >> "$stdout_log" 2>> "$stderr_log" &
+        else
+            sudo -u "$user" $cmd >> "$stdout_log" 2>> "$stderr_log" &
+        fi
+        echo $! > "$pid_file"
+        if ! is_running; then
+            echo "Unable to start, see $stdout_log and $stderr_log"
+            exit 1
+        fi
+    fi
+    ;;
+    stop)
+    if is_running; then
+        echo -n "Stopping $name.."
+        kill `get_pid`
+        for i in $(seq 1 10)    # {1..10} is not expanded by /bin/sh (dash)
+        do
+            if ! is_running; then
+                break
+            fi
+
+            echo -n "."
+            sleep 1
+        done
+        echo
+
+        if is_running; then
+            echo "Not stopped; may still be shutting down or shutdown may have failed"
+            exit 1
+        else
+            echo "Stopped"
+            if [ -f "$pid_file" ]; then
+                rm "$pid_file"
+            fi
+        fi
+    else
+        echo "Not running"
+    fi
+    ;;
+    restart)
+    $0 stop
+    if is_running; then
+        echo "Unable to stop, will not attempt to start"
+        exit 1
+    fi
+    $0 start
+    ;;
+    status)
+    if is_running; then
+        echo "Running"
+    else
+        echo "Stopped"
+        exit 1
+    fi
+    ;;
+    *)
+    echo "Usage: $0 {start|stop|restart|status}"
+    exit 1
+    ;;
+esac
+
+exit 0
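
k8s_vm_install.sh (below) registers this wrapper with update-rc.d, so after the cloud-init reboot the remount loop is started as an ordinary SysV service and can be driven as such:

    sudo service k8s_vm_init_serv status     # prints "Running" or "Stopped"
    sudo service k8s_vm_init_serv restart
    tail /var/log/k8s_vm_init_serv.log       # stdout_log path defined above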
diff --git a/deployment/heat/onap-rke/k8s_vm_install.sh b/deployment/heat/onap-rke/k8s_vm_install.sh
new file mode 100644
index 0000000..bc538f8
--- /dev/null
@@ -0,0 +1,60 @@
+#!/bin/bash -x
+# Copyright 2018 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+export DEBIAN_FRONTEND=noninteractive
+echo "__host_private_ip_addr__ $(hostname)" >> /etc/hosts
+printenv
+
+mkdir -p /opt/config
+echo "__docker_version__" > /opt/config/docker_version.txt
+echo "__rancher_ip_addr__" > /opt/config/rancher_ip_addr.txt
+echo "__rancher_private_ip_addr__" > /opt/config/rancher_private_ip_addr.txt
+echo "__host_private_ip_addr__" > /opt/config/host_private_ip_addr.txt
+echo "__mtu__" > /opt/config/mtu.txt
+
+mkdir -p /etc/docker
+if [ ! -z "__docker_proxy__" ]; then
+    cat > /etc/docker/daemon.json <<EOF
+{
+  "mtu": __mtu__,
+  "insecure-registries" : ["__docker_proxy__"]
+}
+EOF
+else
+    cat > /etc/docker/daemon.json <<EOF
+{
+  "mtu": __mtu__
+}
+EOF
+fi
+if [ ! -z "__apt_proxy__" ]; then
+    cat > /etc/apt/apt.conf.d/30proxy <<EOF
+Acquire::http { Proxy "http://__apt_proxy__"; };
+Acquire::https::Proxy "DIRECT";
+EOF
+fi
+
+
+mkdir -p /dockerdata-nfs
+echo "__rancher_private_ip_addr__:/dockerdata-nfs /dockerdata-nfs nfs noauto,noatime,fg,retry=1,x-systemd.automount,_netdev,soft,nolock,intr,tcp,actimeo=1800 0 0" | tee -a /etc/fstab
+
+# workaround for OpenStack intermittent failure to change default apt mirrors
+sed -i 's|http://archive.ubuntu.com|http://nova.clouds.archive.ubuntu.com|g' /etc/apt/sources.list
+
+while ! hash jq &> /dev/null; do
+    apt-get -y update
+    # apt-get -y dist-upgrade
+    apt-get -y install apt-transport-https ca-certificates curl software-properties-common jq nfs-common docker.io
+    systemctl enable docker
+    usermod -aG docker ubuntu
+    sleep 10
+done
+
+# Enable autorestart when VM reboots
+update-rc.d k8s_vm_init_serv defaults
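
After cloud-init runs this script and the VM reboots (power_state: reboot in the Heat cloud config), the rendered configuration can be spot-checked on the host. Assuming the Wind River env values docker_proxy=10.12.5.2:5000 and mtu=1500:

    cat /opt/config/rancher_ip_addr.txt /opt/config/host_private_ip_addr.txt
    cat /etc/docker/daemon.json              # expect mtu 1500 and the insecure registry entry
    docker info 2>/dev/null | grep -A1 'Insecure Registries'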
diff --git a/deployment/heat/onap-rke/onap-oom.yaml b/deployment/heat/onap-rke/onap-oom.yaml
new file mode 100644
index 0000000..c999327
--- /dev/null
@@ -0,0 +1,959 @@
+#
+# Generated by scripts/gen-onap-oom-yaml.sh; MANUAL CHANGES WILL BE LOST
+#
+heat_template_version: 2015-10-15
+description: ONAP on RKE Kubernetes using OOM
+
+parameters:
+  docker_proxy:
+    type: string
+
+  apt_proxy:
+    type: string
+
+  public_net_id:
+    type: string
+    description: The ID of the Public network for floating IP address allocation
+
+  oam_network_cidr:
+    type: string
+    description: CIDR of the OAM ONAP network
+
+  ubuntu_1804_image:
+    type: string
+    description: Name of the Ubuntu 18.04 image
+
+  rancher_vm_flavor:
+    type: string
+    description: VM flavor for Rancher
+
+  k8s_vm_flavor:
+    type: string
+    description: VM flavor for k8s hosts
+
+  etcd_vm_flavor:
+    type: string
+    description: VM flavor for etcd hosts
+
+  orch_vm_flavor:
+    type: string
+    description: VM flavor for orch hosts
+
+  integration_override_yaml:
+    type: string
+    description: Content for integration_override.yaml
+
+  integration_gerrit_branch:
+    type: string
+    default: "master"
+
+  integration_gerrit_refspec:
+    type: string
+    default: ""
+
+  oom_gerrit_branch:
+    type: string
+    default: "master"
+
+  oom_gerrit_refspec:
+    type: string
+    default: ""
+
+  docker_manifest:
+    type: string
+    default: ""
+
+  key_name:
+    type: string
+    default: "onap_key"
+
+  docker_version:
+    type: string
+    default: "17.03.2"
+
+  rancher_version:
+    type: string
+    default: "1.6.26"
+
+  rancher_agent_version:
+    type: string
+    default: "1.2.11"
+
+  kubectl_version:
+    type: string
+    default: "1.13.4"
+
+  helm_version:
+    type: string
+    default: "2.9.1"
+
+  helm_deploy_delay:
+    type: string
+    default: "3m"
+
+  use_ramdisk:
+    type: string
+    description: Set to "true" if you want to use a RAM disk for /dockerdata-nfs/.
+    default: "false"
+
+  mtu:
+    type: number
+    default: 1500
+
+  portal_hostname:
+    type: string
+    description: The FQDN of the k8s host that will be used for the Portal UI component URLs; this needs to be resolvable by the client
+    default: "portal.api.simpledemo.onap.org"
+
+resources:
+  random-str:
+    type: OS::Heat::RandomString
+    properties:
+      length: 4
+
+  # ONAP security group
+  onap_sg:
+    type: OS::Neutron::SecurityGroup
+    properties:
+      name:
+        str_replace:
+          template: base_rand
+          params:
+            base: onap_sg
+            rand: { get_resource: random-str }
+      description: security group used by ONAP
+      rules:
+        # All egress traffic
+        - direction: egress
+          ethertype: IPv4
+        - direction: egress
+          ethertype: IPv6
+        # ingress traffic
+        # ICMP
+        - protocol: icmp
+        - protocol: udp
+          port_range_min: 1
+          port_range_max: 65535
+        - protocol: tcp
+          port_range_min: 1
+          port_range_max: 65535
+        # Protocols used for vLB/vDNS use case
+        - protocol: 47
+        - protocol: 53
+        - protocol: 132
+
+
+  # ONAP management private network
+  oam_network:
+    type: OS::Neutron::Net
+    properties:
+      name:
+        str_replace:
+          template: oam_network_rand
+          params:
+            rand: { get_resource: random-str }
+
+  oam_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      name:
+        str_replace:
+          template: oam_network_rand
+          params:
+            rand: { get_resource: random-str }
+      network_id: { get_resource: oam_network }
+      cidr: { get_param: oam_network_cidr }
+      dns_nameservers: [ "8.8.8.8" ]
+
+  router:
+    type: OS::Neutron::Router
+    properties:
+      name:
+        list_join: ['-', [{ get_param: 'OS::stack_name' }, 'router']]
+      external_gateway_info:
+        network: { get_param: public_net_id }
+
+  router_interface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router_id: { get_resource: router }
+      subnet_id: { get_resource: oam_subnet }
+
+  rancher_private_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: oam_network }
+      fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
+      security_groups:
+      - { get_resource: onap_sg }
+
+  rancher_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: rancher_private_port }
+
+  rancher_vm:
+    type: OS::Nova::Server
+    properties:
+      name:
+        list_join: ['-', [{ get_param: 'OS::stack_name' }, 'rancher']]
+      image: { get_param: ubuntu_1804_image }
+      flavor: { get_param: rancher_vm_flavor }
+      key_name: { get_param: key_name }
+      networks:
+      - port: { get_resource: rancher_private_port }
+      user_data_format: RAW
+      user_data:
+        str_replace:
+          template:
+            get_file: rancher_vm_entrypoint.sh
+          params:
+            __docker_proxy__: { get_param: docker_proxy }
+            __apt_proxy__: { get_param: apt_proxy }
+            __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+            __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+            __integration_override_yaml__: { get_param: integration_override_yaml }
+            __integration_gerrit_branch__: { get_param: integration_gerrit_branch }
+            __integration_gerrit_refspec__: { get_param: integration_gerrit_refspec }
+            __oom_gerrit_branch__: { get_param: oom_gerrit_branch }
+            __oom_gerrit_refspec__: { get_param: oom_gerrit_refspec }
+            __docker_manifest__: { get_param: docker_manifest }
+            __docker_version__: { get_param: docker_version }
+            __rancher_version__: { get_param: rancher_version }
+            __rancher_agent_version__: { get_param: rancher_agent_version }
+            __kubectl_version__: { get_param: kubectl_version }
+            __helm_version__: { get_param: helm_version }
+            __helm_deploy_delay__: { get_param: helm_deploy_delay }
+            __use_ramdisk__: { get_param: use_ramdisk }
+            __mtu__: { get_param: mtu }
+            __portal_hostname__: { get_param: portal_hostname }
+            __public_net_id__: { get_param: public_net_id }
+            __oam_network_cidr__: { get_param: oam_network_cidr }
+            __oam_network_id__: { get_resource: oam_network }
+            __oam_subnet_id__: { get_resource: oam_subnet }
+            __sec_group__: { get_resource: onap_sg }
+            __k8s_01_vm_ip__: { get_attr: [k8s_01_floating_ip, floating_ip_address] }
+            __k8s_vm_ips__: [
+              get_attr: [k8s_01_floating_ip, floating_ip_address],
+              get_attr: [k8s_02_floating_ip, floating_ip_address],
+              get_attr: [k8s_03_floating_ip, floating_ip_address],
+              get_attr: [k8s_04_floating_ip, floating_ip_address],
+              get_attr: [k8s_05_floating_ip, floating_ip_address],
+              get_attr: [k8s_06_floating_ip, floating_ip_address],
+            ]
+            __k8s_private_ips__: [
+              get_attr: [k8s_01_floating_ip, fixed_ip_address],
+              get_attr: [k8s_02_floating_ip, fixed_ip_address],
+              get_attr: [k8s_03_floating_ip, fixed_ip_address],
+              get_attr: [k8s_04_floating_ip, fixed_ip_address],
+              get_attr: [k8s_05_floating_ip, fixed_ip_address],
+              get_attr: [k8s_06_floating_ip, fixed_ip_address],
+            ]
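+  # Note: the six k8s_* and three orch_* resource groups below repeat the same
+  # port / floating IP / cloud-config / server pattern; only the resource names
+  # and the __host_label__ value ('compute' vs 'orchestration') differ.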
+  k8s_01_private_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: oam_network }
+      fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
+      security_groups:
+      - { get_resource: onap_sg }
+
+  k8s_01_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: k8s_01_private_port }
+
+  k8s_01_vm_scripts:
+    type: OS::Heat::CloudConfig
+    properties:
+      cloud_config:
+        power_state:
+          mode: reboot
+        runcmd:
+        - [ /opt/k8s_vm_install.sh ]
+        write_files:
+        - path: /opt/k8s_vm_install.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __docker_proxy__: { get_param: docker_proxy }
+                __apt_proxy__: { get_param: apt_proxy }
+                __docker_version__: { get_param: docker_version }
+                __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+                __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+                __host_private_ip_addr__: { get_attr: [k8s_01_floating_ip, fixed_ip_address] }
+                __mtu__: { get_param: mtu }
+              template:
+                get_file: k8s_vm_install.sh
+        - path: /opt/k8s_vm_init.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __host_private_ip_addr__: { get_attr: [k8s_01_floating_ip, fixed_ip_address] }
+                __host_label__: 'compute'
+              template:
+                get_file: k8s_vm_init.sh
+        - path: /etc/init.d/k8s_vm_init_serv
+          permissions: '0755'
+          content:
+            get_file: k8s_vm_init_serv.sh
+
+  k8s_01_vm_config:
+    type: OS::Heat::MultipartMime
+    properties:
+      parts:
+      - config: { get_resource: k8s_01_vm_scripts }
+
+  k8s_01_vm:
+    type: OS::Nova::Server
+    properties:
+      name:
+        list_join: ['-', [ { get_param: 'OS::stack_name' }, 'k8s', '01' ] ]
+      image: { get_param: ubuntu_1804_image }
+      flavor: { get_param: k8s_vm_flavor }
+      key_name: { get_param: key_name }
+      networks:
+      - port: { get_resource: k8s_01_private_port }
+      user_data_format: SOFTWARE_CONFIG
+      user_data: { get_resource: k8s_01_vm_config }
+
+  k8s_02_private_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: oam_network }
+      fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
+      security_groups:
+      - { get_resource: onap_sg }
+
+  k8s_02_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: k8s_02_private_port }
+
+  k8s_02_vm_scripts:
+    type: OS::Heat::CloudConfig
+    properties:
+      cloud_config:
+        power_state:
+          mode: reboot
+        runcmd:
+        - [ /opt/k8s_vm_install.sh ]
+        write_files:
+        - path: /opt/k8s_vm_install.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __docker_proxy__: { get_param: docker_proxy }
+                __apt_proxy__: { get_param: apt_proxy }
+                __docker_version__: { get_param: docker_version }
+                __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+                __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+                __host_private_ip_addr__: { get_attr: [k8s_02_floating_ip, fixed_ip_address] }
+                __mtu__: { get_param: mtu }
+              template:
+                get_file: k8s_vm_install.sh
+        - path: /opt/k8s_vm_init.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __host_private_ip_addr__: { get_attr: [k8s_02_floating_ip, fixed_ip_address] }
+                __host_label__: 'compute'
+              template:
+                get_file: k8s_vm_init.sh
+        - path: /etc/init.d/k8s_vm_init_serv
+          permissions: '0755'
+          content:
+            get_file: k8s_vm_init_serv.sh
+
+  k8s_02_vm_config:
+    type: OS::Heat::MultipartMime
+    properties:
+      parts:
+      - config: { get_resource: k8s_02_vm_scripts }
+
+  k8s_02_vm:
+    type: OS::Nova::Server
+    properties:
+      name:
+        list_join: ['-', [ { get_param: 'OS::stack_name' }, 'k8s', '02' ] ]
+      image: { get_param: ubuntu_1804_image }
+      flavor: { get_param: k8s_vm_flavor }
+      key_name: { get_param: key_name }
+      networks:
+      - port: { get_resource: k8s_02_private_port }
+      user_data_format: SOFTWARE_CONFIG
+      user_data: { get_resource: k8s_02_vm_config }
+
+  k8s_03_private_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: oam_network }
+      fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
+      security_groups:
+      - { get_resource: onap_sg }
+
+  k8s_03_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: k8s_03_private_port }
+
+  k8s_03_vm_scripts:
+    type: OS::Heat::CloudConfig
+    properties:
+      cloud_config:
+        power_state:
+          mode: reboot
+        runcmd:
+        - [ /opt/k8s_vm_install.sh ]
+        write_files:
+        - path: /opt/k8s_vm_install.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __docker_proxy__: { get_param: docker_proxy }
+                __apt_proxy__: { get_param: apt_proxy }
+                __docker_version__: { get_param: docker_version }
+                __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+                __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+                __host_private_ip_addr__: { get_attr: [k8s_03_floating_ip, fixed_ip_address] }
+                __mtu__: { get_param: mtu }
+              template:
+                get_file: k8s_vm_install.sh
+        - path: /opt/k8s_vm_init.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __host_private_ip_addr__: { get_attr: [k8s_03_floating_ip, fixed_ip_address] }
+                __host_label__: 'compute'
+              template:
+                get_file: k8s_vm_init.sh
+        - path: /etc/init.d/k8s_vm_init_serv
+          permissions: '0755'
+          content:
+            get_file: k8s_vm_init_serv.sh
+
+  k8s_03_vm_config:
+    type: OS::Heat::MultipartMime
+    properties:
+      parts:
+      - config: { get_resource: k8s_03_vm_scripts }
+
+  k8s_03_vm:
+    type: OS::Nova::Server
+    properties:
+      name:
+        list_join: ['-', [ { get_param: 'OS::stack_name' }, 'k8s', '03' ] ]
+      image: { get_param: ubuntu_1804_image }
+      flavor: { get_param: k8s_vm_flavor }
+      key_name: { get_param: key_name }
+      networks:
+      - port: { get_resource: k8s_03_private_port }
+      user_data_format: SOFTWARE_CONFIG
+      user_data: { get_resource: k8s_03_vm_config }
+
+  k8s_04_private_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: oam_network }
+      fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
+      security_groups:
+      - { get_resource: onap_sg }
+
+  k8s_04_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: k8s_04_private_port }
+
+  k8s_04_vm_scripts:
+    type: OS::Heat::CloudConfig
+    properties:
+      cloud_config:
+        power_state:
+          mode: reboot
+        runcmd:
+        - [ /opt/k8s_vm_install.sh ]
+        write_files:
+        - path: /opt/k8s_vm_install.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __docker_proxy__: { get_param: docker_proxy }
+                __apt_proxy__: { get_param: apt_proxy }
+                __docker_version__: { get_param: docker_version }
+                __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+                __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+                __host_private_ip_addr__: { get_attr: [k8s_04_floating_ip, fixed_ip_address] }
+                __mtu__: { get_param: mtu }
+              template:
+                get_file: k8s_vm_install.sh
+        - path: /opt/k8s_vm_init.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __host_private_ip_addr__: { get_attr: [k8s_04_floating_ip, fixed_ip_address] }
+                __host_label__: 'compute'
+              template:
+                get_file: k8s_vm_init.sh
+        - path: /etc/init.d/k8s_vm_init_serv
+          permissions: '0755'
+          content:
+            get_file: k8s_vm_init_serv.sh
+
+  k8s_04_vm_config:
+    type: OS::Heat::MultipartMime
+    properties:
+      parts:
+      - config: { get_resource: k8s_04_vm_scripts }
+
+  k8s_04_vm:
+    type: OS::Nova::Server
+    properties:
+      name:
+        list_join: ['-', [ { get_param: 'OS::stack_name' }, 'k8s', '04' ] ]
+      image: { get_param: ubuntu_1804_image }
+      flavor: { get_param: k8s_vm_flavor }
+      key_name: { get_param: key_name }
+      networks:
+      - port: { get_resource: k8s_04_private_port }
+      user_data_format: SOFTWARE_CONFIG
+      user_data: { get_resource: k8s_04_vm_config }
+
+  k8s_05_private_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: oam_network }
+      fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
+      security_groups:
+      - { get_resource: onap_sg }
+
+  k8s_05_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: k8s_05_private_port }
+
+  k8s_05_vm_scripts:
+    type: OS::Heat::CloudConfig
+    properties:
+      cloud_config:
+        power_state:
+          mode: reboot
+        runcmd:
+        - [ /opt/k8s_vm_install.sh ]
+        write_files:
+        - path: /opt/k8s_vm_install.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __docker_proxy__: { get_param: docker_proxy }
+                __apt_proxy__: { get_param: apt_proxy }
+                __docker_version__: { get_param: docker_version }
+                __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+                __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+                __host_private_ip_addr__: { get_attr: [k8s_05_floating_ip, fixed_ip_address] }
+                __mtu__: { get_param: mtu }
+              template:
+                get_file: k8s_vm_install.sh
+        - path: /opt/k8s_vm_init.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __host_private_ip_addr__: { get_attr: [k8s_05_floating_ip, fixed_ip_address] }
+                __host_label__: 'compute'
+              template:
+                get_file: k8s_vm_init.sh
+        - path: /etc/init.d/k8s_vm_init_serv
+          permissions: '0755'
+          content:
+            get_file: k8s_vm_init_serv.sh
+
+  k8s_05_vm_config:
+    type: OS::Heat::MultipartMime
+    properties:
+      parts:
+      - config: { get_resource: k8s_05_vm_scripts }
+
+  k8s_05_vm:
+    type: OS::Nova::Server
+    properties:
+      name:
+        list_join: ['-', [ { get_param: 'OS::stack_name' }, 'k8s', '05' ] ]
+      image: { get_param: ubuntu_1804_image }
+      flavor: { get_param: k8s_vm_flavor }
+      key_name: { get_param: key_name }
+      networks:
+      - port: { get_resource: k8s_05_private_port }
+      user_data_format: SOFTWARE_CONFIG
+      user_data: { get_resource: k8s_05_vm_config }
+
+  k8s_06_private_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: oam_network }
+      fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
+      security_groups:
+      - { get_resource: onap_sg }
+
+  k8s_06_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: k8s_06_private_port }
+
+  k8s_06_vm_scripts:
+    type: OS::Heat::CloudConfig
+    properties:
+      cloud_config:
+        power_state:
+          mode: reboot
+        runcmd:
+        - [ /opt/k8s_vm_install.sh ]
+        write_files:
+        - path: /opt/k8s_vm_install.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __docker_proxy__: { get_param: docker_proxy }
+                __apt_proxy__: { get_param: apt_proxy }
+                __docker_version__: { get_param: docker_version }
+                __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+                __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+                __host_private_ip_addr__: { get_attr: [k8s_06_floating_ip, fixed_ip_address] }
+                __mtu__: { get_param: mtu }
+              template:
+                get_file: k8s_vm_install.sh
+        - path: /opt/k8s_vm_init.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __host_private_ip_addr__: { get_attr: [k8s_06_floating_ip, fixed_ip_address] }
+                __host_label__: 'compute'
+              template:
+                get_file: k8s_vm_init.sh
+        - path: /etc/init.d/k8s_vm_init_serv
+          permissions: '0755'
+          content:
+            get_file: k8s_vm_init_serv.sh
+
+  k8s_06_vm_config:
+    type: OS::Heat::MultipartMime
+    properties:
+      parts:
+      - config: { get_resource: k8s_06_vm_scripts }
+
+  k8s_06_vm:
+    type: OS::Nova::Server
+    properties:
+      name:
+        list_join: ['-', [ { get_param: 'OS::stack_name' }, 'k8s', '06' ] ]
+      image: { get_param: ubuntu_1804_image }
+      flavor: { get_param: k8s_vm_flavor }
+      key_name: { get_param: key_name }
+      networks:
+      - port: { get_resource: k8s_06_private_port }
+      user_data_format: SOFTWARE_CONFIG
+      user_data: { get_resource: k8s_06_vm_config }
+
+  orch_1_private_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: oam_network }
+      fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
+      security_groups:
+      - { get_resource: onap_sg }
+
+  orch_1_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: orch_1_private_port }
+
+  orch_1_vm_scripts:
+    type: OS::Heat::CloudConfig
+    properties:
+      cloud_config:
+        power_state:
+          mode: reboot
+        runcmd:
+        - [ /opt/k8s_vm_install.sh ]
+        write_files:
+        - path: /opt/k8s_vm_install.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __docker_proxy__: { get_param: docker_proxy }
+                __apt_proxy__: { get_param: apt_proxy }
+                __docker_version__: { get_param: docker_version }
+                __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+                __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+                __host_private_ip_addr__: { get_attr: [orch_1_floating_ip, fixed_ip_address] }
+                __mtu__: { get_param: mtu }
+              template:
+                get_file: k8s_vm_install.sh
+        - path: /opt/k8s_vm_init.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __host_private_ip_addr__: { get_attr: [orch_1_floating_ip, fixed_ip_address] }
+                __host_label__: 'orchestration'
+              template:
+                get_file: k8s_vm_init.sh
+        - path: /etc/init.d/k8s_vm_init_serv
+          permissions: '0755'
+          content:
+            get_file: k8s_vm_init_serv.sh
+
+  orch_1_vm_config:
+    type: OS::Heat::MultipartMime
+    properties:
+      parts:
+      - config: { get_resource: orch_1_vm_scripts }
+
+  orch_1_vm:
+    type: OS::Nova::Server
+    properties:
+      name:
+        list_join: ['-', [ { get_param: 'OS::stack_name' }, 'orch', '1' ] ]
+      image: { get_param: ubuntu_1804_image }
+      flavor: { get_param: orch_vm_flavor }
+      key_name: { get_param: key_name }
+      networks:
+      - port: { get_resource: orch_1_private_port }
+      user_data_format: SOFTWARE_CONFIG
+      user_data: { get_resource: orch_1_vm_config }
+
+  orch_2_private_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: oam_network }
+      fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
+      security_groups:
+      - { get_resource: onap_sg }
+
+  orch_2_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: orch_2_private_port }
+
+  orch_2_vm_scripts:
+    type: OS::Heat::CloudConfig
+    properties:
+      cloud_config:
+        power_state:
+          mode: reboot
+        runcmd:
+        - [ /opt/k8s_vm_install.sh ]
+        write_files:
+        - path: /opt/k8s_vm_install.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __docker_proxy__: { get_param: docker_proxy }
+                __apt_proxy__: { get_param: apt_proxy }
+                __docker_version__: { get_param: docker_version }
+                __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+                __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+                __host_private_ip_addr__: { get_attr: [orch_2_floating_ip, fixed_ip_address] }
+                __mtu__: { get_param: mtu }
+              template:
+                get_file: k8s_vm_install.sh
+        - path: /opt/k8s_vm_init.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __host_private_ip_addr__: { get_attr: [orch_2_floating_ip, fixed_ip_address] }
+                __host_label__: 'orchestration'
+              template:
+                get_file: k8s_vm_init.sh
+        - path: /etc/init.d/k8s_vm_init_serv
+          permissions: '0755'
+          content:
+            get_file: k8s_vm_init_serv.sh
+
+  orch_2_vm_config:
+    type: OS::Heat::MultipartMime
+    properties:
+      parts:
+      - config: { get_resource: orch_2_vm_scripts }
+
+  orch_2_vm:
+    type: OS::Nova::Server
+    properties:
+      name:
+        list_join: ['-', [ { get_param: 'OS::stack_name' }, 'orch', '2' ] ]
+      image: { get_param: ubuntu_1804_image }
+      flavor: { get_param: orch_vm_flavor }
+      key_name: { get_param: key_name }
+      networks:
+      - port: { get_resource: orch_2_private_port }
+      user_data_format: SOFTWARE_CONFIG
+      user_data: { get_resource: orch_2_vm_config }
+
+  orch_3_private_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: oam_network }
+      fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
+      security_groups:
+      - { get_resource: onap_sg }
+
+  orch_3_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: orch_3_private_port }
+
+  orch_3_vm_scripts:
+    type: OS::Heat::CloudConfig
+    properties:
+      cloud_config:
+        power_state:
+          mode: reboot
+        runcmd:
+        - [ /opt/k8s_vm_install.sh ]
+        write_files:
+        - path: /opt/k8s_vm_install.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __docker_proxy__: { get_param: docker_proxy }
+                __apt_proxy__: { get_param: apt_proxy }
+                __docker_version__: { get_param: docker_version }
+                __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+                __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+                __host_private_ip_addr__: { get_attr: [orch_3_floating_ip, fixed_ip_address] }
+                __mtu__: { get_param: mtu }
+              template:
+                get_file: k8s_vm_install.sh
+        - path: /opt/k8s_vm_init.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __host_private_ip_addr__: { get_attr: [orch_3_floating_ip, fixed_ip_address] }
+                __host_label__: 'orchestration'
+              template:
+                get_file: k8s_vm_init.sh
+        - path: /etc/init.d/k8s_vm_init_serv
+          permissions: '0755'
+          content:
+            get_file: k8s_vm_init_serv.sh
+
+  orch_3_vm_config:
+    type: OS::Heat::MultipartMime
+    properties:
+      parts:
+      - config: { get_resource: orch_3_vm_scripts }
+
+  orch_3_vm:
+    type: OS::Nova::Server
+    properties:
+      name:
+        list_join: ['-', [ { get_param: 'OS::stack_name' }, 'orch', '3' ] ]
+      image: { get_param: ubuntu_1804_image }
+      flavor: { get_param: orch_vm_flavor }
+      key_name: { get_param: key_name }
+      networks:
+      - port: { get_resource: orch_3_private_port }
+      user_data_format: SOFTWARE_CONFIG
+      user_data: { get_resource: orch_3_vm_config }
+
+outputs:
+  rancher_vm_ip:
+    description: The IP address of the rancher instance
+    value: { get_attr: [rancher_floating_ip, floating_ip_address] }
+
+  k8s_01_vm_ip:
+    description: The IP address of the k8s_01 instance
+    value: { get_attr: [k8s_01_floating_ip, floating_ip_address] }
+
+  k8s_01_vm_private_ip:
+    description: The private IP address of the k8s_01 instance
+    value: { get_attr: [k8s_01_floating_ip, fixed_ip_address] }
+
+  k8s_02_vm_ip:
+    description: The IP address of the k8s_02 instance
+    value: { get_attr: [k8s_02_floating_ip, floating_ip_address] }
+
+  k8s_02_vm_private_ip:
+    description: The private IP address of the k8s_02 instance
+    value: { get_attr: [k8s_02_floating_ip, fixed_ip_address] }
+
+  k8s_03_vm_ip:
+    description: The IP address of the k8s_03 instance
+    value: { get_attr: [k8s_03_floating_ip, floating_ip_address] }
+
+  k8s_03_vm_private_ip:
+    description: The private IP address of the k8s_03 instance
+    value: { get_attr: [k8s_03_floating_ip, fixed_ip_address] }
+
+  k8s_04_vm_ip:
+    description: The IP address of the k8s_04 instance
+    value: { get_attr: [k8s_04_floating_ip, floating_ip_address] }
+
+  k8s_04_vm_private_ip:
+    description: The private IP address of the k8s_04 instance
+    value: { get_attr: [k8s_04_floating_ip, fixed_ip_address] }
+
+  k8s_05_vm_ip:
+    description: The IP address of the k8s_05 instance
+    value: { get_attr: [k8s_05_floating_ip, floating_ip_address] }
+
+  k8s_05_vm_private_ip:
+    description: The private IP address of the k8s_05 instance
+    value: { get_attr: [k8s_05_floating_ip, fixed_ip_address] }
+
+  k8s_06_vm_ip:
+    description: The IP address of the k8s_06 instance
+    value: { get_attr: [k8s_06_floating_ip, floating_ip_address] }
+
+  k8s_06_vm_private_ip:
+    description: The private IP address of the k8s_06 instance
+    value: { get_attr: [k8s_06_floating_ip, fixed_ip_address] }
+
+  orch_1_vm_ip:
+    description: The IP address of the orch_1 instance
+    value: { get_attr: [orch_1_floating_ip, floating_ip_address] }
+
+  orch_1_vm_private_ip:
+    description: The private IP address of the orch_1 instance
+    value: { get_attr: [orch_1_floating_ip, fixed_ip_address] }
+
+  orch_2_vm_ip:
+    description: The IP address of the orch_2 instance
+    value: { get_attr: [orch_2_floating_ip, floating_ip_address] }
+
+  orch_2_vm_private_ip:
+    description: The private IP address of the orch_2 instance
+    value: { get_attr: [orch_2_floating_ip, fixed_ip_address] }
+
+  orch_3_vm_ip:
+    description: The IP address of the orch_3 instance
+    value: { get_attr: [orch_3_floating_ip, floating_ip_address] }
+
+  orch_3_vm_private_ip:
+    description: The private IP address of the orch_3 instance
+    value: { get_attr: [orch_3_floating_ip, fixed_ip_address] }
+
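
Once the stack is up, the outputs above expose the floating and private IPs of every VM; they can be read back with the standard Heat CLI (stack name "onap-oom" follows the earlier example):

    openstack stack output list onap-oom
    openstack stack output show onap-oom rancher_vm_ip -f value -c output_value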
diff --git a/deployment/heat/onap-rke/parts/onap-oom-1.yaml b/deployment/heat/onap-rke/parts/onap-oom-1.yaml
new file mode 100644
index 0000000..b1676b2
--- /dev/null
@@ -0,0 +1,192 @@
+heat_template_version: 2015-10-15
+description: ONAP on RKE Kubernetes using OOM
+
+parameters:
+  docker_proxy:
+    type: string
+
+  apt_proxy:
+    type: string
+
+  public_net_id:
+    type: string
+    description: The ID of the Public network for floating IP address allocation
+
+  oam_network_cidr:
+    type: string
+    description: CIDR of the OAM ONAP network
+
+  ubuntu_1804_image:
+    type: string
+    description: Name of the Ubuntu 18.04 image
+
+  rancher_vm_flavor:
+    type: string
+    description: VM flavor for Rancher
+
+  k8s_vm_flavor:
+    type: string
+    description: VM flavor for k8s hosts
+
+  etcd_vm_flavor:
+    type: string
+    description: VM flavor for etcd hosts
+
+  orch_vm_flavor:
+    type: string
+    description: VM flavor for orch hosts
+
+  integration_override_yaml:
+    type: string
+    description: Content for integration_override.yaml
+
+  integration_gerrit_branch:
+    type: string
+    default: "master"
+
+  integration_gerrit_refspec:
+    type: string
+    default: ""
+
+  oom_gerrit_branch:
+    type: string
+    default: "master"
+
+  oom_gerrit_refspec:
+    type: string
+    default: ""
+
+  docker_manifest:
+    type: string
+    default: ""
+
+  key_name:
+    type: string
+    default: "onap_key"
+
+  docker_version:
+    type: string
+    default: "17.03.2"
+
+  rancher_version:
+    type: string
+    default: "1.6.26"
+
+  rancher_agent_version:
+    type: string
+    default: "1.2.11"
+
+  kubectl_version:
+    type: string
+    default: "1.13.4"
+
+  helm_version:
+    type: string
+    default: "2.9.1"
+
+  helm_deploy_delay:
+    type: string
+    default: "3m"
+
+  use_ramdisk:
+    type: string
+    description: Set to "true" if you want to use a RAM disk for /dockerdata-nfs/.
+    default: "false"
+
+  mtu:
+    type: number
+    default: 1500
+
+  portal_hostname:
+    type: string
+    description: The FQDN of the k8s host used in the Portal UI component URLs; this must be resolvable by the client
+    default: "portal.api.simpledemo.onap.org"
+
+resources:
+  random-str:
+    type: OS::Heat::RandomString
+    properties:
+      length: 4
+
+  # ONAP security group
+  onap_sg:
+    type: OS::Neutron::SecurityGroup
+    properties:
+      name:
+        str_replace:
+          template: base_rand
+          params:
+            base: onap_sg
+            rand: { get_resource: random-str }
+      description: security group used by ONAP
+      rules:
+        # All egress traffic
+        - direction: egress
+          ethertype: IPv4
+        - direction: egress
+          ethertype: IPv6
+        # ingress traffic
+        # ICMP
+        - protocol: icmp
+        - protocol: udp
+          port_range_min: 1
+          port_range_max: 65535
+        - protocol: tcp
+          port_range_min: 1
+          port_range_max: 65535
+        # Protocols used for vLB/vDNS use case
+        - protocol: 47
+        - protocol: 53
+        - protocol: 132
+
+
+  # ONAP management private network
+  oam_network:
+    type: OS::Neutron::Net
+    properties:
+      name:
+        str_replace:
+          template: oam_network_rand
+          params:
+            rand: { get_resource: random-str }
+
+  oam_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      name:
+        str_replace:
+          template: oam_network_rand
+          params:
+            rand: { get_resource: random-str }
+      network_id: { get_resource: oam_network }
+      cidr: { get_param: oam_network_cidr }
+      dns_nameservers: [ "8.8.8.8" ]
+
+  router:
+    type: OS::Neutron::Router
+    properties:
+      name:
+        list_join: ['-', [{ get_param: 'OS::stack_name' }, 'router']]
+      external_gateway_info:
+        network: { get_param: public_net_id }
+
+  router_interface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router_id: { get_resource: router }
+      subnet_id: { get_resource: oam_subnet }
+
+  rancher_private_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: oam_network }
+      fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
+      security_groups:
+      - { get_resource: onap_sg }
+
+  rancher_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: rancher_private_port }
+
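As a sketch (values shown are illustrative), the parameters declared above are normally supplied through a Heat environment file, but individual ones can also be overridden on the command line when the stack is created, as deploy.sh does for the manifest and portal hostname:

    openstack stack create -t onap-oom.yaml -e <env_file> \
        --parameter docker_manifest=docker-manifest.csv \
        --parameter portal_hostname=portal.api.simpledemo.onap.org \
        <stack_name>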
diff --git a/deployment/heat/onap-rke/parts/onap-oom-2.yaml b/deployment/heat/onap-rke/parts/onap-oom-2.yaml
new file mode 100644 (file)
index 0000000..bd4ba1f
--- /dev/null
@@ -0,0 +1,70 @@
+  ${VM_TYPE}_${VM_NUM}_private_port:
+    type: OS::Neutron::Port
+    properties:
+      network: { get_resource: oam_network }
+      fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
+      security_groups:
+      - { get_resource: onap_sg }
+
+  ${VM_TYPE}_${VM_NUM}_floating_ip:
+    type: OS::Neutron::FloatingIP
+    properties:
+      floating_network_id: { get_param: public_net_id }
+      port_id: { get_resource: ${VM_TYPE}_${VM_NUM}_private_port }
+
+  ${VM_TYPE}_${VM_NUM}_vm_scripts:
+    type: OS::Heat::CloudConfig
+    properties:
+      cloud_config:
+        power_state:
+          mode: reboot
+        runcmd:
+        - [ /opt/k8s_vm_install.sh ]
+        write_files:
+        - path: /opt/k8s_vm_install.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __docker_proxy__: { get_param: docker_proxy }
+                __apt_proxy__: { get_param: apt_proxy }
+                __docker_version__: { get_param: docker_version }
+                __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+                __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+                __host_private_ip_addr__: { get_attr: [${VM_TYPE}_${VM_NUM}_floating_ip, fixed_ip_address] }
+                __mtu__: { get_param: mtu }
+              template:
+                get_file: k8s_vm_install.sh
+        - path: /opt/k8s_vm_init.sh
+          permissions: '0755'
+          content:
+            str_replace:
+              params:
+                __host_private_ip_addr__: { get_attr: [${VM_TYPE}_${VM_NUM}_floating_ip, fixed_ip_address] }
+                __host_label__: '$HOST_LABEL'
+              template:
+                get_file: k8s_vm_init.sh
+        - path: /etc/init.d/k8s_vm_init_serv
+          permissions: '0755'
+          content:
+            get_file: k8s_vm_init_serv.sh
+
+  ${VM_TYPE}_${VM_NUM}_vm_config:
+    type: OS::Heat::MultipartMime
+    properties:
+      parts:
+      - config: { get_resource: ${VM_TYPE}_${VM_NUM}_vm_scripts }
+
+  ${VM_TYPE}_${VM_NUM}_vm:
+    type: OS::Nova::Server
+    properties:
+      name:
+        list_join: ['-', [ { get_param: 'OS::stack_name' }, '${VM_TYPE}', '${VM_NUM}' ] ]
+      image: { get_param: ubuntu_1804_image }
+      flavor: { get_param: ${VM_TYPE}_vm_flavor }
+      key_name: { get_param: key_name }
+      networks:
+      - port: { get_resource: ${VM_TYPE}_${VM_NUM}_private_port }
+      user_data_format: SOFTWARE_CONFIG
+      user_data: { get_resource: ${VM_TYPE}_${VM_NUM}_vm_config }
+
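This part is a template fragment rather than a standalone Heat file: the ${VM_TYPE} and ${VM_NUM} placeholders are filled in by gen-onap-oom-yaml.sh via envsubst, once per VM. A sketch of how a single k8s worker block is rendered, mirroring what the generator script does:

    VM_TYPE=k8s HOST_LABEL=compute VM_NUM=01 envsubst < parts/onap-oom-2.yaml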
diff --git a/deployment/heat/onap-rke/parts/onap-oom-3.yaml b/deployment/heat/onap-rke/parts/onap-oom-3.yaml
new file mode 100644 (file)
index 0000000..8dc35b6
--- /dev/null
@@ -0,0 +1,5 @@
+outputs:
+  rancher_vm_ip:
+    description: The IP address of the rancher instance
+    value: { get_attr: [rancher_floating_ip, floating_ip_address] }
+
diff --git a/deployment/heat/onap-rke/rancher_vm_entrypoint.sh b/deployment/heat/onap-rke/rancher_vm_entrypoint.sh
new file mode 100644 (file)
index 0000000..3dfc1bd
--- /dev/null
@@ -0,0 +1,242 @@
+#!/bin/bash -x
+#
+# Copyright 2018 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+
+# allow root login
+export HOME=/root
+mkdir -p ~/.ssh
+cp ~ubuntu/.ssh/authorized_keys ~/.ssh
+
+export DEBIAN_FRONTEND=noninteractive
+HOST_IP=$(hostname -I)
+echo $HOST_IP `hostname` >> /etc/hosts
+printenv
+
+mkdir -p /opt/config
+echo "__rancher_ip_addr__" > /opt/config/rancher_ip_addr.txt
+echo "__k8s_vm_ips__" > /opt/config/k8s_vm_ips.txt
+echo "__k8s_private_ips__" > /opt/config/k8s_private_ips.txt
+echo "__public_net_id__" > /opt/config/public_net_id.txt
+echo "__oam_network_cidr__" > /opt/config/oam_network_cidr.txt
+echo "__oam_network_id__" > /opt/config/oam_network_id.txt
+echo "__oam_subnet_id__" > /opt/config/oam_subnet_id.txt
+echo "__sec_group__" > /opt/config/sec_group.txt
+echo "__integration_gerrit_branch__" > /opt/config/integration_gerrit_branch.txt
+echo "__integration_gerrit_refspec__" > /opt/config/integration_gerrit_refspec.txt
+echo "__oom_gerrit_branch__" > /opt/config/oom_gerrit_branch.txt
+echo "__oom_gerrit_refspec__" > /opt/config/oom_gerrit_refspec.txt
+echo "__docker_manifest__" > /opt/config/docker_manifest.txt
+echo "__docker_proxy__" > /opt/config/docker_proxy.txt
+echo "__docker_version__" > /opt/config/docker_version.txt
+echo "__rancher_version__" > /opt/config/rancher_version.txt
+echo "__rancher_agent_version__" > /opt/config/rancher_agent_version.txt
+echo "__kubectl_version__" > /opt/config/kubectl_version.txt
+echo "__helm_version__" > /opt/config/helm_version.txt
+echo "__helm_deploy_delay__" > /opt/config/helm_deploy_delay.txt
+echo "__mtu__" > /opt/config/mtu.txt
+echo "__portal_hostname__" > /opt/config/portal_hostname.txt
+
+cat <<EOF > /opt/config/integration-override.yaml
+__integration_override_yaml__
+EOF
+# The escaped "\_\_name__" patterns below survive Heat's str_replace, so after
+# rendering these sed commands substitute the actual parameter values into
+# integration-override.yaml.
+sed -i 's/\_\_portal_hostname__/__portal_hostname__/g' /opt/config/integration-override.yaml
+sed -i 's/\_\_public_net_id__/__public_net_id__/g' /opt/config/integration-override.yaml
+sed -i 's|\_\_oam_network_cidr__|__oam_network_cidr__|g' /opt/config/integration-override.yaml
+sed -i 's/\_\_oam_network_id__/__oam_network_id__/g' /opt/config/integration-override.yaml
+sed -i 's/\_\_oam_subnet_id__/__oam_subnet_id__/g' /opt/config/integration-override.yaml
+sed -i 's/\_\_sec_group__/__sec_group__/g' /opt/config/integration-override.yaml
+sed -i 's/\_\_rancher_ip_addr__/__rancher_ip_addr__/g' /opt/config/integration-override.yaml
+sed -i 's/\_\_k8s_01_vm_ip__/__k8s_01_vm_ip__/g' /opt/config/integration-override.yaml
+sed -i 's/\_\_docker_proxy__/__docker_proxy__/g' /opt/config/integration-override.yaml
+cp /opt/config/integration-override.yaml /root
+cat /root/integration-override.yaml
+
+mkdir -p /etc/docker
+if [ ! -z "__docker_proxy__" ]; then
+    cat > /etc/docker/daemon.json <<EOF
+{
+  "mtu": __mtu__,
+  "insecure-registries" : ["__docker_proxy__"]
+}
+EOF
+else
+    cat > /etc/docker/daemon.json <<EOF
+{
+  "mtu": __mtu__
+}
+EOF
+fi
+if [ ! -z "__apt_proxy__" ]; then
+    cat > /etc/apt/apt.conf.d/30proxy<<EOF
+Acquire::http { Proxy "http://__apt_proxy__"; };
+Acquire::https::Proxy "DIRECT";
+EOF
+fi
+
+# workaround for OpenStack intermittent failure to change default apt mirrors
+sed -i 's|http://archive.ubuntu.com|http://nova.clouds.archive.ubuntu.com|g' /etc/apt/sources.list
+
+while ! hash jq &> /dev/null; do
+    apt-get -y update
+    apt-get -y install apt-transport-https ca-certificates curl software-properties-common jq make nfs-kernel-server moreutils
+    sleep 10
+done
+
+mkdir -p /dockerdata-nfs
+
+# use RAM disk for /dockerdata-nfs for testing
+if [ "__use_ramdisk__" = "true" ]; then
+    echo "tmpfs /dockerdata-nfs tmpfs noatime,size=75% 1 2" >> /etc/fstab
+    mount /dockerdata-nfs
+fi
+
+# update and initialize git
+git config --global user.email root@rancher
+git config --global user.name root@rancher
+git config --global log.decorate auto
+
+# put /dockerdata-nfs under version control so changes to the shared persistent data can be inspected
+chmod 777 /dockerdata-nfs/
+chown nobody:nogroup /dockerdata-nfs/
+cd /dockerdata-nfs/
+git init
+git config user.email "root@onap"
+git config user.name "root"
+git add -A
+git commit -m "initial commit"
+
+# export NFS mount
+echo "/dockerdata-nfs *(rw,fsid=1,async,no_root_squash,no_subtree_check)" | tee /etc/exports
+
+
+exportfs -a
+systemctl restart nfs-kernel-server
+
+cd ~
+
+# install kubectl __kubectl_version__
+curl -s -LO https://storage.googleapis.com/kubernetes-release/release/v__kubectl_version__/bin/linux/amd64/kubectl
+chmod +x ./kubectl
+sudo mv ./kubectl /usr/local/bin/kubectl
+mkdir ~/.kube
+
+# install helm __helm_version__
+mkdir -p helm
+pushd helm
+wget -q http://storage.googleapis.com/kubernetes-helm/helm-v__helm_version__-linux-amd64.tar.gz
+tar -zxvf helm-v__helm_version__-linux-amd64.tar.gz
+sudo cp linux-amd64/helm /usr/local/bin/helm
+popd
+
+NAMESPACE=onap
+
+# wait for /root/.kube/config to show up
+while [ ! -e /root/.kube/config ]; do
+    sleep 1m
+done
+
+
+export KUBECONFIG=/root/.kube/config
+kubectl config view
+
+
+
+# Enable auto-completion for kubectl
+echo "source <(kubectl completion bash)" >> ~/.bashrc
+
+
+# wait for kubernetes to initialize
+sleep 3m
+until [ $(kubectl get pods --namespace kube-system | tail -n +2 | grep -c Running) -ge 6 ]; do
+    sleep 1m
+done
+
+
+# Install using OOM
+
+
+# Clone OOM:
+cd ~
+git clone -b __oom_gerrit_branch__ https://gerrit.onap.org/r/oom
+cd oom
+if [ ! -z "__oom_gerrit_refspec__" ]; then
+    git fetch https://gerrit.onap.org/r/oom __oom_gerrit_refspec__
+    git checkout FETCH_HEAD
+fi
+git checkout -b workarounds
+git log -1
+
+# Clone integration
+cd ~
+git clone -b __integration_gerrit_branch__ https://gerrit.onap.org/r/integration
+cd integration
+if [ ! -z "__integration_gerrit_refspec__" ]; then
+    git fetch https://gerrit.onap.org/r/integration __integration_gerrit_refspec__
+    git checkout FETCH_HEAD
+fi
+
+
+if [ ! -z "__docker_manifest__" ]; then
+    cd version-manifest/src/main/scripts
+    ./update-oom-image-versions.sh ../resources/__docker_manifest__ ~/oom/
+fi
+
+cd ~/oom
+git diff
+git commit -a -m "apply manifest versions"
+
+cd ~/oom
+# workaround to change onap portal cookie domain
+sed -i "s/^cookie_domain.*=.*/cookie_domain = __portal_hostname__/g" ./kubernetes/portal/charts/portal-app/resources/config/deliveries/properties/ONAPPORTAL/system.properties
+sed -i "s/^cookie_domain.*=.*/cookie_domain = __portal_hostname__/g" ./kubernetes/portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/system.properties
+git diff
+git commit -a -m "set portal cookie domain"
+
+git tag -a "deploy0" -m "initial deployment"
+
+
+echo "install tiller/helm"
+kubectl -n kube-system create serviceaccount tiller
+kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
+helm init --service-account tiller
+kubectl -n kube-system rollout status deploy/tiller-deploy
+
+# Run ONAP:
+cd ~/oom/kubernetes/
+helm init --client-only
+helm init --upgrade
+helm serve &
+sleep 10
+helm repo add local http://127.0.0.1:8879
+helm repo list
+make all
+helm search -l | grep local
+
+# install helm deploy plugin
+rsync -avt ~/oom/kubernetes/helm/plugins ~/.helm/
+# temporary workaround to throttle the helm deploy to alleviate startup disk contention issues
+if [ ! -z "__helm_deploy_delay__" ]; then
+    sed -i "/\^enabled:/a\      echo sleep __helm_deploy_delay__\n      sleep __helm_deploy_delay__" ~/.helm/plugins/deploy/deploy.sh
+fi
+
+helm deploy dev local/onap -f ~/oom/kubernetes/onap/resources/environments/public-cloud.yaml -f ~/integration-override.yaml --namespace $NAMESPACE --verbose
+
+# re-install original helm deploy plugin
+rsync -avt ~/oom/kubernetes/helm/plugins ~/.helm/
+
+helm list
+
+
+
+# Check ONAP status:
+sleep 10
+kubectl get pods --all-namespaces
+kubectl get nodes
+kubectl top nodes
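The entrypoint above runs under cloud-init on first boot; deploy.sh waits for it by tailing the cloud-init log. To follow the same progress interactively from the Rancher VM (assuming the default cloud-init log location):

    tail -f /var/log/cloud-init-output.log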
diff --git a/deployment/heat/onap-rke/scripts/Crypto.java b/deployment/heat/onap-rke/scripts/Crypto.java
new file mode 100644 (file)
index 0000000..a9bad50
--- /dev/null
@@ -0,0 +1,82 @@
+import javax.crypto.Cipher;
+import javax.crypto.spec.GCMParameterSpec;
+import javax.crypto.spec.SecretKeySpec;
+import java.security.GeneralSecurityException;
+import java.security.SecureRandom;
+import java.util.Arrays;
+
+public class Crypto {
+
+    private static final String AES = "AES";
+    private static final int GCM_TAG_LENGTH = 16;
+    private static final int GCM_IV_LENGTH = 12;
+    private static final String AES_GCM_NO_PADDING = "AES/GCM/NoPadding";
+
+    public static void main(String[] args) {
+        if (args.length != 2) {
+            System.out.println("Usage: java Crypto value_to_encrypt key");
+            System.out.println("exit(1)");
+            System.exit(1);
+        }
+
+        String value = args[0];
+        String key = args[1];
+        String encrypted = encryptCloudConfigPassword(value, key);
+        System.out.println(encrypted);
+    }
+
+    /**
+     * Encrypt a value with AES/GCM/NoPadding using the given hex-encoded AES key.
+     *
+     * @throws GeneralSecurityException
+     */
+    public static String encrypt (String value, String keyString) throws GeneralSecurityException {
+        SecretKeySpec sks = getSecretKeySpec (keyString);
+        Cipher cipher = Cipher.getInstance(AES_GCM_NO_PADDING);
+        byte[] initVector = new byte[GCM_IV_LENGTH];
+        (new SecureRandom()).nextBytes(initVector);
+        GCMParameterSpec spec = new GCMParameterSpec(GCM_TAG_LENGTH * java.lang.Byte.SIZE, initVector);
+        cipher.init(Cipher.ENCRYPT_MODE, sks, spec);
+        byte[] encoded = value.getBytes(java.nio.charset.StandardCharsets.UTF_8);
+        byte[] cipherText = new byte[initVector.length + cipher.getOutputSize(encoded.length)];
+        System.arraycopy(initVector, 0, cipherText, 0, initVector.length);
+        cipher.doFinal(encoded, 0, encoded.length, cipherText, initVector.length);
+        return byteArrayToHexString(cipherText);
+    }
+
+    public static String encryptCloudConfigPassword(String message, String key) {
+        try {
+            return Crypto.encrypt(message, key);
+        } catch (GeneralSecurityException e) {
+            return null;
+        }
+    }
+
+    private static SecretKeySpec getSecretKeySpec (String keyString) {
+        byte[] key = hexStringToByteArray (keyString);
+        return new SecretKeySpec (key, AES);
+    }
+
+    public static String byteArrayToHexString (byte[] b) {
+        StringBuilder sb = new StringBuilder(b.length * 2);
+        for (byte aB : b) {
+            int v = aB & 0xff;
+            if (v < 16) {
+                sb.append('0');
+            }
+            sb.append(Integer.toHexString(v));
+        }
+        return sb.toString ().toUpperCase ();
+    }
+
+    private static byte[] hexStringToByteArray (String s) {
+        byte[] b = new byte[s.length () / 2];
+        for (int i = 0; i < b.length; i++) {
+            int index = i * 2;
+            int v = Integer.parseInt (s.substring (index, index + 2), 16);
+            b[i] = (byte) v;
+        }
+        return b;
+    }
+}
\ No newline at end of file
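Crypto.java has no external dependencies, so deploy.sh simply compiles and invokes it in place to produce the encrypted OpenStack password for SO. A sketch of the same invocation by hand, using the key hard-coded in deploy.sh:

    javac Crypto.java
    java Crypto "$OS_PASSWORD" aa3871669d893c7fb8abbcda31b88b4f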
diff --git a/deployment/heat/onap-rke/scripts/cleanup.sh b/deployment/heat/onap-rke/scripts/cleanup.sh
new file mode 100755 (executable)
index 0000000..7c2a1e2
--- /dev/null
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+IFS='
+'
+
+if [ -z "$1" ]; then
+       echo "ONAP component name missing"
+       echo "Usage: ./cleanup.sh onap_component_name"
+       exit 1
+fi
+
+COMPONENT=$1
+
+if [ "$COMPONENT" == "dcae" ] || [ "$COMPONENT" == "DCAE" ]; then
+       kubectl delete service consul -n onap
+fi
+
+for op in secrets configmaps pvc pv services deployments statefulsets clusterrolebinding; do
+       ARRAY=(`kubectl get $op -n onap | grep dev-$COMPONENT | awk '{print $1}'`)
+       for i in ${ARRAY[*]}; do
+               kubectl delete $op -n onap $i
+       done
+done
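Example usage, assuming the script is run from a host with kubectl access to the cluster; the component name "so" is illustrative:

    ./cleanup.sh so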
diff --git a/deployment/heat/onap-rke/scripts/deploy.sh b/deployment/heat/onap-rke/scripts/deploy.sh
new file mode 100755 (executable)
index 0000000..c4475b5
--- /dev/null
@@ -0,0 +1,405 @@
+#!/bin/bash
+#
+# Copyright 2018 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+
+stack_name="oom"
+portal_hostname="portal.api.simpledemo.onap.org"
+full_deletion=false
+
+if [ -z "$WORKSPACE" ]; then
+    export WORKSPACE=`git rev-parse --show-toplevel`
+fi
+
+usage() {
+    echo "Usage: $0 [ -n <number of VMs {2-15}> ][ -s <stack name> ][ -m <manifest> ][ -d <domain> ][ -r ][ -q ] <env>" 1>&2;
+
+    echo "n:    Set the number of VM's that will be installed. This number must be between 2 and 15" 1>&2;
+    echo "s:    Set the name to be used for stack. This name will be used for naming of resources" 1>&2;
+    echo "d:    Set the base domain name to be used in portal UI URLs" 1>&2;
+    echo "m:    The docker manifest to apply; must be either \"docker-manifest-staging.csv\" or \"docker-manifest.csv\"." 1>&2;
+    echo "r:    Delete all resources relating to ONAP within enviroment." 1>&2;
+    echo "q:    Quiet Delete of all ONAP resources." 1>&2;
+
+    exit 1;
+}
+
+
+while getopts ":n:s:d:m:rq" o; do
+    case "${o}" in
+        n)
+            if [[ ${OPTARG} =~ ^[0-9]+$ ]];then
+                if [ ${OPTARG} -ge 2 -a ${OPTARG} -le 15 ]; then
+                    vm_num=${OPTARG}
+                else
+                    usage
+                fi
+            else
+                usage
+            fi
+            ;;
+        s)
+            if [[ ! ${OPTARG} =~ ^[0-9]+$ ]];then
+                stack_name=${OPTARG}
+            else
+                usage
+            fi
+            ;;
+        d)
+            if [[ ! ${OPTARG} =~ ^[0-9]+$ ]];then
+                portal_hostname=${OPTARG}
+            else
+                usage
+            fi
+            ;;
+        m)
+            if [ -f $WORKSPACE/version-manifest/src/main/resources/${OPTARG} ]; then
+                docker_manifest=${OPTARG}
+            else
+                usage
+            fi
+            ;;
+        r)
+            echo "The following command will delete all information relating to onap within your enviroment"
+            read -p "Are you certain this is what you want? (type y to confirm):" answer
+
+            if [ $answer = "y" ] || [ $answer = "Y" ] || [ $answer = "yes" ] || [ $answer = "Yes"]; then
+                echo "This may delete the work of other colleages within the same enviroment"
+                read -p "Are you certain this is what you want? (type y to confirm):" answer2
+
+                if [ $answer2 = "y" ] || [ $answer2 = "Y" ] || [ $answer2 = "yes" ] || [ $answer2 = "Yes"]; then
+                    full_deletion=true
+                else
+                    echo "Ending program"
+                    exit 1
+                fi
+            else
+                echo "Ending program"
+                exit 1
+            fi
+            ;;
+        q)
+            full_deletion=true
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+shift $((OPTIND-1))
+
+if [ "$#" -ne 1 ]; then
+   usage
+fi
+
+ENV_FILE=$1
+
+if [ ! -f $ENV_FILE ];then
+    echo ENV file does not exist or was not given
+    exit 1
+fi
+
+set -x
+
+SSH_KEY=~/.ssh/onap_key
+
+source $WORKSPACE/test/ete/scripts/install_openstack_cli.sh
+
+#SO_ENCRYPTION_KEY=aa3871669d893c7fb8abbcda31b88b4f
+#export OS_PASSWORD_ENCRYPTED=$(echo -n "$OS_PASSWORD" | openssl aes-128-ecb -e -K "$SO_ENCRYPTION_KEY" -nosalt | xxd -c 256 -p)
+
+#Use new encryption method
+pushd $WORKSPACE/deployment/heat/onap-rke/scripts
+javac Crypto.java
+SO_ENCRYPTION_KEY=aa3871669d893c7fb8abbcda31b88b4f
+export OS_PASSWORD_ENCRYPTED=$(java Crypto "$OS_PASSWORD" "$SO_ENCRYPTION_KEY")
+popd
+
+for n in $(seq 1 5); do
+    if [ $full_deletion = true ] ; then
+        $WORKSPACE/test/ete/scripts/teardown-onap.sh -n $stack_name -q
+    else
+        $WORKSPACE/test/ete/scripts/teardown-onap.sh -n $stack_name
+    fi
+
+    cd $WORKSPACE/deployment/heat/onap-rke
+    envsubst < $ENV_FILE > $ENV_FILE~
+    if [ -z "$vm_num" ]; then
+        cp onap-oom.yaml onap-oom.yaml~
+    else
+        ./scripts/gen-onap-oom-yaml.sh $vm_num > onap-oom.yaml~
+    fi
+
+    if ! openstack stack create -t ./onap-oom.yaml~ -e $ENV_FILE~ $stack_name --parameter docker_manifest=$docker_manifest --parameter portal_hostname=$portal_hostname; then
+        break
+    fi
+
+    while [ "CREATE_IN_PROGRESS" == "$(openstack stack show -c stack_status -f value $stack_name)" ]; do
+        sleep 20
+    done
+
+    STATUS=$(openstack stack show -c stack_status -f value $stack_name)
+    echo $STATUS
+    if [ "CREATE_COMPLETE" != "$STATUS" ]; then
+        break
+    fi
+
+    for i in $(seq 1 30); do
+        sleep 30
+        RANCHER_IP=$(openstack stack output show $stack_name rancher_vm_ip -c output_value -f value)
+        K8S_IP=$(openstack stack output show $stack_name k8s_01_vm_ip -c output_value -f value)
+        timeout 1 ping -c 1 "$RANCHER_IP" && break
+    done
+
+    timeout 1 ping -c 1 "$RANCHER_IP" && break
+
+    echo Error: OpenStack infrastructure issue: unable to reach rancher "$RANCHER_IP"
+    sleep 10
+done
+
+if ! timeout 1 ping -c 1 "$RANCHER_IP"; then
+    exit 2
+fi
+
+# wait until all k8s VMs have fully initialized
+for VM_NAME in $(grep _vm: ./onap-oom.yaml~ | cut -d: -f1); do
+    echo $VM_NAME
+    VM_IP=$(openstack stack output show $stack_name ${VM_NAME}_ip -c output_value -f value)
+    ssh-keygen -R $VM_IP
+    until ssh -o StrictHostKeychecking=no -i $SSH_KEY ubuntu@$VM_IP ls -ad /dockerdata-nfs/.git; do
+        sleep 1m
+    done
+done
+
+cat > ./cluster.yml~ <<EOF
+# If you intend to deploy Kubernetes in an air-gapped environment,
+# please consult the documentation on how to configure custom RKE images.
+nodes:
+EOF
+
+for VM_NAME in $(grep -E 'k8s_.+_vm:' ./onap-oom.yaml~ | cut -d: -f1); do
+    echo $VM_NAME
+    VM_IP=$(openstack stack output show $stack_name ${VM_NAME}_ip -c output_value -f value)
+    VM_PRIVATE_IP=$(openstack stack output show $stack_name ${VM_NAME}_private_ip -c output_value -f value)
+    VM_HOSTNAME=$stack_name-$(echo $VM_NAME | tr '_' '-' | cut -d- -f1,2)
+    cat >> ./cluster.yml~ <<EOF
+- address: $VM_IP
+  port: "22"
+  internal_address: $VM_PRIVATE_IP
+  role:
+  - worker
+  hostname_override: "$VM_HOSTNAME"
+  user: ubuntu
+  docker_socket: /var/run/docker.sock
+  ssh_key: ""
+  ssh_key_path: ~/.ssh/onap_key
+  ssh_cert: ""
+  ssh_cert_path: ""
+  labels: {}
+EOF
+done
+
+for VM_NAME in $(grep -E 'orch_.+_vm:' ./onap-oom.yaml~ | cut -d: -f1); do
+    echo $VM_NAME
+    VM_IP=$(openstack stack output show $stack_name ${VM_NAME}_ip -c output_value -f value)
+    VM_PRIVATE_IP=$(openstack stack output show $stack_name ${VM_NAME}_private_ip -c output_value -f value)
+    VM_HOSTNAME=$stack_name-$(echo $VM_NAME | tr '_' '-' | cut -d- -f1,2)
+    cat >> ./cluster.yml~ <<EOF
+- address: $VM_IP
+  port: "22"
+  internal_address: $VM_PRIVATE_IP
+  role:
+  - controlplane
+  - etcd
+  hostname_override: "$VM_HOSTNAME"
+  user: ubuntu
+  docker_socket: /var/run/docker.sock
+  ssh_key: ""
+  ssh_key_path: ~/.ssh/onap_key
+  ssh_cert: ""
+  ssh_cert_path: ""
+  labels: {}
+EOF
+done
+
+cat >> ./cluster.yml~ <<EOF
+services:
+  etcd:
+    image: ""
+    extra_args: {}
+    extra_binds: []
+    extra_env: []
+    external_urls: []
+    ca_cert: ""
+    cert: ""
+    key: ""
+    path: ""
+    snapshot: null
+    retention: ""
+    creation: ""
+    backup_config: null
+  kube-api:
+    image: ""
+    extra_args: {}
+    extra_binds: []
+    extra_env: []
+    service_cluster_ip_range: 10.43.0.0/16
+    service_node_port_range: ""
+    pod_security_policy: false
+    always_pull_images: false
+  kube-controller:
+    image: ""
+    extra_args: {}
+    extra_binds: []
+    extra_env: []
+    cluster_cidr: 10.42.0.0/16
+    service_cluster_ip_range: 10.43.0.0/16
+  scheduler:
+    image: ""
+    extra_args: {}
+    extra_binds: []
+    extra_env: []
+  kubelet:
+    image: ""
+    extra_args: {}
+    extra_binds: []
+    extra_env: []
+    cluster_domain: cluster.local
+    infra_container_image: ""
+    cluster_dns_server: 10.43.0.10
+    fail_swap_on: false
+  kubeproxy:
+    image: ""
+    extra_args: {}
+    extra_binds: []
+    extra_env: []
+network:
+  plugin: canal
+  options: {}
+authentication:
+  strategy: x509
+  sans: []
+  webhook: null
+addons: ""
+addons_include: []
+system_images:
+  etcd: rancher/coreos-etcd:v3.2.24-rancher1
+  alpine: rancher/rke-tools:v0.1.27
+  nginx_proxy: rancher/rke-tools:v0.1.27
+  cert_downloader: rancher/rke-tools:v0.1.27
+  kubernetes_services_sidecar: rancher/rke-tools:v0.1.27
+  kubedns: rancher/k8s-dns-kube-dns:1.15.0
+  dnsmasq: rancher/k8s-dns-dnsmasq-nanny:1.15.0
+  kubedns_sidecar: rancher/k8s-dns-sidecar:1.15.0
+  kubedns_autoscaler: rancher/cluster-proportional-autoscaler:1.0.0
+  coredns: coredns/coredns:1.2.6
+  coredns_autoscaler: rancher/cluster-proportional-autoscaler:1.0.0
+  kubernetes: rancher/hyperkube:v1.13.4-rancher1
+  flannel: rancher/coreos-flannel:v0.10.0-rancher1
+  flannel_cni: rancher/flannel-cni:v0.3.0-rancher1
+  calico_node: rancher/calico-node:v3.4.0
+  calico_cni: rancher/calico-cni:v3.4.0
+  calico_controllers: ""
+  calico_ctl: rancher/calico-ctl:v2.0.0
+  canal_node: rancher/calico-node:v3.4.0
+  canal_cni: rancher/calico-cni:v3.4.0
+  canal_flannel: rancher/coreos-flannel:v0.10.0
+  weave_node: weaveworks/weave-kube:2.5.0
+  weave_cni: weaveworks/weave-npc:2.5.0
+  pod_infra_container: rancher/pause:3.1
+  ingress: rancher/nginx-ingress-controller:0.21.0-rancher3
+  ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1
+  metrics_server: rancher/metrics-server:v0.3.1
+ssh_key_path: ~/.ssh/onap_key
+ssh_cert_path: ""
+ssh_agent_auth: false
+authorization:
+  mode: rbac
+  options: {}
+ignore_docker_version: false
+kubernetes_version: ""
+private_registries: []
+ingress:
+  provider: ""
+  options: {}
+  node_selector: {}
+  extra_args: {}
+cluster_name: "$stack_name"
+cloud_provider:
+  name: ""
+prefix_path: ""
+addon_job_timeout: 0
+bastion_host:
+  address: ""
+  port: ""
+  user: ""
+  ssh_key: ""
+  ssh_key_path: ""
+  ssh_cert: ""
+  ssh_cert_path: ""
+monitoring:
+  provider: ""
+  options: {}
+restore:
+  restore: false
+  snapshot_name: ""
+dns: null
+EOF
+
+rm -rf ./target
+mkdir -p ./target
+cp ./cluster.yml~ ./target/cluster.yml
+pushd ./target
+
+# spin up k8s with RKE
+until rke up; do
+    sleep 1m
+    rke remove
+done
+
+scp ./kube_config_cluster.yml root@$RANCHER_IP:/root/.kube/config
+popd
+
+
+sleep 2m
+ssh -o StrictHostKeychecking=no -i $SSH_KEY ubuntu@$RANCHER_IP "sed -u '/Cloud-init.*finished/q' <(tail -n+0 -f /var/log/cloud-init-output.log)"
+
+PREV_RESULT=0
+for n in $(seq 1 20); do
+  RESULT=$(ssh -i $SSH_KEY ubuntu@$RANCHER_IP 'sudo su -c "kubectl -n onap get pods"' | grep -vE 'Running|Complete|NAME' | wc -l)
+  if [[ $? -eq 0 && ( $RESULT -eq 0 || $RESULT -eq $PREV_RESULT ) ]]; then
+    break
+  fi
+  sleep 15m
+  PREV_RESULT=$RESULT
+done
+
+PREV_RESULT=0
+for n in $(seq 1 20); do
+  echo "Wait for HEALTHCHECK count $n of 10"
+  ROBOT_POD=$(ssh -i $SSH_KEY ubuntu@$RANCHER_IP 'sudo su -c "kubectl --namespace onap get pods"' | grep robot | sed 's/ .*//')
+  ssh -i $SSH_KEY ubuntu@$RANCHER_IP  'sudo su -l root -c "/root/oom/kubernetes/robot/ete-k8s.sh onap health"'
+  RESULT=$?
+  if [[ $RESULT -lt 10 && ( $RESULT -eq 0 || $RESULT -eq $PREV_RESULT ) ]]; then
+    break
+  fi
+  sleep 15m
+  PREV_RESULT=$RESULT
+done
+if [ "$ROBOT_POD" == "" ]; then
+  exit 1
+fi
+
+LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep health | head -1" | ssh -i $SSH_KEY ubuntu@$RANCHER_IP sudo su)
+echo "kubectl cp -n onap $ROBOT_POD:share/logs/$LOG_DIR /tmp/robot/logs/$LOG_DIR" | ssh -i $SSH_KEY ubuntu@$RANCHER_IP sudo su
+echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+mkdir -p $WORKSPACE/archives/healthcheck
+rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$RANCHER_IP:/tmp/robot/logs/$LOG_DIR/ $WORKSPACE/archives/healthcheck
+
+exit 0
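A typical invocation, sketched with illustrative values (the positional argument is the Heat environment file required by the script):

    ./scripts/deploy.sh -n 7 -s my-rke-stack -m docker-manifest.csv <env_file>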
diff --git a/deployment/heat/onap-rke/scripts/gen-onap-oom-yaml.sh b/deployment/heat/onap-rke/scripts/gen-onap-oom-yaml.sh
new file mode 100755 (executable)
index 0000000..b700d89
--- /dev/null
@@ -0,0 +1,137 @@
+#!/bin/bash
+#
+# Copyright 2018 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+
+if [ "$#" -ne 1 ]; then
+    echo "This script generates the Heat template for the requested number of k8s VMs"
+    echo "$0 <num k8s vms>"
+    exit 1
+fi
+NUM_K8S_VMS=$1
+
+if [ -z "$WORKSPACE" ]; then
+    export WORKSPACE=`git rev-parse --show-toplevel`
+fi
+PARTS_DIR=$WORKSPACE/deployment/heat/onap-rke/parts
+
+cat <<EOF
+#
+# Generated by scripts/gen-onap-oom-yaml.sh; MANUAL CHANGES WILL BE LOST
+#
+EOF
+
+cat $PARTS_DIR/onap-oom-1.yaml
+
+cat <<EOF
+  rancher_vm:
+    type: OS::Nova::Server
+    properties:
+      name:
+        list_join: ['-', [{ get_param: 'OS::stack_name' }, 'rancher']]
+      image: { get_param: ubuntu_1804_image }
+      flavor: { get_param: rancher_vm_flavor }
+      key_name: { get_param: key_name }
+      networks:
+      - port: { get_resource: rancher_private_port }
+      user_data_format: RAW
+      user_data:
+        str_replace:
+          template:
+            get_file: rancher_vm_entrypoint.sh
+          params:
+            __docker_proxy__: { get_param: docker_proxy }
+            __apt_proxy__: { get_param: apt_proxy }
+            __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+            __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+            __integration_override_yaml__: { get_param: integration_override_yaml }
+            __integration_gerrit_branch__: { get_param: integration_gerrit_branch }
+            __integration_gerrit_refspec__: { get_param: integration_gerrit_refspec }
+            __oom_gerrit_branch__: { get_param: oom_gerrit_branch }
+            __oom_gerrit_refspec__: { get_param: oom_gerrit_refspec }
+            __docker_manifest__: { get_param: docker_manifest }
+            __docker_version__: { get_param: docker_version }
+            __rancher_version__: { get_param: rancher_version }
+            __rancher_agent_version__: { get_param: rancher_agent_version }
+            __kubectl_version__: { get_param: kubectl_version }
+            __helm_version__: { get_param: helm_version }
+            __helm_deploy_delay__: { get_param: helm_deploy_delay }
+            __use_ramdisk__: { get_param: use_ramdisk }
+            __mtu__: { get_param: mtu }
+            __portal_hostname__: { get_param: portal_hostname }
+            __public_net_id__: { get_param: public_net_id }
+            __oam_network_cidr__: { get_param: oam_network_cidr }
+            __oam_network_id__: { get_resource: oam_network }
+            __oam_subnet_id__: { get_resource: oam_subnet }
+            __sec_group__: { get_resource: onap_sg }
+            __k8s_01_vm_ip__: { get_attr: [k8s_01_floating_ip, floating_ip_address] }
+            __k8s_vm_ips__: [
+EOF
+
+for VM_NUM in $(seq -f %02g $NUM_K8S_VMS); do
+    K8S_VM_NAME=k8s_$VM_NUM
+    cat <<EOF
+              get_attr: [${K8S_VM_NAME}_floating_ip, floating_ip_address],
+EOF
+done
+
+cat <<EOF
+            ]
+            __k8s_private_ips__: [
+EOF
+
+for VM_NUM in $(seq -f %02g $NUM_K8S_VMS); do
+    K8S_VM_NAME=k8s_$VM_NUM
+    cat <<EOF
+              get_attr: [${K8S_VM_NAME}_floating_ip, fixed_ip_address],
+EOF
+done
+
+cat <<EOF
+            ]
+EOF
+
+for VM_NUM in $(seq -f %02g $NUM_K8S_VMS); do
+    VM_TYPE=k8s HOST_LABEL=compute VM_NUM=$VM_NUM envsubst < $PARTS_DIR/onap-oom-2.yaml
+done
+
+for VM_NUM in $(seq 3); do
+    VM_TYPE=orch HOST_LABEL=orchestration VM_NUM=$VM_NUM envsubst < $PARTS_DIR/onap-oom-2.yaml
+done
+
+
+cat $PARTS_DIR/onap-oom-3.yaml
+
+for VM_NUM in $(seq -f %02g $NUM_K8S_VMS); do
+    K8S_VM_NAME=k8s_$VM_NUM
+    cat <<EOF
+  ${K8S_VM_NAME}_vm_ip:
+    description: The IP address of the ${K8S_VM_NAME} instance
+    value: { get_attr: [${K8S_VM_NAME}_floating_ip, floating_ip_address] }
+
+  ${K8S_VM_NAME}_vm_private_ip:
+    description: The private IP address of the ${K8S_VM_NAME} instance
+    value: { get_attr: [${K8S_VM_NAME}_floating_ip, fixed_ip_address] }
+
+EOF
+done
+
+for VM_NUM in $(seq 3); do
+    K8S_VM_NAME=orch_$VM_NUM
+    cat <<EOF
+  ${K8S_VM_NAME}_vm_ip:
+    description: The IP address of the ${K8S_VM_NAME} instance
+    value: { get_attr: [${K8S_VM_NAME}_floating_ip, floating_ip_address] }
+
+  ${K8S_VM_NAME}_vm_private_ip:
+    description: The private IP address of the ${K8S_VM_NAME} instance
+    value: { get_attr: [${K8S_VM_NAME}_floating_ip, fixed_ip_address] }
+
+EOF
+done
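Example: generate a template with 12 k8s worker VMs (plus the rancher VM and the three orch VMs the generator always emits), as deploy.sh does when -n is given:

    ./scripts/gen-onap-oom-yaml.sh 12 > onap-oom.yaml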
diff --git a/deployment/heat/onap-rke/scripts/prepull-docker.sh b/deployment/heat/onap-rke/scripts/prepull-docker.sh
new file mode 100755 (executable)
index 0000000..37385dd
--- /dev/null
@@ -0,0 +1,26 @@
+#!/bin/bash -x
+#
+# Copyright 2018 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+
+if [ -z "$WORKSPACE" ]; then
+    export WORKSPACE=`git rev-parse --show-toplevel`
+fi
+
+if [ "$#" -ne 1 ]; then
+    echo "Usage: $0 <docker-proxy>"
+    exit 1
+fi
+DOCKER_PROXY=$1
+
+for MANIFEST in docker-manifest.csv docker-manifest-staging.csv; do
+    for DOCKER_IMAGE in $(tail -n +2 $WORKSPACE/version-manifest/src/main/resources/$MANIFEST | tr ',' ':'); do
+        docker pull $DOCKER_PROXY/$DOCKER_IMAGE
+    done
+done
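Example usage; the registry proxy value here is illustrative:

    ./scripts/prepull-docker.sh nexus3.onap.org:10001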
diff --git a/deployment/heat/onap-rke/scripts/redeploy-module.sh b/deployment/heat/onap-rke/scripts/redeploy-module.sh
new file mode 100644 (file)
index 0000000..ab52831
--- /dev/null
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright 2019 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+
+if [ "$#" -ne 1 ]; then
+   echo "Please specify module name, i.e. $0 robot"
+   exit 1
+fi
+
+module=$1
+deploy=dev-$1
+cd /root/oom/kubernetes
+helm delete $deploy --purge
+/root/integration/deployment/heat/onap-rke/scripts/cleanup.sh $module
+rm -rf /dockerdata-nfs/$deploy
+make $module
+make onap
+helm deploy $deploy local/onap -f /root/oom/kubernetes/onap/resources/environments/public-cloud.yaml -f /root/integration-override.yaml --namespace onap
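Example: redeploy only the robot chart from the Rancher VM, with the module name as in the usage message:

    ./redeploy-module.sh robot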
diff --git a/deployment/heat/onap-rke/scripts/redeploy.sh b/deployment/heat/onap-rke/scripts/redeploy.sh
new file mode 100755 (executable)
index 0000000..1d46f02
--- /dev/null
@@ -0,0 +1,106 @@
+#!/bin/bash -x
+#
+# Copyright 2018 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+
+# This is meant to be run from within the Rancher VM to completely
+# redeploy ONAP while reusing the existing k8s stack.
+#
+# This assumes that /root/integration-override.yaml is up-to-date.
+#
+# This script can also be used after a VM reboot, and will restart
+# helm server accordingly.
+
+export DEBIAN_FRONTEND=noninteractive
+
+usage() {
+    echo "Usage: $0 <namespace>" 1>&2;
+    echo "This will completely re-deploy ONAP, and delete and re-clone oom/ and integration/ directories."
+    exit 1;
+}
+
+if [ "$#" -ne 1 ]; then
+   usage
+fi
+
+
+NS=$1
+OOM_GERRIT_BRANCH=master
+OOM_GERRIT_REFSPEC=refs/heads/master
+INTEGRATION_GERRIT_BRANCH=master
+INTEGRATION_GERRIT_REFSPEC=refs/heads/master
+DOCKER_MANIFEST=""
+
+# Verify that k8s works
+if [ $(kubectl get pods --namespace kube-system | tail -n +2 | grep -c Running) -lt 6 ]; then
+    echo "[ERROR] Kubernetes is not healthy; aborting"
+    exit 1
+fi
+
+if [ ! -f /dockerdata-nfs/rancher_agent_cmd.sh ]; then
+    cp /root/rancher_agent_cmd.sh /dockerdata-nfs
+fi
+
+
+kubectl delete namespace $NS
+for op in secrets configmaps pvc pv services deployments statefulsets clusterrolebinding; do
+    kubectl delete $op -n $NS --all
+done
+helm undeploy dev --purge
+rm -rf /dockerdata-nfs/dev-*/
+
+
+# Clone OOM:
+cd ~
+rm -rf oom/
+git clone -b $OOM_GERRIT_BRANCH https://gerrit.onap.org/r/oom
+cd oom
+git fetch https://gerrit.onap.org/r/oom $OOM_GERRIT_REFSPEC
+git checkout FETCH_HEAD
+git checkout -b workarounds
+git log -1
+
+# Clone integration
+cd ~
+rm -rf integration/
+git clone -b $INTEGRATION_GERRIT_BRANCH https://gerrit.onap.org/r/integration
+cd integration
+git fetch https://gerrit.onap.org/r/integration $INTEGRATION_GERRIT_REFSPEC
+git checkout FETCH_HEAD
+git checkout -b workarounds
+git log -1
+
+if [ ! -z "$DOCKER_MANIFEST" ]; then
+    cd version-manifest/src/main/scripts
+    ./update-oom-image-versions.sh ../resources/$DOCKER_MANIFEST ~/oom/
+fi
+
+cd ~/oom
+git diff
+git commit -a -m "apply manifest versions"
+git tag -a "deploy0" -m "initial deployment"
+
+
+# Run ONAP:
+cd ~/oom/kubernetes/
+
+if [ $(curl -s -o /dev/null -w "%{http_code}" 127.0.0.1:8879) -ne 200 ]; then
+    helm init --client-only
+    helm init --upgrade
+    helm serve &
+    sleep 10
+    helm repo add local http://127.0.0.1:8879
+    helm repo list
+fi
+make all
+rsync -avt ~/oom/kubernetes/helm/plugins ~/.helm/
+helm search -l | grep local
+helm deploy dev local/onap -f ~/oom/kubernetes/onap/resources/environments/public-cloud.yaml -f ~/integration-override.yaml --namespace onap | ts | tee -a ~/helm-deploy.log
+helm list
+
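Example: full redeploy into the onap namespace, run from the Rancher VM (the namespace value matches what deploy.sh uses):

    ./redeploy.sh onap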