Heat template for ONAP OOM deployment 77/28477/1
author Gary Wu <gary.i.wu@huawei.com>
Thu, 18 Jan 2018 03:04:08 +0000 (19:04 -0800)
committer Gary Wu <gary.i.wu@huawei.com>
Thu, 18 Jan 2018 04:07:34 +0000 (20:07 -0800)
Change-Id: Iba9bdd6ea12152c48e4fd7c580476e02ebdaacdf
Issue-ID: INT-381
Signed-off-by: Gary Wu <gary.i.wu@huawei.com>
test/ete/labs/huawei/onap-oom.yaml

index 11767cf..ccc8540 100644 (file)
@@ -30,6 +30,8 @@ resources:
             __apt_proxy__: { get_param: apt_proxy }
           template: |
             #!/bin/bash -x
+            printenv
+
             echo `hostname -I` `hostname` >> /etc/hosts
             mkdir -p /etc/docker
             cat > /etc/docker/daemon.json <<EOF
@@ -64,6 +66,8 @@ resources:
             __rancher_ip_addr__: { get_attr: [rancher_vm, first_address] }
           template: |
             #!/bin/bash -x
+            printenv
+
             mkdir -p /opt/config
             echo "__rancher_ip_addr__" > /opt/config/rancher_ip_addr.txt
             echo `hostname -I` `hostname` >> /etc/hosts
@@ -98,14 +102,16 @@ resources:
             sudo mv linux-amd64/helm /usr/local/bin/helm
 
             # Fix virtual memory allocation for onap-log:elasticsearch:
-            sysctl -w vm.max_map_count=262144
+            echo "vm.max_map_count=262144" >> /etc/sysctl.conf
+            sysctl -p
 
             # install rancher agent
             echo export RANCHER_IP=__rancher_ip_addr__ > api-keys-rc
             source api-keys-rc
 
+            sleep 50
             until curl -s -o projects.json -H "Accept: application/json" http://$RANCHER_IP:8080/v2-beta/projects; do
-              sleep 5
+              sleep 10
             done
             OLD_PID=$(jq -r '.data[0].id' projects.json)
 
@@ -128,14 +134,14 @@ resources:
             source api-keys-rc
 
             until [ $(jq -r '.state' project.json) == "active" ]; do
-              sleep 1
+              sleep 5
               curl -s -H "Accept: application/json" http://$RANCHER_IP:8080/v1/projects/$PID | tee project.json
             done
 
             TID=$(curl -s -X POST -H "Accept: application/json" -H "Content-Type: application/json" http://$RANCHER_IP:8080/v1/projects/$PID/registrationTokens | jq -r '.id')
             touch token.json
             while [ $(jq -r .command token.json | wc -c) -lt 10 ]; do
-                sleep 1
+                sleep 5
                 curl -s -X GET -H "Accept: application/json" http://$RANCHER_IP:8080/v1/projects/$PID/registrationToken/$TID | tee token.json
             done
             CMD=$(jq -r .command token.json)
@@ -153,3 +159,82 @@ resources:
             # Update values.yaml to point to docker-proxy instead of nexus3:
             cd ~/oom/kubernetes
             perl -p -i -e 's/nexus3.onap.org:10001/__docker_proxy__/g' `find ./ -name values.yaml`
+
+            KUBETOKEN=$(echo -n 'Basic '$(echo -n "$RANCHER_ACCESS_KEY:$RANCHER_SECRET_KEY" | base64 -w 0) | base64 -w 0)
+
+            # create .kube/config
+            cat > ~/.kube/config <<EOF
+            apiVersion: v1
+            kind: Config
+            clusters:
+            - cluster:
+                api-version: v1
+                insecure-skip-tls-verify: true
+                server: "https://$RANCHER_IP:8080/r/projects/$PID/kubernetes:6443"
+              name: "oom"
+            contexts:
+            - context:
+                cluster: "oom"
+                user: "oom"
+              name: "oom"
+            current-context: "oom"
+            users:
+            - name: "oom"
+              user:
+                token: "$KUBETOKEN"
+            EOF
+            cat ~/.kube/config
+
+            # Update ~/oom/kubernetes/kube2msb/values.yaml kubeMasterAuthToken to use the token from ~/.kube/config
+            sed -i "s/kubeMasterAuthToken:.*/kubeMasterAuthToken: $KUBETOKEN/" ~/oom/kubernetes/kube2msb/values.yaml
+
+            export KUBECONFIG=/root/.kube/config
+            kubectl config view
+
+            # wait for kubernetes to initialize
+            sleep 100
+            until [ $(kubectl get pods --all-namespaces | tail -n +2 | grep -c Running) -ge 6 ]; do
+              sleep 10
+            done
+
+            # Put your onap_key ssh private key in ~/.ssh/onap_key
+
+            # Create or edit ~/oom/kubernetes/config/onap-parameters.yaml
+            cp ~/oom/kubernetes/config/onap-parameters-sample.yaml ~/oom/kubernetes/config/onap-parameters.yaml
+            cat >> ~/oom/kubernetes/config/onap-parameters.yaml <<EOF
+            OPENSTACK_UBUNTU_14_IMAGE: "trusty"
+            OPENSTACK_PUBLIC_NET_ID: "024582bd-ef9b-48b9-9e70-e6732559d9df"
+            OPENSTACK_OAM_NETWORK_ID: "a899f36c-28e1-4aa9-9451-1b9f41feefa5"
+            OPENSTACK_OAM_SUBNET_ID: "b9627602-2908-4aee-94b5-4f1dc92017df"
+            OPENSTACK_OAM_NETWORK_CIDR: "172.16.1.0/24"
+            OPENSTACK_USERNAME: "demo"
+            OPENSTACK_API_KEY: "demo"
+            OPENSTACK_TENANT_NAME: "demo"
+            OPENSTACK_TENANT_ID: "__public_net_id__"
+            OPENSTACK_REGION: "RegionOne"
+            OPENSTACK_KEYSTONE_URL: "http://192.168.1.11:5000"
+            OPENSTACK_FLAVOUR_MEDIUM: "m1.medium"
+            OPENSTACK_SERVICE_TENANT_NAME: "service"
+            DMAAP_TOPIC: "AUTO"
+            DEMO_ARTIFACTS_VERSION: "1.1.0-SNAPSHOT"
+            EOF
+
+            # Source the environment file:
+            cd ~/oom/kubernetes/oneclick/
+            source setenv.bash
+
+            # run the config pod creation
+            cd ~/oom/kubernetes/config
+            ./createConfig.sh -n onap
+
+            # Wait until the config container completes.
+            sleep 200
+            until [ $(kubectl get pods --namespace onap -a | tail -n +2 | grep -c Completed) -eq 1 ]; do
+              sleep 10
+            done
+
+            # Run ONAP:
+            cd ~/oom/kubernetes/oneclick/
+            ./createAll.bash -n onap
+
+            # Check ONAP status: