+++ /dev/null
-export OS_PROJECT_DOMAIN_NAME=Default
-export OS_USER_DOMAIN_NAME=Default
-export OS_PROJECT_ID=675ca78806b7485e9c96bd70cd5734ac
-export OS_PROJECT_NAME=onap-heat
-export OS_USERNAME=demo
-#export OS_PASSWORD=demo
-export OS_AUTH_URL=http://controller:5000/v3
-export OS_IDENTITY_API_VERSION=3
-export OS_IMAGE_API_VERSION=2
+++ /dev/null
-export OS_PROJECT_DOMAIN_NAME=Default
-export OS_USER_DOMAIN_NAME=Default
-export OS_PROJECT_ID=e6507947d42646ea8045bcf2956b753a
-export OS_PROJECT_NAME=onap-oom
-export OS_USERNAME=demo
-#export OS_PASSWORD=demo
-export OS_AUTH_URL=http://controller:5000/v3
-export OS_IDENTITY_API_VERSION=3
-export OS_IMAGE_API_VERSION=2
+++ /dev/null
-parameters:
-
- ubuntu_1604_image: xenial
-
- apt_proxy: 192.168.1.51:3142
- docker_proxy: 192.168.2.18:5000
-
- rancher_vm_flavor: c1.xlarge
- k8s_vm_flavor: c1.xlarge
-
- public_net_id: c3352d4c-8452-4172-b09c-15f017673708
- key_name: onap-key
-
- oam_network_cidr: 10.0.0.0/16
-
- integration_override_yaml: >
- global:
- repository: __docker_proxy__
- pullPolicy: IfNotPresent
- robot:
- openStackKeyStoneUrl: "http://192.168.1.11:5000"
- openStackPublicNetId: "__public_net_id__"
- openStackTenantId: "${OS_PROJECT_ID}"
- openStackUserName: "${OS_USERNAME}"
- ubuntu14Image: "trusty"
- ubuntu16Image: "xenial"
- openStackPrivateNetId: "__oam_network_id__"
- openStackPrivateSubnetId: "__oam_subnet_id__"
- openStackPrivateNetCidr: "__oam_network_cidr__"
- openStackOamNetworkCidrPrefix: "10.0"
- dcaeCollectorIp: "__k8s_1_vm_ip__"
- vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
- demoArtifactsVersion: "1.2.2"
- scriptVersion: "1.2.1"
- rancherIpAddress: "__rancher_ip_addr__"
- config:
- openStackEncryptedPasswordHere: "${OS_PASSWORD_ENCRYPTED}"
- so:
- config:
- openStackUserName: "${OS_USERNAME}"
- openStackKeyStoneUrl: "http://192.168.1.11:5000"
- openStackEncryptedPasswordHere: "${OS_PASSWORD_ENCRYPTED}"
- appc:
- replicaCount: 1
- config:
- enableClustering: false
- sdnc:
- replicaCount: 3
- config:
- enableClustering: true
+++ /dev/null
-export OS_PROJECT_DOMAIN_NAME=Default
-export OS_USER_DOMAIN_NAME=Default
-export OS_PROJECT_ID=66a48e4b0a934463bef0bd694f93147a
-export OS_PROJECT_NAME=onap-heat
-export OS_USERNAME=demo
-#export OS_PASSWORD=demo
-export OS_AUTH_URL=http://controller.neo.futurewei.com:5000/v3
-export OS_IDENTITY_API_VERSION=3
-export OS_IMAGE_API_VERSION=2
+++ /dev/null
-export OS_PROJECT_DOMAIN_NAME=Default
-export OS_USER_DOMAIN_NAME=Default
-export OS_PROJECT_ID=13159ba149fa47ea9646902ce5734b89
-export OS_PROJECT_NAME=onap-oom
-export OS_USERNAME=demo
-#export OS_PASSWORD=demo
-export OS_AUTH_URL=http://controller.neo.futurewei.com:5000/v3
-export OS_IDENTITY_API_VERSION=3
-export OS_IMAGE_API_VERSION=2
+++ /dev/null
-parameters:
-
- ubuntu_1604_image: xenial
-
- apt_proxy: 10.145.122.117:8000
- docker_proxy: 10.145.123.23:5000
-
- rancher_vm_flavor: m1.xlarge
- k8s_vm_flavor: m1.xlarge
-
- public_net_id: 7baa99ac-43a0-4013-9dfb-bbb94fdcd91a
- key_name: onap-key
-
- oam_network_cidr: 10.0.0.0/16
-
- integration_override_yaml: >
- global:
- repository: __docker_proxy__
- pullPolicy: IfNotPresent
- robot:
- openStackKeyStoneUrl: "http://10.145.122.118:5000"
- openStackPublicNetId: "__public_net_id__"
- openStackTenantId: "${OS_PROJECT_ID}"
- openStackUserName: "${OS_USERNAME}"
- ubuntu14Image: "trusty"
- ubuntu16Image: "xenial"
- openStackPrivateNetId: "__oam_network_id__"
- openStackPrivateSubnetId: "__oam_subnet_id__"
- openStackPrivateNetCidr: "__oam_network_cidr__"
- openStackOamNetworkCidrPrefix: "10.0"
- dcaeCollectorIp: "__k8s_1_vm_ip__"
- vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
- demoArtifactsVersion: "1.2.2"
- scriptVersion: "1.2.1"
- rancherIpAddress: "__rancher_ip_addr__"
- config:
- openStackEncryptedPasswordHere: "${OS_PASSWORD_ENCRYPTED}"
- so:
- config:
- openStackUserName: "${OS_USERNAME}"
- openStackKeyStoneUrl: "http://10.145.122.118:5000"
- openStackEncryptedPasswordHere: "${OS_PASSWORD_ENCRYPTED}"
- appc:
- replicaCount: 1
- config:
- enableClustering: false
- sdnc:
- replicaCount: 3
- config:
- enableClustering: true
rancher_vm_flavor: m5.xlarge
k8s_vm_flavor: m4.xlarge
- etcd_vm_flavor: m1.medium
- orch_vm_flavor: m1.medium
+ etcd_vm_flavor: m1.large
+ orch_vm_flavor: m1.xlarge
public_net_id: fbe8fd92-6636-4e63-ab28-bb6a5b0888a9
repository: __docker_proxy__
pullPolicy: IfNotPresent
robot:
+ flavor: large
+ appcUsername: "appc@appc.onap.org"
+ appcPassword: "demo123456!"
openStackKeyStoneUrl: "https://bdc1tlab01.research.att.com:5000"
openStackPublicNetId: "__oam_network_id__" # NOTE: for TLAB, openStackPublicNetId needs to be oam_network_id instead of public_net_id
openStackTenantId: "${OS_PROJECT_ID}"
openStackOamNetworkCidrPrefix: "10.0"
dcaeCollectorIp: "__k8s_1_vm_ip__"
vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
- demoArtifactsVersion: "1.2.2"
- scriptVersion: "1.2.1"
+ demoArtifactsVersion: "1.3.0-SNAPSHOT"
+ scriptVersion: "1.3.0-SNAPSHOT"
rancherIpAddress: "__rancher_ip_addr__"
config:
openStackEncryptedPasswordHere: "${OS_PASSWORD_ENCRYPTED}"
replicaCount: 1
config:
enableClustering: false
+ enableAAF: true
+ openStackType: "OpenStackProvider"
+ openStackName: "OpenStack"
+ openStackKeyStoneUrl: "https://bdc1tlab01.research.att.com:5000/v2.0"
+ openStackServiceTenantName: "${OS_PROJECT_NAME}"
+ openStackDomain: "${OS_USER_DOMAIN_NAME}"
+ openStackUserName: "${OS_USERNAME}"
+ openStackEncryptedPassword: "${OS_PASSWORD}"
sdnc:
replicaCount: 1
config:
rancher_vm_flavor: m2.xlarge
k8s_vm_flavor: m1.xlarge
- etcd_vm_flavor: m1.medium
- orch_vm_flavor: m1.medium
+ etcd_vm_flavor: m1.large
+ orch_vm_flavor: m1.xlarge
public_net_id: 971040b2-7059-49dc-b220-4fab50cb2ad4
repository: __docker_proxy__
pullPolicy: IfNotPresent
robot:
+ flavor: large
+ appcUsername: "appc@appc.onap.org"
+ appcPassword: "demo123456!"
openStackKeyStoneUrl: "http://10.12.25.2:5000"
openStackPublicNetId: "__public_net_id__"
openStackTenantId: "${OS_PROJECT_ID}"
openStackOamNetworkCidrPrefix: "10.0"
dcaeCollectorIp: "__k8s_1_vm_ip__"
vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
- demoArtifactsVersion: "1.2.2"
- scriptVersion: "1.2.1"
+ demoArtifactsVersion: "1.3.0-SNAPSHOT"
+ scriptVersion: "1.3.0-SNAPSHOT"
rancherIpAddress: "__rancher_ip_addr__"
config:
openStackEncryptedPasswordHere: "${OS_PASSWORD_ENCRYPTED}"
replicaCount: 1
config:
enableClustering: false
+ enableAAF: true
+ openStackType: "OpenStackProvider"
+ openStackName: "OpenStack"
+ openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
+ openStackServiceTenantName: "${OS_PROJECT_NAME}"
+ openStackDomain: "${OS_USER_DOMAIN_NAME}"
+ openStackUserName: "${OS_USERNAME}"
+ openStackEncryptedPassword: "${OS_PASSWORD}"
sdnc:
replicaCount: 1
config:
#
export DEBIAN_FRONTEND=noninteractive
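+# Register this VM's IP and hostname in /etc/hosts up front so later steps can resolve the local hostname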
+HOST_IP=$(hostname -I)
+echo $HOST_IP `hostname` >> /etc/hosts
printenv
mkdir -p /opt/config
echo "__docker_version__" > /opt/config/docker_version.txt
echo "__rancher_ip_addr__" > /opt/config/rancher_ip_addr.txt
echo "__rancher_private_ip_addr__" > /opt/config/rancher_private_ip_addr.txt
-HOST_IP=$(hostname -I)
-echo $HOST_IP `hostname` >> /etc/hosts
mkdir -p /etc/docker
if [ ! -z "__docker_proxy__" ]; then
get_attr: [k8s_8_floating_ip, floating_ip_address],
get_attr: [k8s_9_floating_ip, floating_ip_address],
get_attr: [k8s_10_floating_ip, floating_ip_address],
- get_attr: [k8s_11_floating_ip, floating_ip_address],
]
__k8s_private_ips__: [
get_attr: [k8s_1_floating_ip, fixed_ip_address],
get_attr: [k8s_8_floating_ip, fixed_ip_address],
get_attr: [k8s_9_floating_ip, fixed_ip_address],
get_attr: [k8s_10_floating_ip, fixed_ip_address],
- get_attr: [k8s_11_floating_ip, fixed_ip_address],
]
k8s_1_private_port:
type: OS::Neutron::Port
template:
get_file: k8s_vm_entrypoint.sh
- k8s_11_private_port:
- type: OS::Neutron::Port
- properties:
- network: { get_resource: oam_network }
- fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
- security_groups:
- - { get_resource: onap_sg }
-
- k8s_11_floating_ip:
- type: OS::Neutron::FloatingIP
- properties:
- floating_network_id: { get_param: public_net_id }
- port_id: { get_resource: k8s_11_private_port }
-
- k8s_11_vm:
- type: OS::Nova::Server
- properties:
- name:
- list_join: ['-', [ { get_param: 'OS::stack_name' }, 'k8s', '11' ] ]
- image: { get_param: ubuntu_1604_image }
- flavor: { get_param: k8s_vm_flavor }
- key_name: { get_param: key_name }
- networks:
- - port: { get_resource: k8s_11_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'compute'
- template:
- get_file: k8s_vm_entrypoint.sh
-
etcd_1_private_port:
type: OS::Neutron::Port
properties:
template:
get_file: k8s_vm_entrypoint.sh
- orch_1_private_port:
- type: OS::Neutron::Port
- properties:
- network: { get_resource: oam_network }
- fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
- security_groups:
- - { get_resource: onap_sg }
-
- orch_1_floating_ip:
- type: OS::Neutron::FloatingIP
- properties:
- floating_network_id: { get_param: public_net_id }
- port_id: { get_resource: orch_1_private_port }
-
- orch_1_vm:
- type: OS::Nova::Server
- properties:
- name:
- list_join: ['-', [ { get_param: 'OS::stack_name' }, 'orch', '1' ] ]
- image: { get_param: ubuntu_1604_image }
- flavor: { get_param: orch_vm_flavor }
- key_name: { get_param: key_name }
- networks:
- - port: { get_resource: orch_1_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'orchestration'
- template:
- get_file: k8s_vm_entrypoint.sh
-
- orch_2_private_port:
- type: OS::Neutron::Port
- properties:
- network: { get_resource: oam_network }
- fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
- security_groups:
- - { get_resource: onap_sg }
-
- orch_2_floating_ip:
- type: OS::Neutron::FloatingIP
- properties:
- floating_network_id: { get_param: public_net_id }
- port_id: { get_resource: orch_2_private_port }
-
- orch_2_vm:
- type: OS::Nova::Server
- properties:
- name:
- list_join: ['-', [ { get_param: 'OS::stack_name' }, 'orch', '2' ] ]
- image: { get_param: ubuntu_1604_image }
- flavor: { get_param: orch_vm_flavor }
- key_name: { get_param: key_name }
- networks:
- - port: { get_resource: orch_2_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'orchestration'
- template:
- get_file: k8s_vm_entrypoint.sh
-
outputs:
rancher_vm_ip:
description: The IP address of the rancher instance
description: The IP address of the k8s_10 instance
value: { get_attr: [k8s_10_floating_ip, floating_ip_address] }
- k8s_11_vm_ip:
- description: The IP address of the k8s_11 instance
- value: { get_attr: [k8s_11_floating_ip, floating_ip_address] }
-
#
export DEBIAN_FRONTEND=noninteractive
+HOST_IP=$(hostname -I)
+echo $HOST_IP `hostname` >> /etc/hosts
printenv
mkdir -p /opt/config
mkdir ~/.kube
# install helm __helm_version__
+mkdir -p helm
+pushd helm
wget -q http://storage.googleapis.com/kubernetes-helm/helm-v__helm_version__-linux-amd64.tar.gz
tar -zxvf helm-v__helm_version__-linux-amd64.tar.gz
-sudo mv linux-amd64/helm /usr/local/bin/helm
+sudo cp linux-amd64/helm /usr/local/bin/helm
+popd
+mkdir -p rancher
+pushd rancher
echo export RANCHER_IP=__rancher_private_ip_addr__ > api-keys-rc
source api-keys-rc
jq -r .command token.json > rancher_agent_cmd.sh
chmod +x rancher_agent_cmd.sh
cp rancher_agent_cmd.sh /dockerdata-nfs
+popd
+
cd /dockerdata-nfs
git add -A
git commit -a -m "Add rancher agent command file"
cd ~
+cp /dockerdata-nfs/rancher_agent_cmd.sh .
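+# Tag this node with the 'orchestration' host label and pin the agent IP before registering with Rancher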
+sed -i "s/docker run/docker run -e CATTLE_HOST_LABELS='orchestration=true' -e CATTLE_AGENT_IP=${HOST_IP}/g" rancher_agent_cmd.sh
+source rancher_agent_cmd.sh
+
+
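+# Build the kubeconfig auth token: base64-encode a basic-auth header made from the Cattle API keys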
KUBETOKEN=$(echo -n 'Basic '$(echo -n "$CATTLE_ACCESS_KEY:$CATTLE_SECRET_KEY" | base64 -w 0) | base64 -w 0)
export KUBECONFIG=/root/.kube/config
kubectl config view
+
+
# Enable auto-completion for kubectl
echo "source <(kubectl completion bash)" >> ~/.bashrc
kubectl delete service consul -n onap
fi
-for op in secrets configmaps pvc pv services deployments statefulsets; do
+for op in secrets configmaps pvc pv services deployments statefulsets clusterrolebinding; do
ARRAY=(`kubectl get $op -n onap | grep dev-$COMPONENT | awk '{print $1}'`)
for i in ${ARRAY[*]}; do
kubectl delete $op -n onap $i
sleep 2m
ssh -o StrictHostKeychecking=no -i $SSH_KEY ubuntu@$RANCHER_IP "sed -u '/Cloud-init.*finished/q' <(tail -n+0 -f /var/log/cloud-init-output.log)"
-for n in $(seq 1 8); do
- echo "Wait count $n of 8"
- sleep 15m
- ssh -i $SSH_KEY ubuntu@$RANCHER_IP 'sudo su -l root -c "/root/oom/kubernetes/robot/ete-k8s.sh onap health"'
- RESULT=$?
- if [ $RESULT -eq 0 ]; then
- break
- fi
+PREV_RESULT=0
+for n in $(seq 1 20); do
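+  # Count pods that are not yet Running/Complete; stop waiting once none remain or the count has stabilized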
+ RESULT=$(ssh -i $SSH_KEY ubuntu@$RANCHER_IP 'sudo su -c "kubectl -n onap get pods"' | grep -vE 'Running|Complete|NAME' | wc -l)
+ if [[ $? -eq 0 && ( $RESULT -eq 0 || $RESULT -eq $PREV_RESULT ) ]]; then
+ break
+ fi
+ sleep 15m
+ PREV_RESULT=$RESULT
+done
+
+PREV_RESULT=0
+for n in $(seq 1 20); do
+ echo "Wait for HEALTHCHECK count $n of 10"
+ ROBOT_POD=$(ssh -i $SSH_KEY ubuntu@$RANCHER_IP 'sudo su -c "kubectl --namespace onap get pods"' | grep robot | sed 's/ .*//')
+ ssh -i $SSH_KEY ubuntu@$RANCHER_IP 'sudo su -l root -c "/root/oom/kubernetes/robot/ete-k8s.sh onap health"'
+ RESULT=$?
+ if [[ $RESULT -lt 10 && ( $RESULT -eq 0 || $RESULT -eq $PREV_RESULT ) ]]; then
+ break
+ fi
+ sleep 15m
+ PREV_RESULT=$RESULT
done
-ROBOT_POD=$(ssh -i $SSH_KEY ubuntu@$RANCHER_IP 'sudo su -c "kubectl --namespace onap get pods"' | grep robot | sed 's/ .*//')
if [ "$ROBOT_POD" == "" ]; then
- exit 1
+ exit 1
fi
LOG_DIR=$(echo "kubectl exec -n onap $ROBOT_POD -- ls -1t /share/logs | grep health | head -1" | ssh -i $SSH_KEY ubuntu@$RANCHER_IP sudo su)
-if [ "$LOG_DIR" == "" ]; then
- exit 1
-fi
-
echo "kubectl cp -n onap $ROBOT_POD:share/logs/$LOG_DIR /tmp/robot/logs/$LOG_DIR" | ssh -i $SSH_KEY ubuntu@$RANCHER_IP sudo su
-rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$RANCHER_IP:/tmp/robot/logs/$LOG_DIR/ $WORKSPACE/archives/
-
echo "Browse Robot results at http://$K8S_IP:30209/logs/$LOG_DIR/"
+mkdir -p $WORKSPACE/archives/healthcheck
+rsync -e "ssh -i $SSH_KEY" -avtz ubuntu@$RANCHER_IP:/tmp/robot/logs/$LOG_DIR/ $WORKSPACE/archives/healthcheck
exit 0
VM_TYPE=etcd HOST_LABEL=etcd VM_NUM=$VM_NUM envsubst < $PARTS_DIR/onap-oom-2.yaml
done
-for VM_NUM in $(seq 2); do
+for VM_NUM in $(seq 0); do
VM_TYPE=orch HOST_LABEL=orchestration VM_NUM=$VM_NUM envsubst < $PARTS_DIR/onap-oom-2.yaml
done
--- /dev/null
+#!/bin/bash -x
+#
+# Copyright 2018 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+# This is meant to be run from within the Rancher VM to completely
+# redeploy ONAP while reusing the existing k8s stack.
+#
+# This assumes that /root/integration-override.yaml is up-to-date.
+#
+# This script can also be used after a VM reboot; it will restart
+# the helm server as needed.
+
+export DEBIAN_FRONTEND=noninteractive
+
+usage() {
+ echo "Usage: $0 <namespace>" 1>&2;
+ echo "This will completely re-deploy ONAP, and delete and re-clone oom/ and integration/ directories."
+ exit 1;
+}
+
+if [ "$#" -ne 1 ]; then
+ usage
+fi
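+# Example invocation (script name is deployment-specific): ./redeploy.sh onap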
+
+
+NS=$1
+OOM_GERRIT_BRANCH=master
+OOM_GERRIT_REFSPEC=refs/heads/master
+INTEGRATION_GERRIT_BRANCH=master
+INTEGRATION_GERRIT_REFSPEC=refs/heads/master
+DOCKER_MANIFEST=""
+
+# Verify that k8s works
+if [ $(kubectl get pods --namespace kube-system | tail -n +2 | grep -c Running) -lt 6 ]; then
+ echo "[ERROR] Kubernetes is not healthy; aborting"
+ exit 1
+fi
+
+if [ ! -f /dockerdata-nfs/rancher_agent_cmd.sh ]; then
+ cp /root/rancher_agent_cmd.sh /dockerdata-nfs
+fi
+
+
+kubectl delete namespace $NS
+for op in secrets configmaps pvc pv services deployments statefulsets clusterrolebinding; do
+ kubectl delete $op -n $NS --all
+done
+helm undeploy dev --purge
+rm -rf /dockerdata-nfs/dev-*/
+
+
+# Clone OOM:
+cd ~
+rm -rf oom/
+git clone -b $OOM_GERRIT_BRANCH https://gerrit.onap.org/r/oom
+cd oom
+git fetch https://gerrit.onap.org/r/oom $OOM_GERRIT_REFSPEC
+git checkout FETCH_HEAD
+git checkout -b workarounds
+git log -1
+
+# Clone integration
+cd ~
+rm -rf integration/
+git clone -b $INTEGRATION_GERRIT_BRANCH https://gerrit.onap.org/r/integration
+cd integration
+git fetch https://gerrit.onap.org/r/integration $INTEGRATION_GERRIT_REFSPEC
+git checkout FETCH_HEAD
+git checkout -b workarounds
+git log -1
+
+if [ ! -z "$DOCKER_MANIFEST" ]; then
+ cd version-manifest/src/main/scripts
+ ./update-oom-image-versions.sh ../resources/$DOCKER_MANIFEST ~/oom/
+fi
+
+cd ~/oom
+git diff
+git commit -a -m "apply manifest versions"
+git tag -a "deploy0" -m "initial deployment"
+
+
+# Run ONAP:
+cd ~/oom/kubernetes/
+
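+# (Re)start the local helm chart server if nothing is answering on port 8879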
+if [ $(curl -s -o /dev/null -w "%{http_code}" 127.0.0.1:8879) -ne 200 ]; then
+ helm init --client-only
+ helm init --upgrade
+ helm serve &
+ sleep 10
+ helm repo add local http://127.0.0.1:8879
+ helm repo list
+fi
+make all
+rsync -avt ~/oom/kubernetes/helm/plugins ~/.helm/
+helm search -l | grep local
+helm deploy dev local/onap -f ~/oom/kubernetes/onap/resources/environments/public-cloud.yaml -f ~/integration-override.yaml --namespace onap | ts | tee -a ~/helm-deploy.log
+helm list
+
heat_template_version: 2013-05-23
parameters:
- name: { description: Instance name, label: Name, type: string, default: hv-ves_sim }
+ name: { description: Instance name, label: Name, type: string, default: hvves-sim }
flavor_name: { description: Instance flavor to be used, label: Flavor Name, type: string }
image_name: { description: Ubuntu 16.04 image to be used, label: Image Name, type: string }
private_net_id: { description: Private network id, label: Private Network ID, type: string }
}
proto_files_checkout () {
- mkdir -p ~/hv-ves_sim/proto;cd ~/hv-ves_sim/proto;wget "https://gerrit.onap.org/r/gitweb?p=dcaegen2/collectors/hv-ves.git;a=blob_plain;f=hv-collector-domain/src/main/proto/event/VesEvent.proto;hb=HEAD" -O VesEvent.proto;wget "https://gerrit.onap.org/r/gitweb?p=dcaegen2/collectors/hv-ves.git;a=blob_plain;f=hv-collector-domain/src/main/proto/measurements/HVMeasFields.proto;hb=HEAD" -O HVMeasFields.proto;wget "https://gerrit.onap.org/r/gitweb?p=dcaegen2/collectors/hv-ves.git;a=blob_plain;f=hv-collector-domain/src/main/proto/measurements/MeasDataCollection.proto;hb=HEAD" -O MeasDataCollection.proto
+ mkdir -p ~/hv-ves_sim/proto;cd ~/hv-ves_sim/proto;wget "https://gerrit.onap.org/r/gitweb?p=dcaegen2/collectors/hv-ves.git;a=blob_plain;f=hv-collector-domain/src/main/proto/event/VesEvent.proto;hb=HEAD" -O VesEvent.proto
}
kafkacat_install () {
message_samples_checkout () {
mkdir ~/hv-ves_sim/samples
- cd ~/hv-ves_sim/samples ; wget "https://gerrit.onap.org/r/gitweb?p=integration.git;a=blob_plain;f=test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/scenarios/authorization/xnf-valid-messages-request.json;hb=HEAD" -O xnf-valid-messages-request.json ; wget "https://gerrit.onap.org/r/gitweb?p=integration.git;a=blob_plain;f=test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/scenarios/invalid-gpb-data/xnf-invalid-gpb-data-request.json;hb=HEAD" -O xnf-invalid-gpb-data-request.json ; wget "https://gerrit.onap.org/r/gitweb?p=integration.git;a=blob_plain;f=test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/scenarios/multiple-simulators-payload/xnf-simulator-smaller-valid-request.json;hb=HEAD" -O xnf-simulator-smaller-valid-request.json
+ cd ~/hv-ves_sim/samples ; wget "https://gerrit.onap.org/r/gitweb?p=integration/csit.git;a=blob_plain;f=tests/dcaegen2-collectors-hv-ves/testcases/resources/scenarios/authorization/xnf-valid-messages-request.json;hb=HEAD" -O xnf-valid-messages-request.json ; wget "https://gerrit.onap.org/r/gitweb?p=integration/csit.git;a=blob_plain;f=tests/dcaegen2-collectors-hv-ves/testcases/resources/scenarios/invalid-gpb-data/xnf-invalid-gpb-data-request.json;hb=HEAD" -O xnf-invalid-gpb-data-request.json ; wget "https://gerrit.onap.org/r/gitweb?p=integration/csit.git;a=blob_plain;f=tests/dcaegen2-collectors-hv-ves/testcases/resources/scenarios/multiple-simulators-payload/xnf-simulator-smaller-valid-request.json;hb=HEAD" -O xnf-simulator-smaller-valid-request.json
}
set_versions
The recommended way is to check out the PNF Simulator project from the ONAP Git repository and use the *simulator.sh* script.
If you copy the *simulator.sh* script to another location, remember to also copy *docker-compose.yml* and the *config*, *json_schema*, and *netconf* directories.
To run the simulator, invoke ./simulator.sh start
+You may be asked to provide your user's password during startup.
The script downloads any missing Docker images and runs instances of them.
+The easiest way is to download or generate a PNF Simulator zip archive with all the needed configuration files.
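+A minimal session using only the commands mentioned in this README (any other flags or subcommands would be assumptions):
+
+```
+./simulator.sh start   # pull any missing Docker images and start the simulator containers
+./simulator.sh logs    # view the simulator logs
+```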
### Logging
Logs can be accessed by invoking *./simulator.sh logs*.
restart: on-failure
depends_on:
- sftp-server
- - ftpes-server
+ - ftpes-server-pure-ftpd
+ - ftpes-server-vsftpd
sftp-server:
container_name: sftp-server
restart: on-failure
command: sftp-user::1001
- ftpes-server:
- container_name: ftpes-server
+ ftpes-server-pure-ftpd:
+ container_name: ftpes-server-pure-ftpd
image: stilliard/pure-ftpd:latest
ports:
- "2221:21"
FTP_USER_HOME: onap
restart: on-failure
- vsftpd_ftpes_server:
- container_name: vsftpd_ftpes_server
+ ftpes-server-vsftpd:
+ container_name: ftpes-server-vsftpd
image: docker.io/panubo/vsftpd
ports:
- "8221:21"
- ./ftpes/files/onap/ftpes-onap.txt:/srv/ftpes-onap.txt:ro
restart: on-failure
command: vsftpd /etc/vsftpd_ssl.conf
+
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
-
<parent>
<groupId>org.onap.oparent</groupId>
<artifactId>oparent</artifactId>
fi
}
+function set_vsftpd_file_owner() {
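+  # vsftpd refuses to start if its config file is not owned by root, so fix ownership before the container mounts it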
+ sudo chown root ./ftpes/vsftpd/configuration/vsftpd_ssl.conf
+}
+
function start_netconf_server() {
+ set_vsftpd_file_owner
docker-compose -f $1 up -d $NETOPEER_CONTAINER_NAME
echo
echo "NETCONF server container's logs:"
echo "Simulator containers are already up"
else
echo "Starting simulator containers using netconf model specified in config/netconf.env"
-
+ set_vsftpd_file_owner
archive_logs
start_netconf_server $1
docker-compose -f $1 up -d $SIMULATOR_CONTAINER_NAME
FOR DEVELOPERS
1. Build local simulator image using "./simulator.sh build"
-2. Run containers with "./simulator.sh start-debug"
+2. Run containers with "./simulator.sh start-dev"
If you change the source code, you have to rebuild the image with "./simulator.sh build" and run "./simulator.sh start/start-dev" again.
EndOfMessage
<includes>
<include>**/*</include>
</includes>
+ <fileMode>0644</fileMode>
+
</fileSet>
<fileSet>
<directory>json_schema</directory>
<includes>
<include>**/*</include>
</includes>
+
</fileSet>
</fileSets>
</assembly>
\ No newline at end of file
image,tag
-onap/aaf/aaf_agent,2.1.6-SNAPSHOT
-onap/aaf/aaf_cass,2.1.6-SNAPSHOT
-onap/aaf/aaf_cm,2.1.6-SNAPSHOT
-onap/aaf/aaf_config,2.1.6-SNAPSHOT
-onap/aaf/aaf_fs,2.1.6-SNAPSHOT
-onap/aaf/aaf_gui,2.1.6-SNAPSHOT
-onap/aaf/aaf_hello,2.1.6-SNAPSHOT
-onap/aaf/aaf_locate,2.1.6-SNAPSHOT
-onap/aaf/aaf_oauth,2.1.6-SNAPSHOT
-onap/aaf/aaf_service,2.1.6-SNAPSHOT
+onap/aaf/aaf_agent,2.1.7-SNAPSHOT
+onap/aaf/aaf_cass,2.1.7-SNAPSHOT
+onap/aaf/aaf_cm,2.1.7-SNAPSHOT
+onap/aaf/aaf_config,2.1.7-SNAPSHOT
+onap/aaf/aaf_fs,2.1.7-SNAPSHOT
+onap/aaf/aaf_gui,2.1.7-SNAPSHOT
+onap/aaf/aaf_hello,2.1.7-SNAPSHOT
+onap/aaf/aaf_locate,2.1.7-SNAPSHOT
+onap/aaf/aaf_oauth,2.1.7-SNAPSHOT
+onap/aaf/aaf_service,2.1.7-SNAPSHOT
onap/aaf/abrmd,3.0.0-SNAPSHOT-latest
onap/aaf/distcenter,3.0.0-SNAPSHOT-latest
-onap/aaf/sms,3.0.0-SNAPSHOT-latest
-onap/aaf/smsquorumclient,3.0.0-SNAPSHOT-latest
+onap/aaf/sms,3.0.1-SNAPSHOT-latest
+onap/aaf/smsquorumclient,3.0.1-SNAPSHOT-latest
onap/aaf/testcaservice,3.0.0-SNAPSHOT-latest
onap/aai-graphadmin,1.0-STAGING-latest
onap/aai-resources,1.3-STAGING-latest
onap/music/cassandra_3_11,3.0.23
onap/music/cassandra_job,3.0.23
onap/music/cassandra_music,3.0.0
-onap/music/music,2.5.3
+onap/music/music,3.0.23
onap/music/prom,1.0.5-latest
onap/network-discovery,latest
onap/oom/kube2msb,1.1.0
onap/spike,1.0-STAGING-latest
onap/testsuite,1.3.1-STAGING-latest
onap/tproxy-config,2.1-STAGING-latest
-onap/usecase-ui,1.2.0-STAGING-latest
+onap/usecase-ui,1.2.1-STAGING-latest
onap/usecase-ui-server,1.2.0-STAGING-latest
onap/validation,1.3-STAGING-latest
onap/vfc/catalog,1.2.0-STAGING-latest
onap/aaf/aaf_service,2.1.6
onap/aaf/abrmd,3.0.0
onap/aaf/distcenter,3.0.0
-onap/aaf/sms,3.0.0
-onap/aaf/smsquorumclient,3.0.0
+onap/aaf/sms,3.0.1
+onap/aaf/smsquorumclient,3.0.1
onap/aaf/testcaservice,3.0.0
onap/aai-cacher,1.0.0
onap/aai-graphadmin,1.0.0
onap/ccsdk-odl-oxygen-image,0.3.1
onap/ccsdk-odlsli-image,0.3.1
onap/champ,1.3.0
-onap/clamp,3.0.1
-onap/clamp-dashboard-kibana,3.0.1
-onap/clamp-dashboard-logstash,3.0.1
+onap/clamp,3.0.2
+onap/clamp-dashboard-kibana,3.0.2
+onap/clamp-dashboard-logstash,3.0.2
onap/cli,2.0.4
onap/data-router,1.3.0
onap/dmaap/buscontroller,1.0.23
onap/org.onap.dcaegen2.deployments.bootstrap,1.1.3
onap/org.onap.dcaegen2.deployments.cm-container,1.4.2
onap/org.onap.dcaegen2.deployments.healthcheck-container,1.1.2
-onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container,1.4.4
+onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container,1.4.5
onap/org.onap.dcaegen2.deployments.redis-cluster-container,1.0.0
onap/org.onap.dcaegen2.deployments.tca-cdap-container,1.1.0
onap/org.onap.dcaegen2.deployments.tls-init-container,1.0.0
onap/pomba-context-aggregator,1.3.2
onap/pomba-network-discovery-context-builder,1.3.0
onap/pomba-sdc-context-builder,1.3.1
-onap/portal-app,2.3.0
-onap/portal-db,2.3.0
-onap/portal-sdk,2.3.0
-onap/portal-wms,2.3.0
+onap/portal-app,2.3.1
+onap/portal-db,2.3.1
+onap/portal-sdk,2.3.1
+onap/portal-wms,2.3.1
onap/sdc-backend,1.3.0
onap/sdc-backend-init,1.3.0
onap/sdc-cassandra,1.3.0
onap/so/so-monitoring,1.3.1
onap/so/vfc-adapter,1.3.1
onap/sparky-be,1.3.0
-onap/usecase-ui,1.2.0
+onap/usecase-ui,1.2.1
onap/usecase-ui-server,1.2.0
onap/validation,1.3.0
onap/vfc/catalog,1.2.0
org.onap.aai.esr-server,aai-esr-server,1.2.1
org.onap.aai.esr-server,esr-manager,1.2.1
org.onap.aai.esr-server,standalone,1.2.1
-org.onap.aai.event-client,event-client,1.3.0
-org.onap.aai.event-client,event-client-api,1.3.0
-org.onap.aai.event-client,event-client-dmaap,1.3.0
-org.onap.aai.event-client,event-client-kafka,1.3.0
+org.onap.aai.event-client,event-client,1.3.1
+org.onap.aai.event-client,event-client-api,1.3.1
+org.onap.aai.event-client,event-client-dmaap,1.3.1
+org.onap.aai.event-client,event-client-kafka,1.3.1
org.onap.aai.gizmo,gizmo,1.3.0
org.onap.aai.graphadmin,graphadmin,1.3.0
org.onap.aai.logging-service,common-logging,1.3.0
org.onap.ccsdk.sli.plugins,restapi-call-node-provider,0.3.1
org.onap.ccsdk.storage.pgaas,pgaas,1.0.0
org.onap.ccsdk.utils,utils,1.0.0
-org.onap.clamp.clds.clamp,clamp,3.0.1
+org.onap.clamp.clds.clamp,clamp,3.0.2
org.onap.cli,cli-framework,2.0.4
org.onap.cli,cli-main,2.0.4
org.onap.cli,cli-plugins-sample,2.0.4