# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-FROM golang:1.14.1
+FROM nexus3.onap.org:10001/golang:1.14.1
WORKDIR /go/src/github.com/onap/multicloud-k8s
COPY ./ ./
USER emco
ENTRYPOINT ["./entrypoint"]
-
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-FROM ubuntu:18.04
+FROM nexus3.onap.org:10001/ubuntu:18.04
ARG HTTP_PROXY=${HTTP_PROXY}
ARG HTTPS_PROXY=${HTTPS_PROXY}
##############################################################################
function stop_all {
- docker-compose kill
- docker-compose down
+ docker compose kill
+ docker compose down
}
function start_mongo {
- docker-compose up -d mongo
+ docker compose up -d mongo
export DATABASE_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $(docker ps -aqf "name=mongo"))
export no_proxy=${no_proxy:-},${DATABASE_IP}
export NO_PROXY=${NO_PROXY:-},${DATABASE_IP}
}
function start_all {
- docker-compose up -d
+ docker compose up -d
}
function wait_for_service {
- for try in {0..59}; do
+ for try in {0..29}; do
echo "$(date +%H:%M:%S) - Waiting for service up"
sleep 1
if $(curl http://localhost:9015/v1 &>/dev/null); then
return 0
fi
done
+ echo Wait failed
+ docker compose logs
exit 1
}
k8s_path="$(git rev-parse --show-toplevel)"
-VERSION="0.10.1-SNAPSHOT"
+VERSION="0.10.2-SNAPSHOT"
export IMAGE_NAME="nexus3.onap.org:10003/onap/multicloud/k8s"
function _compile_src {
echo "Compiling source code"
+ go version
+ ls
pushd $k8s_path/src/k8splugin/
- make
+ pwd
+ # mount directory and build in container (thus not relying on the state of the runner)
+ docker run --rm -v "$PWD":/usr/src/myapp -w /usr/src/myapp golang:1.14 make
popd
}
if [[ -n ${image} ]]; then
docker images ${image#*:} -q | xargs docker rmi -f
fi
- docker ps -a --filter "status=exited" -q | xargs docker rm
+
+ exited_containers=$(docker ps -a --filter "status=exited" -q)
+ if [[ -n "$exited_containers" ]]; then
+ echo "Removing exited containers..."
+ echo "$exited_containers" | xargs docker rm
+ else
+ echo "Nothing to remove"
+ fi
}
function _build_docker {
echo "Building docker image"
+ apt-get update && apt-get install -y docker-compose-plugin
docker-compose build --no-cache
}
- HTTPS_PROXY=${HTTPS_PROXY}
- NO_PROXY=${NO_PROXY},mongo
depends_on:
- - mongo
+ mongo:
+ condition: service_healthy
network_mode: host
+ ports:
+ - 9015:9015
volumes:
- /opt/csar:/opt/csar
- ${PWD}/k8sconfig.json:/opt/multicloud/k8splugin/k8sconfig.json:ro
+ restart: always
+ healthcheck:
+ test: ["CMD", "curl", "http://localhost:9015/v1"]
mongo:
- image: mongo
+ image: nexus3.onap.org:10001/mongo:5.0.28
environment:
- HTTP_PROXY=${HTTP_PROXY}
- HTTPS_PROXY=${HTTPS_PROXY}
- NO_PROXY=${NO_PROXY}
+ healthcheck:
+ test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
+ ports:
+ - 27017:27017
+ restart: always
+ etcd:
+ image: nexus3.onap.org:10001/bitnami/etcd:3
+ environment:
+ - ALLOW_NONE_AUTHENTICATION=yes
+ - HTTP_PROXY=${HTTP_PROXY}
+ - HTTPS_PROXY=${HTTPS_PROXY}
+ - NO_PROXY=${NO_PROXY}
+ volumes:
+ - etcd_data:/bitnami/etcd
+ ports:
+ - 2379:2379
+ - 2380:2380
+
+volumes:
+ etcd_data:
+ driver: local
dependencies required for the usage of the KUD via Virtual Machines.
Some of this dependencies are:
- - `Vagrant <https://www.vagrantup.com/>`_,
+ - Vagrant,
- `Libvirt <https://libvirt.org/>`_ or `VirtualBox <https://www.virtualbox.org/>`_
The *-p* argument determines the Virtualization provider to be used
return
fi
- wget https://dl.google.com/go/$tarball
+ wget -nv https://dl.google.com/go/$tarball
sudo tar -C /usr/local -xzf $tarball
rm $tarball
OVN_CENTRAL_INTERFACE="${OVN_CENTRAL_INTERFACE:-$(ip addr show | awk '/inet.*brd/{print $NF; exit}')}"
echo "export OVN_CENTRAL_INTERFACE=${OVN_CENTRAL_INTERFACE}" | sudo tee --append /etc/environment
echo "export OVN_CENTRAL_ADDRESS=$(get_ovn_central_address)" | sudo tee --append /etc/environment
- echo "export KUBE_CONFIG_DIR=/opt/kubeconfig" | sudo tee --append /etc/environment
+ echo "export KUBE_CONFIG_DIR=$HOME/.kube" | sudo tee --append /etc/environment
echo "export CSAR_DIR=/opt/csar" | sudo tee --append /etc/environment
- echo "export ANSIBLE_CONFIG=${ANSIBLE_CONFIG}" | sudo tee --append /etc/environment
+ # echo "export ANSIBLE_CONFIG=${ANSIBLE_CONFIG}" | sudo tee --append /etc/environment
}
# install_k8s() - Install Kubernetes using kubespray tool
sudo cp $kud_inventory_folder/artifacts/kubectl /usr/local/bin/
}
+function install_k3s {
+ echo "Installing k3s..."
+
+ curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" sh -
+
+ systemctl status k3s
+
+ sudo kubectl get all -n kube-system
+
+ sudo kubectl cluster-info
+}
+
# install_addons() - Install Kubenertes AddOns
function install_addons {
source /etc/environment
# install_plugin() - Install ONAP Multicloud Kubernetes plugin
function install_plugin {
echo "Installing multicloud/k8s plugin"
- sudo -E pip install --no-cache-dir docker-compose
sudo mkdir -p /opt/{kubeconfig,consul/config}
- sudo cp $HOME/.kube/config /opt/kubeconfig/kud
+ # sudo cp $HOME/.kube/config /opt/kubeconfig/kud
+ mkdir -p $WORKSPACE/.kube
+ kubeconfig_path="$WORKSPACE/.kube/config"
+ sudo cat /etc/rancher/k3s/k3s.yaml > $kubeconfig_path
pushd $kud_folder/../../../deployments
sudo ./build.sh
if [[ "${testing_enabled}" == "true" ]]; then
sudo ./start.sh
pushd $kud_tests
- plugin_tests="plugin plugin_edgex plugin_fw plugin_eaa"
+ # plugin_tests="plugin plugin_edgex plugin_fw plugin_eaa"
+ plugin_tests="plugin" # TODO: re-enable plugin_edgex plugin_fw plugin_eaa
if [[ $kubespray_version == "2.16.0" ]]; then
plugin_tests=${plugin_tests//plugin_fw/};
fi
for functional_test in ${plugin_tests}; do
+ echo "running $functional_test.sh"
bash ${functional_test}.sh
done
popd
# _print_kubernetes_info() - Prints the login Kubernetes information
function _print_kubernetes_info {
- if ! $(kubectl version &>/dev/null); then
+ if ! $(sudo kubectl version &>/dev/null); then
return
fi
# Expose Dashboard using NodePort
node_port=30080
- KUBE_EDITOR="sed -i \"s|type\: ClusterIP|type\: NodePort|g\"" kubectl -n kube-system edit service kubernetes-dashboard
- KUBE_EDITOR="sed -i \"s|nodePort\: .*|nodePort\: $node_port|g\"" kubectl -n kube-system edit service kubernetes-dashboard
+ # KUBE_EDITOR="sed -i \"s|type\: ClusterIP|type\: NodePort|g\"" kubectl -n kube-system edit service kubernetes-dashboard
+ # KUBE_EDITOR="sed -i \"s|nodePort\: .*|nodePort\: $node_port|g\"" kubectl -n kube-system edit service kubernetes-dashboard
- master_ip=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | awk -F '[:/]' '{print $4}')
+ master_ip=$(sudo kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | awk -F '[:/]' '{print $4}')
printf "Kubernetes Info\n===============\n" > $k8s_info_file
echo "Dashboard URL: https://$master_ip:$node_port" >> $k8s_info_file
kud_playbooks=$kud_infra_folder/playbooks
kud_tests=$kud_folder/../../tests
k8s_info_file=$kud_folder/k8s_info.log
-testing_enabled=${KUD_ENABLE_TESTS:-false}
+testing_enabled=${KUD_ENABLE_TESTS:-true}
container_runtime=${CONTAINER_RUNTIME:-docker}
enable_kata_webhook=${ENABLE_KATA_WEBHOOK:-false}
kata_webhook_runtimeclass=${KATA_WEBHOOK_RUNTIMECLASS:-kata-clh}
sudo find /etc/apt/sources.list.d -maxdepth 1 -name '*jonathonf*' -delete || true
sudo apt-get update
_install_go
-install_k8s
+# install_k8s
+install_k3s
_set_environment_file
-install_addons
-if ${KUD_PLUGIN_ENABLED:-false}; then
+# install_addons
+if ${KUD_PLUGIN_ENABLED:-true}; then
install_plugin
fi
-_print_kubernetes_info
+# _print_kubernetes_info
}
install_packages "" ubuntu_deps ""
fi
+ if ! $(helm --version &>/dev/null); then
+ function ubuntu_deps {
+ sudo snap install --classic helm
+ }
+ install_packages "" ubuntu_deps ""
+ fi
}
# install_ipcalc() - Install ipcalc for tests
local name=$2
echo "$(date +%H:%M:%S) - $name : Destroying $type"
- kubectl delete $type $name --ignore-not-found=true --now
- while kubectl get $type $name &>/dev/null; do
+ sudo kubectl delete $type $name --ignore-not-found=true --now
+ while sudo kubectl get $type $name &>/dev/null; do
echo "$(date +%H:%M:%S) - $name : Destroying $type"
done
}
app2_helm_path="$CSAR_DIR/$csar_id/collectd.tar.gz"
app2_profile_path="$CSAR_DIR/$csar_id/collectd_profile.tar.gz"
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
function populate_CSAR_composite_app_helm {
_checks_args "$1"
return 2
else
echo "[INFO] Server replied with status: ${status}" >&2
+ if [[ "${status}" -gt 400 ]]; then
+ echo "[DEBUG] curl_response_file path: ${curl_response_file}"
+ echo "[DEBUG] Listing contents of /tmp:"
+ ls -lh /tmp
+ fi
cat "${curl_response_file}"
rm "${curl_response_file}"
return_status=$status
# */
base_url=${base_url:-"http://localhost:9015/v2"}
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
csar_id=cb009bfe-bbee-11e8-9766-525400435678
project_name="test_project"
source _functions.sh
base_url="http://localhost:9015/v1"
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
#Will resolve to file $KUBE_CONFIG_DIR/kud
cloud_region_id="kud"
cloud_region_owner="test_owner"
fi
print_msg "Create Resource Bundle Profile Metadata"
-kubeversion=$(kubectl version | grep 'Server Version' | awk -F '"' '{print $6}')
+kubeversion=$(sudo kubectl version | grep 'Server Version' | awk -F '"' '{print $6}')
payload="
{
\"profile-name\": \"${profile_name}\",
exit 1
fi
+
print_msg "Setup cloud data"
+
payload="$(cat <<EOF
{
"cloud-region": "$cloud_region_id",
inst_id=$(jq -r '.id' <<< "$inst_id")
print_msg "Validating Kubernetes"
-kubectl get --no-headers=true --namespace=${namespace} deployment ${release_name}-vault-consul-dev
-kubectl get --no-headers=true --namespace=${namespace} service override-vault-consul
+sudo kubectl get --no-headers=true --namespace=${namespace} deployment ${release_name}-vault-consul-dev
+sudo kubectl get --no-headers=true --namespace=${namespace} service override-vault-consul
echo "VNF Instance created succesfully with id: $inst_id"
print_msg "Getting $inst_id VNF Instance information"
delete_resource "${base_url}/connectivity-info/${cloud_region_id}"
teardown $plugin_deployment_name
+
+echo plugin.sh tests done.
+# echo Container logs:
+# docker compose logs -f $WORKSPACE/deployments/docker-compose.yml deployments-multicloud-k8s-1
base_url=${base_url:-"http://localhost:9015/v2"}
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
csar_id=cb009bfe-bbee-11e8-9766-525400435678
fi
base_url=${base_url:-"http://localhost:9015/v1"}
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
csar_eaa_id=8030a02a-7253-11ea-bc55-0242ac130003
csar_sample_app_id=150da0b3-aa8c-481e-b661-2620b810765e
rb_eaa_name="eaa"
wait_for_deployment consumer 1
print_msg "Validating EAA is running"
-kubectl get --namespace=${namespace_eaa} pods | grep eaa
+sudo kubectl get --namespace=${namespace_eaa} pods | grep eaa
print_msg "Validating sample producer and sample consumer are running"
-kubectl get --namespace=${namespace_sample_app} pods | grep producer
-kubectl get --namespace=${namespace_sample_app} pods | grep consumer
+sudo kubectl get --namespace=${namespace_sample_app} pods | grep producer
+sudo kubectl get --namespace=${namespace_sample_app} pods | grep consumer
print_msg "Validating logs of EAA"
-EAA=`kubectl get --namespace=${namespace_eaa} pods | grep eaa | awk '{print $1}'`
-kubectl logs --namespace=${namespace_eaa} ${EAA}
+EAA=`sudo kubectl get --namespace=${namespace_eaa} pods | grep eaa | awk '{print $1}'`
+sudo kubectl logs --namespace=${namespace_eaa} ${EAA}
print_msg "Validating logs of sample producer and sample consumer"
# sleep 5 seconds to let producer and consumer generate some logs
sleep 5
-PRODUCER=`kubectl get --namespace=${namespace_sample_app} pods | grep producer | awk '{print $1}'`
-CONSUMER=`kubectl get --namespace=${namespace_sample_app} pods | grep consumer | awk '{print $1}'`
-kubectl logs --namespace=${namespace_sample_app} ${PRODUCER}
-kubectl logs --namespace=${namespace_sample_app} ${CONSUMER}
+PRODUCER=`sudo kubectl get --namespace=${namespace_sample_app} pods | grep producer | awk '{print $1}'`
+CONSUMER=`sudo kubectl get --namespace=${namespace_sample_app} pods | grep consumer | awk '{print $1}'`
+sudo kubectl logs --namespace=${namespace_sample_app} ${PRODUCER}
+sudo kubectl logs --namespace=${namespace_sample_app} ${CONSUMER}
print_msg "Retrieving EAA details"
call_api "${base_url}/instance/${vnf_eaa_id}"
fi
base_url=${base_url:-"http://localhost:9015/v1"}
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
csar_id=cb009bfe-bbee-11e8-9766-525400435678
rb_name="edgex"
rb_version="plugin_test"
vnf_id="$(jq -r '.id' <<< "${response}")"
print_msg "Waiting for EdgeX instances"
-sleep 240
+# sleep 240 # TODO: enable this again once pipeline work is done
+# sudo timeout 240s bash -c 'until kubectl get --no-headers=true --namespace=${namespace} deployment edgex-core-command; do sleep 10s; done'
+
print_msg "Validating Kubernetes"
-kubectl get --no-headers=true --namespace=${namespace} deployment edgex-core-command
-kubectl get --no-headers=true --namespace=${namespace} service edgex-core-command
+sudo kubectl get --no-headers=true --namespace=${namespace} deployment edgex-core-command
+sudo kubectl get --no-headers=true --namespace=${namespace} service edgex-core-command
# TODO: Add health checks to verify EdgeX services
print_msg "Retrieving VNF details"
fi
base_url=${base_url:-"http://localhost:9015/v1"}
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
csar_id=cc009bfe-bbee-11e8-9766-525400435678
rb_name="vfw"
rb_version="plugin_test"
print_msg "[BEGIN] Basic checks for instantiated resource"
print_msg "Check if override value has been applied correctly"
-kubectl get network -n "${namespace}" onap-private-net-test
+sudo kubectl get network -n "${namespace}" onap-private-net-test
print_msg "Wait for all pods to start"
wait_for_pod -n "${namespace}" -l app=sink
wait_for_pod -n "${namespace}" -l app=firewall
base_url=${base_url:-"http://localhost:9019/v2"}
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
cluster_provider_name1="cluster_provider1"
cluster_provider_name2="cluster_provider2"
}
EOF
)"
-call_api -d "${payload}" "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName6}/labels"
\ No newline at end of file
+call_api -d "${payload}" "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName6}/labels"
.PHONY: test
test: clean
@$(GO) build -race -buildmode=plugin -o ./mock_files/mock_plugins/mockplugin.so ./mock_files/mock_plugins/mockplugin.go
- @$(GO) test -race ./...
+ @$(GO) test -timeout 20m -race ./...
format:
@$(GO) fmt ./...
.PHONY: cover
cover:
- @$(GO) test -race ./... -coverprofile=coverage.out
+ @$(GO) test -timeout 20m -race ./... -coverprofile=coverage.out
@$(GO) tool cover -html=coverage.out -o coverage.html
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect
github.com/bugsnag/bugsnag-go v2.1.0+incompatible // indirect
github.com/bugsnag/panicwrap v1.3.2 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/coreos/bbolt v1.3.3 // indirect
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/docker/engine v0.0.0-20190620014054-c513a4c6c298
github.com/mitchellh/reflectwalk v1.0.1 // indirect
github.com/pierrec/lz4 v2.0.5+incompatible // indirect
github.com/pkg/errors v0.9.1
+ github.com/prometheus/common v0.10.0 // indirect
github.com/sirupsen/logrus v1.7.0
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c // indirect
github.com/xdg/stringprep v1.0.0 // indirect
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
ports:
- 9015:9015
mongo:
- image: mongo
+ image: mongo:5.0.31
environment:
- HTTP_PROXY=${HTTP_PROXY}
- HTTPS_PROXY=${HTTPS_PROXY}