Use k3s in pipeline 52/142052/69
authorFiete Ostkamp <fiete.ostkamp@telekom.de>
Mon, 15 Sep 2025 13:55:13 +0000 (15:55 +0200)
committerFiete Ostkamp <fiete.ostkamp@telekom.de>
Thu, 18 Sep 2025 10:36:19 +0000 (12:36 +0200)
- replace kubespray with k3s, which is much simpler
  to set up and does not need to be 'production-ready'
  to test deployment in pipeline
- disable addon installation
- enable k8s-plugin test run by verify-shell
- do not enable the other plugin_ tests since that
  would be too much for this CR
- pull docker images through nexus proxy to
  avoid rate limiting from dockerhub
- bump snapshot version to 0.10.2-SNAPSHOT

Issue-ID: MULTICLOUD-1513
Change-Id: I8ca40dff67ddf41153bdd2f7cfa07a22fb2b03f5
Signed-off-by: Fiete Ostkamp <fiete.ostkamp@telekom.de>
22 files changed:
build/Dockerfile
deployments/Dockerfile
deployments/_functions.sh
deployments/build.sh
deployments/docker-compose.yml
docs/kud_architecture.rst
kud/hosting_providers/vagrant/installer.sh
kud/tests/_common_test.sh
kud/tests/_functions.sh
kud/tests/emco.sh
kud/tests/negative_tests/_test_functions.sh
kud/tests/negative_tests/_test_variables_setup.sh
kud/tests/plugin.sh
kud/tests/plugin_collection_v2.sh
kud/tests/plugin_eaa.sh
kud/tests/plugin_edgex.sh
kud/tests/plugin_fw.sh
kud/tests/plugin_ncm_v2.sh
src/k8splugin/Makefile
src/k8splugin/go.mod
src/k8splugin/go.sum
src/orchestrator/scripts/docker-compose.yml

index d7ba3c3..2378773 100644 (file)
@@ -7,7 +7,7 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-FROM golang:1.14.1
+FROM nexus3.onap.org:10001/golang:1.14.1
 
 WORKDIR /go/src/github.com/onap/multicloud-k8s
 COPY ./ ./
@@ -41,4 +41,3 @@ COPY --chown=emco --from=0 /go/src/github.com/onap/multicloud-k8s/src/ovnaction/
 USER emco
 
 ENTRYPOINT ["./entrypoint"]
-
index 842345c..abe37ad 100644 (file)
@@ -7,7 +7,7 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-FROM ubuntu:18.04
+FROM nexus3.onap.org:10001/ubuntu:18.04
 
 ARG HTTP_PROXY=${HTTP_PROXY}
 ARG HTTPS_PROXY=${HTTPS_PROXY}
index c0feed0..3d4e033 100755 (executable)
@@ -9,12 +9,12 @@
 ##############################################################################
 
 function stop_all {
-    docker-compose kill
-    docker-compose down
+    docker compose kill
+    docker compose down
 }
 
 function start_mongo {
-    docker-compose up -d mongo
+    docker compose up -d mongo
     export DATABASE_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $(docker ps -aqf "name=mongo"))
     export no_proxy=${no_proxy:-},${DATABASE_IP}
     export NO_PROXY=${NO_PROXY:-},${DATABASE_IP}
@@ -32,16 +32,18 @@ EOF
 }
 
 function start_all {
-    docker-compose up -d
+    docker compose up -d
 }
 
 function wait_for_service {
-    for try in {0..59}; do
+    for try in {0..29}; do
         echo "$(date +%H:%M:%S) - Waiting for service up"
         sleep 1
         if $(curl http://localhost:9015/v1 &>/dev/null); then
             return 0
         fi
     done
+    echo Wait failed
+    docker compose logs
     exit 1
 }
index f3d195f..b1e0771 100755 (executable)
@@ -13,13 +13,17 @@ set -o pipefail
 
 k8s_path="$(git rev-parse --show-toplevel)"
 
-VERSION="0.10.1-SNAPSHOT"
+VERSION="0.10.2-SNAPSHOT"
 export IMAGE_NAME="nexus3.onap.org:10003/onap/multicloud/k8s"
 
 function _compile_src {
     echo "Compiling source code"
+    go version
+    ls
     pushd $k8s_path/src/k8splugin/
-    make
+    pwd
+    # mount directory and build in container (thus not relying on the state of the runner)
+    docker run --rm -v "$PWD":/usr/src/myapp -w /usr/src/myapp golang:1.14 make
     popd
 }
 
@@ -37,11 +41,19 @@ function _cleanup {
     if [[ -n ${image} ]]; then
         docker images ${image#*:} -q | xargs docker rmi -f
     fi
-    docker ps -a --filter "status=exited" -q | xargs docker rm
+
+    exited_containers=$(docker ps -a --filter "status=exited" -q)
+    if [[ -n "$exited_containers" ]]; then
+        echo "Removing exited containers..."
+        echo "$exited_containers" | xargs docker rm
+    else
+        echo "Nothing to remove"
+    fi
 }
 
 function _build_docker {
     echo "Building docker image"
+    apt-get update && apt-get install -y docker-compose-plugin
     docker-compose build --no-cache
 }
 
index 3fa82fe..af8c990 100644 (file)
@@ -25,14 +25,41 @@ services:
       - HTTPS_PROXY=${HTTPS_PROXY}
       - NO_PROXY=${NO_PROXY},mongo
     depends_on:
-      - mongo
+      mongo:
+        condition: service_healthy
     network_mode: host
+    ports:
+      - 9015:9015
     volumes:
       - /opt/csar:/opt/csar
       - ${PWD}/k8sconfig.json:/opt/multicloud/k8splugin/k8sconfig.json:ro
+    restart: always
+    healthcheck:
+      test: ["CMD", "curl", "http://localhost:9015/v1"]
   mongo:
-    image: mongo
+    image: nexus3.onap.org:10001/mongo:5.0.28
     environment:
       - HTTP_PROXY=${HTTP_PROXY}
       - HTTPS_PROXY=${HTTPS_PROXY}
       - NO_PROXY=${NO_PROXY}
+    healthcheck:
+      test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
+    ports:
+      - 27017:27017
+    restart: always
+  etcd:
+    image: nexus3.onap.org:10001/bitnami/etcd:3
+    environment:
+      - ALLOW_NONE_AUTHENTICATION=yes
+      - HTTP_PROXY=${HTTP_PROXY}
+      - HTTPS_PROXY=${HTTPS_PROXY}
+      - NO_PROXY=${NO_PROXY}
+    volumes:
+      - etcd_data:/bitnami/etcd
+    ports:
+      - 2379:2379
+      - 2380:2380
+
+volumes:
+  etcd_data:
+    driver: local
index 2f72441..428d04a 100644 (file)
@@ -130,7 +130,7 @@ This bash script is used for the installation and configuration of
 dependencies required for the usage of the KUD via Virtual Machines.
 Some of this dependencies are:
 
-  - `Vagrant <https://www.vagrantup.com/>`_,
+  - Vagrant,
   - `Libvirt <https://libvirt.org/>`_ or `VirtualBox <https://www.virtualbox.org/>`_
 
 The *-p* argument determines the Virtualization provider to be used
index b69a0b3..5a7ea21 100755 (executable)
@@ -30,7 +30,7 @@ function _install_go {
         return
     fi
 
-    wget https://dl.google.com/go/$tarball
+    wget -nv https://dl.google.com/go/$tarball
     sudo tar -C /usr/local -xzf $tarball
     rm $tarball
 
@@ -60,9 +60,9 @@ function _set_environment_file {
     OVN_CENTRAL_INTERFACE="${OVN_CENTRAL_INTERFACE:-$(ip addr show | awk '/inet.*brd/{print $NF; exit}')}"
     echo "export OVN_CENTRAL_INTERFACE=${OVN_CENTRAL_INTERFACE}" | sudo tee --append /etc/environment
     echo "export OVN_CENTRAL_ADDRESS=$(get_ovn_central_address)" | sudo tee --append /etc/environment
-    echo "export KUBE_CONFIG_DIR=/opt/kubeconfig" | sudo tee --append /etc/environment
+    echo "export KUBE_CONFIG_DIR=$HOME/.kube" | sudo tee --append /etc/environment
     echo "export CSAR_DIR=/opt/csar" | sudo tee --append /etc/environment
-    echo "export ANSIBLE_CONFIG=${ANSIBLE_CONFIG}" | sudo tee --append /etc/environment
+    echo "export ANSIBLE_CONFIG=${ANSIBLE_CONFIG}" | sudo tee --append /etc/environment
 }
 
 # install_k8s() - Install Kubernetes using kubespray tool
@@ -141,6 +141,18 @@ function install_k8s {
     sudo cp $kud_inventory_folder/artifacts/kubectl /usr/local/bin/
 }
 
+function install_k3s {
+    echo "Installing k3s..."
+
+    curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" sh -
+
+    systemctl status k3s
+
+    sudo kubectl get all -n kube-system
+
+    sudo kubectl cluster-info
+}
+
 # install_addons() - Install Kubenertes AddOns
 function install_addons {
     source /etc/environment
@@ -219,21 +231,25 @@ function install_addons {
 # install_plugin() - Install ONAP Multicloud Kubernetes plugin
 function install_plugin {
     echo "Installing multicloud/k8s plugin"
-    sudo -E pip install --no-cache-dir docker-compose
 
     sudo mkdir -p /opt/{kubeconfig,consul/config}
-    sudo cp $HOME/.kube/config /opt/kubeconfig/kud
+    # sudo cp $HOME/.kube/config /opt/kubeconfig/kud
+    mkdir -p $WORKSPACE/.kube
+    kubeconfig_path="$WORKSPACE/.kube/config"
+    sudo cat /etc/rancher/k3s/k3s.yaml > $kubeconfig_path
 
     pushd $kud_folder/../../../deployments
     sudo ./build.sh
     if [[ "${testing_enabled}" == "true" ]]; then
         sudo ./start.sh
         pushd $kud_tests
-        plugin_tests="plugin plugin_edgex plugin_fw plugin_eaa"
+        # plugin_tests="plugin plugin_edgex plugin_fw plugin_eaa"
+        plugin_tests="plugin" # TODO: re-enable plugin_edgex plugin_fw plugin_eaa
         if [[ $kubespray_version == "2.16.0" ]]; then
             plugin_tests=${plugin_tests//plugin_fw/};
         fi
         for functional_test in ${plugin_tests}; do
+            echo "running $functional_test.sh"
             bash ${functional_test}.sh
         done
         popd
@@ -243,15 +259,15 @@ function install_plugin {
 
 # _print_kubernetes_info() - Prints the login Kubernetes information
 function _print_kubernetes_info {
-    if ! $(kubectl version &>/dev/null); then
+    if ! $(sudo kubectl version &>/dev/null); then
         return
     fi
     # Expose Dashboard using NodePort
     node_port=30080
-    KUBE_EDITOR="sed -i \"s|type\: ClusterIP|type\: NodePort|g\"" kubectl -n kube-system edit service kubernetes-dashboard
-    KUBE_EDITOR="sed -i \"s|nodePort\: .*|nodePort\: $node_port|g\"" kubectl -n kube-system edit service kubernetes-dashboard
+    KUBE_EDITOR="sed -i \"s|type\: ClusterIP|type\: NodePort|g\"" kubectl -n kube-system edit service kubernetes-dashboard
+    KUBE_EDITOR="sed -i \"s|nodePort\: .*|nodePort\: $node_port|g\"" kubectl -n kube-system edit service kubernetes-dashboard
 
-    master_ip=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | awk -F '[:/]' '{print $4}')
+    master_ip=$(sudo kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | awk -F '[:/]' '{print $4}')
 
     printf "Kubernetes Info\n===============\n" > $k8s_info_file
     echo "Dashboard URL: https://$master_ip:$node_port" >> $k8s_info_file
@@ -294,7 +310,7 @@ kud_inventory=$kud_inventory_folder/hosts.ini
 kud_playbooks=$kud_infra_folder/playbooks
 kud_tests=$kud_folder/../../tests
 k8s_info_file=$kud_folder/k8s_info.log
-testing_enabled=${KUD_ENABLE_TESTS:-false}
+testing_enabled=${KUD_ENABLE_TESTS:-true}
 container_runtime=${CONTAINER_RUNTIME:-docker}
 enable_kata_webhook=${ENABLE_KATA_WEBHOOK:-false}
 kata_webhook_runtimeclass=${KATA_WEBHOOK_RUNTIMECLASS:-kata-clh}
@@ -326,10 +342,11 @@ sudo ls /etc/apt/sources.list.d/ || true
 sudo find /etc/apt/sources.list.d -maxdepth 1 -name '*jonathonf*' -delete || true
 sudo apt-get update
 _install_go
-install_k8s
+# install_k8s
+install_k3s
 _set_environment_file
-install_addons
-if ${KUD_PLUGIN_ENABLED:-false}; then
+install_addons
+if ${KUD_PLUGIN_ENABLED:-true}; then
     install_plugin
 fi
-_print_kubernetes_info
+_print_kubernetes_info
index 0ca606b..a00391c 100755 (executable)
@@ -20,6 +20,12 @@ function install_deps {
         }
         install_packages "" ubuntu_deps ""
     fi
+    if ! $(helm --version &>/dev/null); then
+        function ubuntu_deps {
+            sudo snap install --classic helm
+        }
+        install_packages "" ubuntu_deps ""
+    fi
 }
 
 # install_ipcalc() - Install ipcalc for tests
index 4c26195..6d24602 100755 (executable)
@@ -186,8 +186,8 @@ function _destroy {
     local name=$2
 
     echo "$(date +%H:%M:%S) - $name : Destroying $type"
-    kubectl delete $type $name --ignore-not-found=true --now
-    while kubectl get $type $name &>/dev/null; do
+    sudo kubectl delete $type $name --ignore-not-found=true --now
+    while sudo kubectl get $type $name &>/dev/null; do
         echo "$(date +%H:%M:%S) - $name : Destroying $type"
     done
 }
index 109f563..5b2c3d3 100755 (executable)
@@ -32,7 +32,7 @@ app1_profile_path="$CSAR_DIR/$csar_id/prometheus-operator_profile.tar.gz"
 app2_helm_path="$CSAR_DIR/$csar_id/collectd.tar.gz"
 app2_profile_path="$CSAR_DIR/$csar_id/collectd_profile.tar.gz"
 
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
 
 function populate_CSAR_composite_app_helm {
     _checks_args "$1"
index f05c21f..25e243c 100755 (executable)
@@ -50,6 +50,11 @@ function call_api_negative {
         return 2
     else
         echo "[INFO] Server replied with status: ${status}" >&2
+        if [[ "${status}" -gt 400 ]]; then
+            echo "[DEBUG] curl_response_file path: ${curl_response_file}"
+            echo "[DEBUG] Listing contents of /tmp:"
+            ls -lh /tmp
+        fi
         cat "${curl_response_file}"
         rm "${curl_response_file}"
         return_status=$status
index 1630c03..0364209 100755 (executable)
@@ -15,7 +15,7 @@
 #  */
 
 base_url=${base_url:-"http://localhost:9015/v2"}
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
 csar_id=cb009bfe-bbee-11e8-9766-525400435678
 
 project_name="test_project"
index aff0412..816a4d8 100755 (executable)
@@ -18,7 +18,7 @@ source _common_test.sh
 source _functions.sh
 
 base_url="http://localhost:9015/v1"
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
 #Will resolve to file $KUBE_CONFIG_DIR/kud
 cloud_region_id="kud"
 cloud_region_owner="test_owner"
@@ -94,7 +94,7 @@ if [[ "$rb_list" != *"${rb_name}"* ]]; then
 fi
 
 print_msg "Create Resource Bundle Profile Metadata"
-kubeversion=$(kubectl version | grep 'Server Version' | awk -F '"' '{print $6}')
+kubeversion=$(sudo kubectl version | grep 'Server Version' | awk -F '"' '{print $6}')
 payload="
 {
     \"profile-name\": \"${profile_name}\",
@@ -121,7 +121,9 @@ if [[ "$rbp_ret" != *"${profile_name}"* ]]; then
     exit 1
 fi
 
+
 print_msg "Setup cloud data"
+
 payload="$(cat <<EOF
 {
     "cloud-region": "$cloud_region_id",
@@ -147,8 +149,8 @@ echo "$inst_id"
 inst_id=$(jq -r '.id' <<< "$inst_id")
 
 print_msg "Validating Kubernetes"
-kubectl get --no-headers=true --namespace=${namespace} deployment ${release_name}-vault-consul-dev
-kubectl get --no-headers=true --namespace=${namespace} service override-vault-consul
+sudo kubectl get --no-headers=true --namespace=${namespace} deployment ${release_name}-vault-consul-dev
+sudo kubectl get --no-headers=true --namespace=${namespace} service override-vault-consul
 echo "VNF Instance created succesfully with id: $inst_id"
 
 print_msg "Getting $inst_id VNF Instance information"
@@ -170,3 +172,7 @@ print_msg "Deleting ${cloud_region_id} cloud region connection"
 delete_resource "${base_url}/connectivity-info/${cloud_region_id}"
 
 teardown $plugin_deployment_name
+
+echo plugin.sh tests done.
+# echo Container logs:
+# docker compose logs -f $WORKSPACE/deployments/docker-compose.yml deployments-multicloud-k8s-1
index 5695dfc..df61099 100755 (executable)
@@ -32,7 +32,7 @@ fi
 
 base_url=${base_url:-"http://localhost:9015/v2"}
 
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
 csar_id=cb009bfe-bbee-11e8-9766-525400435678
 
 
index 1fedbbf..d5c69ba 100755 (executable)
@@ -25,7 +25,7 @@ if [ ${1:+1} ]; then
 fi
 
 base_url=${base_url:-"http://localhost:9015/v1"}
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
 csar_eaa_id=8030a02a-7253-11ea-bc55-0242ac130003
 csar_sample_app_id=150da0b3-aa8c-481e-b661-2620b810765e
 rb_eaa_name="eaa"
@@ -158,23 +158,23 @@ wait_for_deployment producer 1
 wait_for_deployment consumer 1
 
 print_msg "Validating EAA is running"
-kubectl get --namespace=${namespace_eaa} pods | grep eaa
+sudo kubectl get --namespace=${namespace_eaa} pods | grep eaa
 
 print_msg "Validating sample producer and sample consumer are running"
-kubectl get --namespace=${namespace_sample_app}  pods | grep producer
-kubectl get --namespace=${namespace_sample_app} pods | grep consumer
+sudo kubectl get --namespace=${namespace_sample_app}  pods | grep producer
+sudo kubectl get --namespace=${namespace_sample_app} pods | grep consumer
 
 print_msg "Validating logs of EAA"
-EAA=`kubectl get --namespace=${namespace_eaa} pods | grep eaa | awk '{print $1}'`
-kubectl logs --namespace=${namespace_eaa}  ${EAA}
+EAA=`sudo kubectl get --namespace=${namespace_eaa} pods | grep eaa | awk '{print $1}'`
+sudo kubectl logs --namespace=${namespace_eaa}  ${EAA}
 
 print_msg "Validating logs of sample producer and sample consumer"
 # sleep 5 seconds to let producer and consumer generate some logs
 sleep 5
-PRODUCER=`kubectl get --namespace=${namespace_sample_app} pods | grep producer | awk '{print $1}'`
-CONSUMER=`kubectl get --namespace=${namespace_sample_app} pods | grep consumer | awk '{print $1}'`
-kubectl logs --namespace=${namespace_sample_app} ${PRODUCER}
-kubectl logs --namespace=${namespace_sample_app} ${CONSUMER}
+PRODUCER=`sudo kubectl get --namespace=${namespace_sample_app} pods | grep producer | awk '{print $1}'`
+CONSUMER=`sudo kubectl get --namespace=${namespace_sample_app} pods | grep consumer | awk '{print $1}'`
+sudo kubectl logs --namespace=${namespace_sample_app} ${PRODUCER}
+sudo kubectl logs --namespace=${namespace_sample_app} ${CONSUMER}
 
 print_msg "Retrieving EAA details"
 call_api "${base_url}/instance/${vnf_eaa_id}"
index ff27ab2..abc235a 100755 (executable)
@@ -25,7 +25,7 @@ if [ ${1:+1} ]; then
 fi
 
 base_url=${base_url:-"http://localhost:9015/v1"}
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
 csar_id=cb009bfe-bbee-11e8-9766-525400435678
 rb_name="edgex"
 rb_version="plugin_test"
@@ -99,11 +99,13 @@ echo "$response"
 vnf_id="$(jq -r '.id' <<< "${response}")"
 
 print_msg "Waiting for EdgeX instances"
-sleep 240
+# sleep 240 # TODO: enable this again once pipeline work is done
+# sudo timeout 240s bash -c 'until kubectl get --no-headers=true --namespace=${namespace} deployment edgex-core-command; do sleep 10s; done'
+
 
 print_msg "Validating Kubernetes"
-kubectl get --no-headers=true --namespace=${namespace} deployment edgex-core-command
-kubectl get --no-headers=true --namespace=${namespace} service edgex-core-command
+sudo kubectl get --no-headers=true --namespace=${namespace} deployment edgex-core-command
+sudo kubectl get --no-headers=true --namespace=${namespace} service edgex-core-command
 # TODO: Add health checks to verify EdgeX services
 
 print_msg "Retrieving VNF details"
index 0a0f62d..71bc7d4 100755 (executable)
@@ -29,7 +29,7 @@ if [ ${1:+1} ]; then
 fi
 
 base_url=${base_url:-"http://localhost:9015/v1"}
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
 csar_id=cc009bfe-bbee-11e8-9766-525400435678
 rb_name="vfw"
 rb_version="plugin_test"
@@ -107,7 +107,7 @@ vnf_id="$(jq -r '.id' <<< "${response}")"
 
 print_msg "[BEGIN] Basic checks for instantiated resource"
 print_msg "Check if override value has been applied correctly"
-kubectl get network -n "${namespace}" onap-private-net-test
+sudo kubectl get network -n "${namespace}" onap-private-net-test
 print_msg "Wait for all pods to start"
 wait_for_pod -n "${namespace}" -l app=sink
 wait_for_pod -n "${namespace}" -l app=firewall
index 92f93ad..3adac89 100755 (executable)
@@ -25,7 +25,7 @@ source _common.sh
 
 base_url=${base_url:-"http://localhost:9019/v2"}
 
-kubeconfig_path="$HOME/.kube/config"
+kubeconfig_path="$WORKSPACE/.kube/config"
 
 cluster_provider_name1="cluster_provider1"
 cluster_provider_name2="cluster_provider2"
@@ -252,4 +252,4 @@ payload="$(cat <<EOF
 }
 EOF
 )"
-call_api -d "${payload}" "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName6}/labels"
\ No newline at end of file
+call_api -d "${payload}" "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName6}/labels"
index a1cda8d..235cb23 100644 (file)
@@ -27,7 +27,7 @@ deploy: build
 .PHONY: test
 test: clean
        @$(GO) build -race -buildmode=plugin -o ./mock_files/mock_plugins/mockplugin.so ./mock_files/mock_plugins/mockplugin.go
-       @$(GO) test -race ./...
+       @$(GO) test -timeout 20m -race ./...
 
 format:
        @$(GO) fmt ./...
@@ -41,5 +41,5 @@ clean:
 
 .PHONY: cover
 cover:
-       @$(GO) test -race ./... -coverprofile=coverage.out
+       @$(GO) test -timeout 20m -race ./... -coverprofile=coverage.out
        @$(GO) tool cover -html=coverage.out -o coverage.html
index 671b64a..cd1712e 100644 (file)
@@ -9,6 +9,7 @@ require (
        github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect
        github.com/bugsnag/bugsnag-go v2.1.0+incompatible // indirect
        github.com/bugsnag/panicwrap v1.3.2 // indirect
+       github.com/cespare/xxhash/v2 v2.3.0 // indirect
        github.com/coreos/bbolt v1.3.3 // indirect
        github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
        github.com/docker/engine v0.0.0-20190620014054-c513a4c6c298
@@ -33,6 +34,7 @@ require (
        github.com/mitchellh/reflectwalk v1.0.1 // indirect
        github.com/pierrec/lz4 v2.0.5+incompatible // indirect
        github.com/pkg/errors v0.9.1
+       github.com/prometheus/common v0.10.0 // indirect
        github.com/sirupsen/logrus v1.7.0
        github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c // indirect
        github.com/xdg/stringprep v1.0.0 // indirect
index 3eaafcc..d069bb9 100644 (file)
@@ -102,8 +102,10 @@ github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia
 github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
 github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
@@ -163,6 +165,8 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
 github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
@@ -1442,6 +1446,7 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
 google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
index 3bb7bda..5c20504 100644 (file)
@@ -33,7 +33,7 @@ services:
     ports:
       - 9015:9015
   mongo:
-    image: mongo
+    image: mongo:5.0.31
     environment:
       - HTTP_PROXY=${HTTP_PROXY}
       - HTTPS_PROXY=${HTTPS_PROXY}