# which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-- src: andrewrothstein.go
-  version: v2.1.15
 - src: andrewrothstein.kubernetes-helm
-  version: v1.3.16
+  version: v2.0.7
 - src: geerlingguy.docker
   version: 2.5.2
 
 cmk_untaint_required: true
 
 go_version: '1.14.15'
-kubespray_version: 2.14.1
-# This matches the helm_version from kubespray defaults
-helm_client_version: 3.2.4
 # kud playbooks not compatible with 2.8.0 - see MULTICLOUD-634
 ansible_version: 2.9.7
 
 
 
 ```
 $ kubectl --kubeconfig=/opt/kud/multi-cluster/cluster-101/artifacts/admin.conf cluster-info
-Kubernetes master is running at https://192.168.121.2:6443
+Kubernetes control plane is running at https://192.168.121.2:6443
 coredns is running at https://192.168.121.2:6443/api/v1/namespaces/kube-system/services/coredns:dns/proxy
 kubernetes-dashboard is running at https://192.168.121.2:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
 
 To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
 $ kubectl --kubeconfig=/opt/kud/multi-cluster/cluster-102/artifacts/admin.conf cluster-info
-Kubernetes master is running at https://192.168.121.6:6443
+Kubernetes control plane is running at https://192.168.121.6:6443
 coredns is running at https://192.168.121.6:6443/api/v1/namespaces/kube-system/services/coredns:dns/proxy
 kubernetes-dashboard is running at https://192.168.121.6:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
 
 
 
 function install_kubespray {
     echo "Deploying kubernetes"
-    version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | \
-        awk -F ': ' '{print $2}')
+    version=$kubespray_version
     local_release_dir=$(grep "local_release_dir" \
         $kud_inventory_folder/group_vars/k8s-cluster.yml | \
         awk -F "\"" '{print $2}')
         $kud_infra_folder/galaxy-requirements.yml --ignore-errors
 
     ansible-playbook $verbose -i \
-        $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml \
+        $kud_inventory -e "base_dest=$HOME" -e "helm_client_version=$helm_client_version" $kud_playbooks/configure-kud.yml \
         | tee $cluster_log/setup-kud.log
 
     kud_addons="${KUD_ADDONS:-} ${plugins_name}"
             case $addon in
                 "onap4k8s" )
                     echo "Test the onap4k8s plugin installation"
-                    for functional_test in plugin_edgex plugin_fw plugin_eaa; do
+                    for functional_test in plugin_edgex plugin_eaa; do
                         bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
                     done
                     ;;
-                "emco" )
-                    echo "Test the emco plugin installation"
-                    # TODO plugin_fw_v2 requires virtlet and a patched multus to succeed
-                    # for functional_test in plugin_fw_v2; do
-                    #     bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
-                    # done
-                    ;;
             esac
             popd
         done
 fi
 
 # Configuration values
+kubespray_version="2.16.0"
+helm_client_version="3.5.4"
 dest_folder=/opt
 kud_folder=${INSTALLER_DIR}
 kud_infra_folder=$kud_folder/../../deployment_infra
 # For containerd the etcd_deployment_type: docker is the default and doesn't work.
 # You have to use either etcd_kubeadm_enabled: true or etcd_deployment_type: host
 # See https://github.com/kubernetes-sigs/kubespray/issues/5713
+#
+# Keep these overrides as plain key=value pairs; JSON notation (as used in
+# the containers installer for download_localhost/download_run_once) is only
+# needed when a boolean like false must not be interpreted as a string by
+# ansible — no such values are passed here.
 kud_kata_override_variables="container_manager=containerd \
-    -e etcd_deployment_type=host -e kubelet_cgroup_driver=cgroupfs"
+    -e etcd_deployment_type=host"
 
 mkdir -p /opt/csar
 export CSAR_DIR=/opt/csar
 }
 
 function install_cluster {
-    version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | \
-        awk -F ': ' '{print $2}')
+    version=$kubespray_version
     export ANSIBLE_CONFIG=$dest_folder/kubespray-$version/ansible.cfg
     install_k8s $1
     if [ ${2:+1} ]; then
 
 
 ## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
 #kube_oidc_auth: false
-kube_basic_auth: true
+#kube_basic_auth: true
 kube_token_auth: true
 
 # Choose network plugin (calico, contiv, weave or flannel)
 local_volumes_enabled: true
 local_volume_provisioner_enabled: true
 
+## Change this to use another Kubernetes version, e.g. a current beta release
+kube_version: v1.20.7
+
 # Helm deployment
 helm_enabled: true
 helm_stable_repo_url: "https://charts.helm.sh/stable"
 kubelet_node_config_extra_args:
   cpuManagerPolicy: "static" # Options: none (disabled), static (default)
   topologyManagerPolicy: "best-effort" # Options: none (disabled), best-effort (default), restricted, single-numa-node
+
+# Deploy the Kubernetes dashboard
+dashboard_enabled: true
 
 function install_k8s {
     echo "Deploying kubernetes"
     local dest_folder=/opt
-    version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | awk -F ': ' '{print $2}')
+    version=$kubespray_version
     local_release_dir=$(grep "local_release_dir" $kud_inventory_folder/group_vars/k8s-cluster.yml | awk -F "\"" '{print $2}')
     local tarball=v$version.tar.gz
     sudo apt-get install -y sshpass make unzip # install make to run mitogen target and unzip is mitogen playbook dependency
     make mitogen
     popd
     rm -f $kud_inventory_folder/group_vars/all.yml 2> /dev/null
+    if [[ -n "${kube_version:-}" ]]; then
+        echo "kube_version: $kube_version" | tee --append $kud_inventory_folder/group_vars/all.yml
+    fi
+    if [[ -n "${kube_basic_auth:-}" ]]; then
+        echo "kube_basic_auth: $kube_basic_auth" | tee --append $kud_inventory_folder/group_vars/all.yml
+    fi
+    if [[ -n "${dashboard_enabled:-}" ]]; then
+        echo "dashboard_enabled: $dashboard_enabled" | tee --append $kud_inventory_folder/group_vars/all.yml
+    fi
     if [[ -n "${verbose:-}" ]]; then
-        echo "kube_log_level: 5" | tee $kud_inventory_folder/group_vars/all.yml
+        echo "kube_log_level: 5" | tee --append $kud_inventory_folder/group_vars/all.yml
     else
-        echo "kube_log_level: 2" | tee $kud_inventory_folder/group_vars/all.yml
+        echo "kube_log_level: 2" | tee --append $kud_inventory_folder/group_vars/all.yml
     fi
     echo "kubeadm_enabled: true" | tee --append $kud_inventory_folder/group_vars/all.yml
     if [[ -n "${http_proxy:-}" ]]; then
             --become-user=root | sudo tee $log_folder/setup-kubernetes.log
     elif [ "$container_runtime" == "containerd" ]; then
         /bin/echo -e "\n\e[1;42mContainerd will be used as the container runtime interface\e[0m"
-        # Because the kud_kata_override_variable has its own quotations in it
+        # Because the kud_kata_override_variables has its own quotations in it
         # a eval command is needed to properly execute the ansible script
         ansible_kubespray_cmd="ansible-playbook $verbose -i $kud_inventory \
             $dest_folder/kubespray-$version/cluster.yml \
     echo "Installing Kubernetes AddOns"
     _install_ansible
     sudo ansible-galaxy install $verbose -r $kud_infra_folder/galaxy-requirements.yml --ignore-errors
-    ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml | sudo tee $log_folder/setup-kud.log
+    ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" -e "helm_client_version=$helm_client_version" $kud_playbooks/configure-kud.yml | sudo tee $log_folder/setup-kud.log
 
     # The order of KUD_ADDONS is important: some plugins (sriov, qat)
     # require nfd to be enabled. Some addons are not currently supported with containerd
     if [ "${container_runtime}" == "docker" ]; then
-        kud_addons=${KUD_ADDONS:-virtlet ovn4nfv nfd sriov \
-            qat optane cmk}
+        default_addons="virtlet ovn4nfv nfd sriov qat optane cmk"
+        if [[ $kubespray_version == "2.16.0" ]]; then
+            default_addons=${default_addons//virtlet/};
+        fi
     elif [ "${container_runtime}" == "containerd" ]; then
-        kud_addons=${KUD_ADDONS:-ovn4nfv nfd}
+        default_addons="ovn4nfv nfd"
     fi
+    kud_addons=${KUD_ADDONS:-$default_addons}
 
     for addon in ${kud_addons}; do
         echo "Deploying $addon using configure-$addon.yml playbook.."
     if [[ "${testing_enabled}" == "true" ]]; then
         sudo ./start.sh
         pushd $kud_tests
-        for functional_test in plugin plugin_edgex plugin_fw plugin_eaa; do
+        plugin_tests="plugin plugin_edgex plugin_fw plugin_eaa"
+        if [[ $kubespray_version == "2.16.0" ]]; then
+            plugin_tests=${plugin_tests//plugin_fw/};
+        fi
+        for functional_test in ${plugin_tests}; do
             bash ${functional_test}.sh
         done
         popd
 fi
 
 # Configuration values
+kubespray_version=${KUBESPRAY_VERSION:-2.14.1}
+if [[ $kubespray_version == "2.16.0" ]]; then
+    helm_client_version="3.5.4"
+    kube_version="v1.20.7"
+    dashboard_enabled="true"
+else
+    helm_client_version="3.2.4"
+    kube_basic_auth="true"
+fi
 log_folder=/var/log/kud
 kud_folder=${INSTALLER_DIR}
 kud_infra_folder=$kud_folder/../../deployment_infra
 # For containerd the etcd_deployment_type: docker is the default and doesn't work.
 # You have to use either etcd_kubeadm_enabled: true or etcd_deployment_type: host
 # See https://github.com/kubernetes-sigs/kubespray/issues/5713
+#
+# The JSON notation below is used to prevent false from being interpreted as a
+# string by ansible.
 kud_kata_override_variables="container_manager=containerd \
     -e etcd_deployment_type=host -e kubelet_cgroup_driver=cgroupfs \
     -e \"{'download_localhost': false}\" -e \"{'download_run_once': false}\""
+if [[ $kubespray_version == "2.16.0" ]]; then
+    kud_kata_override_variables=${kud_kata_override_variables//-e kubelet_cgroup_driver=cgroupfs/}
+fi
 
 sudo mkdir -p $log_folder
 sudo mkdir -p /opt/csar
 
 
 ## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
 #kube_oidc_auth: false
-kube_basic_auth: true
+#kube_basic_auth: true
 kube_token_auth: true
 
 # Choose network plugin (calico, contiv, weave or flannel)
 
     echo -e "${RED} $msg ---------------------------------------${NC}"
 }
 
+# Print the IP of the first endpoint address of the built-in "kubernetes"
+# Endpoints object (i.e. an API-server / control-plane address) for the
+# current kubeconfig context. Replaces the old "kubectl cluster-info | grep"
+# parsing, which broke when the banner changed from "Kubernetes master" to
+# "Kubernetes control plane".
+function control_plane_ip {
+    kubectl get endpoints kubernetes -o jsonpath='{.subsets[].addresses[].ip}'
+}
+
+# Run the given command line ("$@") on the control-plane node over ssh.
+# StrictHostKeyChecking is disabled and known_hosts is discarded, so no
+# prior host-key entry is required (NOTE(review): presumably because node
+# host keys change across re-provisioning — confirm).
 function ssh_cluster {
-    master_ip=$(kubectl cluster-info | grep "Kubernetes master" | awk -F '[:/]' '{print $4}')
-    ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${master_ip} -- "$@"
+    ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $(control_plane_ip) -- "$@"
 }
 
 function get_ovn_central_address {
 
     ! call_api -X GET "$1" >/dev/null
 }
 
-master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-    awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
+master_ip=$(control_plane_ip)
 rsync_service_port=30441
 rsync_service_host="$master_ip"
 base_url_orchestrator=${base_url_orchestrator:-"http://$master_ip:30415/v2"}
 
 fi
 
 # Test
-master_ip=$(kubectl cluster-info | grep "Kubernetes master" | awk -F '[:/]' '{print $4}')
 deployment_pod=$(kubectl get pods | grep $kubevirt_vmi_name | awk '{print $1}')
 echo "Pod name: $deployment_pod"
 echo "ssh testuser@$(kubectl get pods $deployment_pod -o jsonpath="{.status.podIP}")"
 for ((try=1;try<=$tries;try++)); do
     echo "try $try/$tries: Wait for $interval seconds to check for ssh access"
     sleep $interval
-    if sshpass -p testuser ssh -o ProxyCommand="ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p $master_ip" -o StrictHostKeyChecking=no testuser@$(kubectl get pods $deployment_pod -o jsonpath="{.status.podIP}") -- uptime; then
+    if sshpass -p testuser ssh -o ProxyCommand="ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p $(control_plane_ip)" -o StrictHostKeyChecking=no testuser@$(kubectl get pods $deployment_pod -o jsonpath="{.status.podIP}") -- uptime; then
         echo "ssh access check is success"
         break
     fi
 
 
 if [ ${1:+1} ]; then
     if [ "$1" == "--external" ]; then
-        master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-            awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
         onap_svc_node_port=30498
-        base_url="http://$master_ip:$onap_svc_node_port/v1"
+        base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
     fi
 fi
 
 
 
 if [ ${1:+1} ]; then
     if [ "$1" == "--external" ]; then
-        master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-            awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
         onap_svc_node_port=30498
-        base_url="http://$master_ip:$onap_svc_node_port/v1"
+        base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
     fi
 fi
 
 
 
 if [ ${1:+1} ]; then
     if [ "$1" == "--external" ]; then
-        master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-            awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
         onap_svc_node_port=30498
-        base_url="http://$master_ip:$onap_svc_node_port/v1"
+        base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
     fi
 fi
 
 
 
 if [ ${1:+1} ]; then
     if [ "$1" == "--external" ]; then
-        master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-            awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
         onap_svc_node_port=30498
-        base_url="http://$master_ip:$onap_svc_node_port/v1"
+        base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
     fi
 fi
 
 
 
 if [ ${1:+1} ]; then
     if [ "$1" == "--external" ]; then
-        master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-            awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
         onap_svc_node_port=30498
-        base_url="http://$master_ip:$onap_svc_node_port/v1"
+        base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
     fi
 fi
 
 
 
 if [ ${1:+1} ]; then
     if [ "$1" == "--external" ]; then
-        master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-            awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
         onap_svc_node_port=30498
-        base_url="http://$master_ip:$onap_svc_node_port/v1"
+        base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
     fi
 fi
 
 
 
 if [ ${1:+1} ]; then
     if [ "$1" == "--external" ]; then
-        master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-            awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
         onap_svc_node_port=30498
-        base_url="http://$master_ip:$onap_svc_node_port/v1"
+        base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
     fi
 fi
 
 
 
 if [ ${1:+1} ]; then
     if [ "$1" == "--external" ]; then
-        master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-            awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
         onap_svc_node_port=30498
-        base_url="http://$master_ip:$onap_svc_node_port/v1"
+        base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
     fi
 fi
 
 
 
 if [ ${1:+1} ]; then
     if [ "$1" == "--external" ]; then
-        master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-            awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
         onap_svc_node_port=30498
-        base_url="http://$master_ip:$onap_svc_node_port/v1"
+        base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
     fi
 fi
 
 
 source _functions.sh
 set +e
 
-master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-    awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
 onap_svc_node_port=30498
 declare -i timeout=18
 declare -i interval=10
 
-base_url="http://$master_ip:$onap_svc_node_port/v1"
+base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
 
 function check_onap_svc {
     while ((timeout > 0)); do
 
 
 if [ ${1:+1} ]; then
     if [ "$1" == "--external" ]; then
-        master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-            awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
         onap_svc_node_port=30498
-        base_url="http://$master_ip:$onap_svc_node_port/v1"
+        base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
     fi
 fi
 
 
 
 if [ ${1:+1} ]; then
     if [ "$1" == "--external" ]; then
-        master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-            awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
         onap_svc_node_port=30498
-        base_url="http://$master_ip:$onap_svc_node_port/v1"
+        base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
     fi
 fi
 
 
 
 if [ ${1:+1} ]; then
     if [ "$1" == "--external" ]; then
-        master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-            awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
         onap_svc_node_port=30498
-        base_url="http://$master_ip:$onap_svc_node_port/v1"
+        base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
     fi
 fi
 
 
 
 if [ ${1:+1} ]; then
     if [ "$1" == "--external" ]; then
-        master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
-            awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
         onap_svc_node_port=30498
-        base_url="http://$master_ip:$onap_svc_node_port/v1"
+        base_url="http://$(control_plane_ip):$onap_svc_node_port/v1"
     fi
 fi
 
 
 
     case $arg in
         "--external" )
-            service_host=$(kubectl cluster-info | grep "Kubernetes master" | \
-                awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
+            service_host=$(control_plane_ip)
             shift
             ;;
         * )