Replace virtlet with kubevirt in plugin_fw_v2
multicloud/k8s.git: kud/hosting_providers/containerized/installer.sh
#!/bin/bash
# SPDX-license-identifier: Apache-2.0
##############################################################################
# Copyright (c) 2018
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

set -o errexit
set -o nounset
set -o pipefail
set -ex
INSTALLER_DIR="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")"

# install_prerequisites() - Install the packages needed to run the installer
function install_prerequisites {
    apt-get update
    apt-get install -y software-properties-common
    add-apt-repository -y ppa:longsleep/golang-backports
    apt-get update
    apt-get install -y \
            curl \
            gettext-base \
            git \
            golang-go \
            make \
            python3-pip \
            rsync \
            sshpass \
            sudo \
            unzip \
            vim \
            wget
    update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1
}

# _install_ansible() - Install and configure the Ansible version pinned in kud-vars.yml
function _install_ansible {
    local version=$(grep "ansible_version" ${kud_playbooks}/kud-vars.yml |
        awk -F ': ' '{print $2}')
    mkdir -p /etc/ansible/
    pip install --no-cache-dir ansible==$version
}
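
# For reference, the grep above expects kud-vars.yml to carry an entry of the
# form shown below (the placeholder is illustrative; the pinned value lives in
# that file, not here):
#   ansible_version: <x.y.z>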

# install_kubespray() - Download kubespray and prepare the cluster group_vars
function install_kubespray {
    echo "Deploying Kubernetes"
    version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | \
        awk -F ': ' '{print $2}')
    local_release_dir=$(grep "local_release_dir" \
        $kud_inventory_folder/group_vars/k8s-cluster.yml | \
        awk -F "\"" '{print $2}')
    local tarball=v$version.tar.gz
    _install_ansible
    wget https://github.com/kubernetes-incubator/kubespray/archive/$tarball
    tar -C $dest_folder -xzf $tarball
    chown -R root:root $dest_folder/kubespray-$version
    mkdir -p ${local_release_dir}/containers
    rm $tarball

    pushd $dest_folder/kubespray-$version/
    pip install --no-cache-dir -r ./requirements.txt
    make mitogen
    popd
    rm -f $kud_inventory_folder/group_vars/all.yml 2> /dev/null
    if [[ -n "${verbose:-}" ]]; then
        echo "kube_log_level: 5" | tee \
            $kud_inventory_folder/group_vars/all.yml
    else
        echo "kube_log_level: 2" | tee \
            $kud_inventory_folder/group_vars/all.yml
    fi
    echo "kubeadm_enabled: true" | \
        tee --append $kud_inventory_folder/group_vars/all.yml
    if [[ -n "${http_proxy:-}" ]]; then
        echo "http_proxy: \"$http_proxy\"" | tee --append \
            $kud_inventory_folder/group_vars/all.yml
    fi
    if [[ -n "${https_proxy:-}" ]]; then
        echo "https_proxy: \"$https_proxy\"" | tee --append \
            $kud_inventory_folder/group_vars/all.yml
    fi
}
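
# The generated group_vars/all.yml ends up roughly as follows; the proxy lines
# appear only when the corresponding variables are set, and the proxy URL shown
# here is illustrative:
#   kube_log_level: 2
#   kubeadm_enabled: true
#   http_proxy: "http://proxy.example.com:8080"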

# install_k8s() - Install Kubernetes with the kubespray tool, adding Kata
# Containers when containerd is the selected runtime
function install_k8s {
    local cluster_name=$1
    ansible-playbook $verbose -i \
        $kud_inventory $kud_playbooks/preconfigure-kubespray.yml \
        --become --become-user=root | \
        tee $cluster_log/setup-kubernetes.log
    if [ "$container_runtime" == "docker" ]; then
        echo "Docker will be used as the container runtime interface"
        ansible-playbook $verbose -i \
            $kud_inventory $dest_folder/kubespray-$version/cluster.yml \
            -e cluster_name=$cluster_name --become --become-user=root | \
            tee $cluster_log/setup-kubernetes.log
    elif [ "$container_runtime" == "containerd" ]; then
        echo "Containerd will be used as the container runtime interface"
        ansible-playbook $verbose -i \
            $kud_inventory $dest_folder/kubespray-$version/cluster.yml \
            -e $kud_kata_override_variables -e cluster_name=$cluster_name \
            --become --become-user=root | \
            tee $cluster_log/setup-kubernetes.log
        # Install Kata Containers in the containerd scenario
        ansible-playbook $verbose -i \
            $kud_inventory -e "base_dest=$HOME" \
            $kud_playbooks/configure-kata.yml | \
            tee $cluster_log/setup-kata.log
    else
        echo "Only Docker and Containerd are supported container runtimes"
        exit 1
    fi

    # Configure the environment.
    # Requires kubeconfig_localhost and kubectl_localhost to be true
    # in inventory/group_vars/k8s-cluster.yml
    mkdir -p $HOME/.kube
    cp $kud_inventory_folder/artifacts/admin.conf $HOME/.kube/config
    if ! which kubectl; then
        cp $kud_inventory_folder/artifacts/kubectl /usr/local/bin/
    fi
}
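
# For the admin.conf and kubectl copies above to succeed, the kubespray inventory
# must carry the following settings (typically in inventory/group_vars/k8s-cluster.yml):
#   kubeconfig_localhost: true
#   kubectl_localhost: true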

# install_addons() - Install Kubernetes AddOns
function install_addons {
    if [ ${1:+1} ]; then
        local plugins_name="$1"
        echo "additional addon plugins: $1"
    else
        local plugins_name=""
        echo "no additional addon plugins"
    fi

    source /etc/environment
    echo "Installing Kubernetes AddOns"
    ansible-galaxy install $verbose -r \
        $kud_infra_folder/galaxy-requirements.yml --ignore-errors

    ansible-playbook $verbose -i \
        $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml \
        | tee $cluster_log/setup-kud.log

    kud_addons="${KUD_ADDONS:-} ${plugins_name}"

    for addon in ${kud_addons}; do
        echo "Deploying $addon using the configure-$addon.yml playbook..."
        ansible-playbook $verbose -i \
            $kud_inventory -e "base_dest=$HOME" \
            $kud_playbooks/configure-${addon}.yml | \
            tee $cluster_log/setup-${addon}.log
    done

    echo "Run the test cases if testing_enabled is set to true."
    if [[ "${testing_enabled}" == "true" ]]; then
        failed_kud_tests=""
        # Run Kata test first if Kata was installed
        if [ "$container_runtime" == "containerd" ]; then
            # Install Kata webhook for test pods
            ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" \
                -e "kata_webhook_runtimeclass=$kata_webhook_runtimeclass" \
                $kud_playbooks/configure-kata-webhook.yml \
                --become --become-user=root | \
                sudo tee $cluster_log/setup-kata-webhook.log
            kata_webhook_deployed=true
            pushd $kud_tests
            bash kata.sh || failed_kud_tests="${failed_kud_tests} kata"
            popd
        fi
        # Run other plugin tests
        for addon in ${kud_addons}; do
            pushd $kud_tests
            bash ${addon}.sh || failed_kud_tests="${failed_kud_tests} ${addon}"
            case $addon in
                "onap4k8s" )
                    echo "Test the onap4k8s plugin installation"
                    for functional_test in plugin_edgex plugin_fw plugin_eaa; do
                        bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
                    done
                    ;;
                "emco" )
                    echo "Test the emco plugin installation"
                    # TODO plugin_fw_v2 requires virtlet and a patched multus to succeed
                    # for functional_test in plugin_fw_v2; do
                    #     bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
                    # done
                    ;;
            esac
            popd
        done
        # Remove Kata webhook if user didn't want it permanently installed
        if ! [ "$enable_kata_webhook" == "true" ] && [ "$kata_webhook_deployed" == "true" ]; then
            ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" \
                -e "kata_webhook_runtimeclass=$kata_webhook_runtimeclass" \
                $kud_playbooks/configure-kata-webhook-reset.yml \
                --become --become-user=root | \
                sudo tee $cluster_log/kata-webhook-reset.log
            kata_webhook_deployed=false
        fi
        if [[ ! -z "$failed_kud_tests" ]]; then
            echo "Test cases failed:${failed_kud_tests}"
            return 1
        fi
    fi

    # Check if Kata webhook should be installed and isn't already installed
    if [ "$enable_kata_webhook" == "true" ] && ! [ "$kata_webhook_deployed" == "true" ]; then
        ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" \
            -e "kata_webhook_runtimeclass=$kata_webhook_runtimeclass" \
            $kud_playbooks/configure-kata-webhook.yml \
            --become --become-user=root | \
            sudo tee $cluster_log/setup-kata-webhook.log
    fi

    echo "Add-ons deployment complete..."
}
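
# Extra add-ons can also be requested through the environment; the value is merged
# with any --plugins arguments. Example (the addon name is illustrative and needs
# a matching configure-<addon>.yml playbook):
#   KUD_ADDONS="emco" ./installer.sh --cluster <cluster name>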

# master_ip() - Print the IP address of the Kubernetes API server taken from the kubeconfig
function master_ip {
    kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | awk -F '[:/]' '{print $4}'
}
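
# Example: for a kubeconfig server entry such as https://192.168.121.10:6443
# (address illustrative), the awk field split above prints 192.168.121.10.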

# install_host_artifacts() - Copy installation artifacts so they are usable on the host running Ansible
function install_host_artifacts {
    local -r cluster_name=$1
    local -r host_dir="/opt/kud/multi-cluster"
    local -r host_addons_dir="${host_dir}/addons"
    local -r host_artifacts_dir="${host_dir}/${cluster_name}/artifacts"

    for addon in cdi cdi-operator cpu-manager kubevirt kubevirt-operator multus-cni node-feature-discovery ovn4nfv ovn4nfv-network qat-device-plugin sriov-network sriov-network-operator; do
        mkdir -p ${host_addons_dir}/${addon}/{helm,profile}
        cp -r ${kud_infra_folder}/helm/${addon} ${host_addons_dir}/${addon}/helm
        cp -r ${kud_infra_folder}/profiles/${addon}/* ${host_addons_dir}/${addon}/profile
        tar -czf ${host_addons_dir}/${addon}.tar.gz -C ${host_addons_dir}/${addon}/helm .
        tar -czf ${host_addons_dir}/${addon}_profile.tar.gz -C ${host_addons_dir}/${addon}/profile .
    done

    mkdir -p ${host_addons_dir}/tests
    for test in _common _common_test _functions topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk; do
        cp ${kud_tests}/${test}.sh ${host_addons_dir}/tests
    done
    cp ${kud_tests}/plugin_fw_v2.sh ${host_addons_dir}/tests
    cp ${kud_tests}/plugin_fw_v2.yaml ${host_addons_dir}/tests
    cp -r ${kud_tests}/../demo/composite-firewall ${host_addons_dir}/tests

    mkdir -p ${host_artifacts_dir}
    cp -rf ${kud_inventory_folder}/artifacts/* ${host_artifacts_dir}

    mkdir -p ${host_artifacts_dir}/addons
    for yaml in ${kud_infra_folder}/emco/examples/*.yaml; do
        cp ${yaml} ${host_artifacts_dir}/addons
    done
    for template in addons/*.tmpl; do
        CLUSTER_NAME="${cluster_name}" \
        HOST_IP="$(master_ip)" \
        KUBE_PATH="${host_artifacts_dir}/admin.conf" \
        PACKAGES_PATH="${host_addons_dir}" \
        envsubst <${template} >${host_artifacts_dir}/${template%.tmpl}
    done
}
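
# After this function runs, ${host_addons_dir} holds one <addon>.tar.gz and one
# <addon>_profile.tar.gz per add-on plus a tests/ directory, while
# ${host_artifacts_dir} holds the kubespray artifacts (admin.conf, kubectl) and
# the rendered EMCO example YAMLs under addons/.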

# _print_kubernetes_info() - Print the Kubernetes login information
function _print_kubernetes_info {
    if ! kubectl version &>/dev/null; then
        return
    fi

    # Expose the Dashboard using a NodePort
    node_port=30080
    KUBE_EDITOR="sed -i \"s|type\: ClusterIP|type\: NodePort|g\"" \
        kubectl -n kube-system edit service kubernetes-dashboard
    KUBE_EDITOR="sed -i \"s|nodePort\: .*|nodePort\: $node_port|g\"" \
        kubectl -n kube-system edit service kubernetes-dashboard

    printf "Kubernetes Info\n===============\n" > $k8s_info_file
    echo "Dashboard URL: https://$(master_ip):$node_port" >> $k8s_info_file
    echo "Admin user: kube" >> $k8s_info_file
    echo "Admin password: secret" >> $k8s_info_file
}

verbose=""
if [[ -n "${KUD_DEBUG:-}" ]]; then
    set -o xtrace
    verbose="-vvv"
fi

# Configuration values
dest_folder=/opt
kud_folder=${INSTALLER_DIR}
kud_infra_folder=$kud_folder/../../deployment_infra
kud_playbooks=$kud_infra_folder/playbooks
kud_tests=$kud_folder/../../tests
k8s_info_file=$kud_folder/k8s_info.log
testing_enabled=${KUD_ENABLE_TESTS:-false}
container_runtime=${CONTAINER_RUNTIME:-docker}
enable_kata_webhook=${ENABLE_KATA_WEBHOOK:-false}
kata_webhook_runtimeclass=${KATA_WEBHOOK_RUNTIMECLASS:-kata-qemu}
kata_webhook_deployed=false
# For containerd, the default etcd_deployment_type: docker doesn't work.
# Use either etcd_kubeadm_enabled: true or etcd_deployment_type: host instead.
# See https://github.com/kubernetes-sigs/kubespray/issues/5713
kud_kata_override_variables="container_manager=containerd \
    -e etcd_deployment_type=host -e kubelet_cgroup_driver=cgroupfs"
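
# When containerd is selected, install_k8s passes these as extra vars, which
# expands on the ansible-playbook command line to, effectively:
#   -e container_manager=containerd -e etcd_deployment_type=host \
#   -e kubelet_cgroup_driver=cgroupfs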

mkdir -p /opt/csar
export CSAR_DIR=/opt/csar

# install_pkg() - Install the prerequisite packages and kubespray
function install_pkg {
    install_prerequisites
    install_kubespray
}

# install_cluster() - Deploy the Kubernetes cluster, its add-ons, and the host artifacts
function install_cluster {
    version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | \
        awk -F ': ' '{print $2}')
    export ANSIBLE_CONFIG=$dest_folder/kubespray-$version/ansible.cfg
    install_k8s $1
    if [ ${2:+1} ]; then
        echo "Installing the default add-ons and $2"
        install_addons "$2"
    else
        install_addons
    fi
    echo "Installed the add-ons"

    install_host_artifacts $1

    _print_kubernetes_info
}

# usage() - Print installer usage
function usage {
    echo "installer usage:"
    echo "./installer.sh --install_pkg - Install the required software packages"
    echo "./installer.sh --cluster <cluster name> \
- Install a k8s cluster with the default plugins"
    echo "./installer.sh --cluster <cluster name> \
--plugins <plugin_1 plugin_2> - Install a k8s cluster with the default plugins \
and additional plugins such as onap4k8s."
}
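
# Typical flow (the cluster name is illustrative and must match a prepared
# /opt/kud/multi-cluster/<cluster name>/hosts.ini inventory):
#   ./installer.sh --install_pkg
#   ./installer.sh --cluster cluster-101 --plugins onap4k8s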

if [ $# -eq 0 ]; then
    echo "Error: No arguments supplied"
    usage
    exit 1
fi

if [ -z "$1" ]; then
    echo "Error: Null argument passed"
    usage
    exit 1
fi

if [ "$1" == "--install_pkg" ]; then
    export kud_inventory_folder=$kud_folder/inventory
    kud_inventory=$kud_inventory_folder/hosts.ini
    install_pkg
    echo "Installed the required packages"
    exit 0
fi

if [ "$1" == "--cluster" ]; then
    if [ -z "${2-}" ]; then
        echo "Error: Cluster name is null"
        usage
        exit 1
    fi

    cluster_name=$2
    kud_multi_cluster_path=/opt/kud/multi-cluster
    cluster_path=$kud_multi_cluster_path/$cluster_name
    echo $cluster_path
    if [ ! -d "${cluster_path}" ]; then
        echo "Error: cluster_path ${cluster_path} doesn't exist"
        usage
        exit 1
    fi

    cluster_log=$kud_multi_cluster_path/$cluster_name/log
    export kud_inventory_folder=$kud_folder/inventory/$cluster_name
    kud_inventory=$kud_inventory_folder/hosts.ini

    mkdir -p $kud_inventory_folder
    mkdir -p $cluster_log
    cp $kud_multi_cluster_path/$cluster_name/hosts.ini $kud_inventory_folder/
    cp -rf $kud_folder/inventory/group_vars $kud_inventory_folder/

    if [ ${3:+1} ]; then
        if [ "$3" == "--plugins" ]; then
            if [ -z "${4-}" ]; then
                echo "Error: plugins argument is null; refer to the usage"
                usage
                exit 1
            fi
            plugins_name=${@:4:$#}
            install_cluster $cluster_name "$plugins_name"
            exit 0
        else
            echo "Error: the cluster argument must be followed by --plugins; \
refer to the usage"
            usage
            exit 1
        fi
    fi
    install_cluster $cluster_name
    exit 0
fi

echo "Error: unrecognized argument; refer to the installer usage"
usage
exit 1