--- /dev/null
+output
+examples/values.yaml
--- /dev/null
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
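+# Packages each addon's Helm chart and deployment profile into tarballs
+# under output/packages, for deployment with emcoctl.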
+ROOT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
+OUTPUT_DIR := $(ROOT_DIR)/output
+PACKAGE_DIR := $(OUTPUT_DIR)/packages
+
+ADDONS := multus-cni ovn4nfv node-feature-discovery sriov-network-operator sriov-network qat-device-plugin cpu-manager
+
+.PHONY: $(ADDONS)
+
+all: $(ADDONS)
+
+$(ADDONS):
+ @echo "\n[$@]"
+	@$(MAKE) chart-$@
+	@$(MAKE) profile-$@
+
+dep-%:
+	@if grep -q "^dependencies:" ../helm/$*/Chart.yaml; then helm dep up ../helm/$*; fi
+
+lint-%: dep-%
+ @helm lint ../helm/$*
+
+chart-%: lint-%
+ @mkdir -p $(PACKAGE_DIR)
+ @tar -czf $(PACKAGE_DIR)/$*.tar.gz -C ../helm $*
+
+profile-%:
+ @mkdir -p $(PACKAGE_DIR)
+ @tar -czf $(PACKAGE_DIR)/$*_profile.tar.gz -C ../profiles/$* .
+
+clean:
+ @rm -rf $(OUTPUT_DIR)
+
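+# Catch-all: any goal not matched above is a silent no-op.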
+%:
+ @:
--- /dev/null
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
+
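+# Rendered with a values file (-v), this template creates the composite app,
+# registers each app in .Apps together with its profile, creates the
+# deployment intent group and placement intents, then approves and
+# instantiates the deployment intent group.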
+---
+#creating composite app entry
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps
+metadata:
+ name: {{ .CompositeApp }}
+ description: "KUD addons"
+spec:
+ version: v1
+
+{{- range $index, $addon := .Apps }}
+---
+#adding app to the composite app
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.CompositeApp }}/v1/apps
+metadata:
+ name: {{ $addon }}
+file:
+ {{ $.PackagesPath }}/{{ $addon }}.tar.gz
+{{- end }}
+
+---
+#creating composite profile entry
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/composite-profiles
+metadata:
+ name: {{ .CompositeProfile }}
+
+{{- range $index, $addon := .Apps }}
+---
+#adding app profiles to the composite profile
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.CompositeApp }}/v1/composite-profiles/{{ $.CompositeProfile }}/profiles
+metadata:
+ name: {{ $addon }}-profile
+spec:
+ app-name: {{ $addon }}
+file:
+ {{ $.PackagesPath }}/{{ $addon }}_profile.tar.gz
+{{- end }}
+
+---
+#create deployment intent group
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups
+metadata:
+ name: {{ .DeploymentIntentGroup }}
+  description: "{{ .DeploymentIntentGroup }}"
+spec:
+ profile: {{ .CompositeProfile }}
+ version: r1
+ logical-cloud: {{ .LogicalCloud }}
+ override-values: []
+
+---
+#create intent in deployment intent group
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups/{{ .DeploymentIntentGroup }}/intents
+metadata:
+ name: {{ .DeploymentIntent }}
+spec:
+ intent:
+ genericPlacementIntent: {{ .GenericPlacementIntent }}
+
+---
+#create the generic placement intent
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups/{{ .DeploymentIntentGroup }}/generic-placement-intents
+metadata:
+ name: {{ .GenericPlacementIntent }}
+spec:
+ logical-cloud: {{ .LogicalCloud }}
+
+{{- range $index, $addon := .Apps }}
+---
+#add the app placement intent to the generic placement intent
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.CompositeApp }}/v1/deployment-intent-groups/{{ $.DeploymentIntentGroup }}/generic-placement-intents/{{ $.GenericPlacementIntent }}/app-intents
+metadata:
+ name: {{ $addon }}-placement-intent
+spec:
+ app-name: {{ $addon }}
+ intent:
+ allOf:
+ - provider-name: {{ $.ClusterProvider }}
+ cluster-label-name: {{ $.ClusterLabel }}
+{{- end }}
+
+---
+#Approve
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups/{{ .DeploymentIntentGroup }}/approve
+
+---
+#Instantiate
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups/{{ .DeploymentIntentGroup }}/instantiate
--- /dev/null
+#### SPDX-License-Identifier: Apache-2.0
+#### Copyright (c) 2021 Intel Corporation
+
+# Installing KUD addons with emcoctl
+
+This folder contains KUD addons to deploy with EMCO. The example
+configuration assumes a single edge cluster to deploy to. Before
+deploying these addons, EMCO must be installed on a cluster, and
+emcoctl must be installed and configured for the edge cluster (a
+configuration sketch follows the addon list below).
+
+1. Multus CNI
+2. OVN4NFV K8s Plugin
+3. Node Feature Discovery
+4. SR-IOV Network Operator
+5. SR-IOV Network
+6. QuickAssist Technology (QAT) Device Plugin
+7. CPU Manager for Kubernetes
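+
+For reference, emcoctl reads the EMCO service endpoints from a config
+file passed with `--config` (commonly named `emco-cfg.yaml`). A minimal
+sketch, assuming NodePort access as in the NOTE below; the service
+entries shown are typical and every port number here is illustrative,
+to be replaced with the NodePorts of your EMCO install:
+
+```
+orchestrator:
+  host: ${HOST_IP}
+  port: 30415   # illustrative NodePort
+clm:
+  host: ${HOST_IP}
+  port: 30461   # illustrative NodePort
+dcm:
+  host: ${HOST_IP}
+  port: 30477   # illustrative NodePort
+```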
+
+## Setup environment to deploy addons
+
+1. Export environment variables
+   - KUBE_PATH: the path to the edge cluster's kubeconfig, and
+   - HOST_IP: the IP address of the cluster where EMCO is installed.
+
+#### NOTE: HOST_IP assumes that NodePorts are used to access all EMCO services, both from outside the cluster and between the EMCO services themselves.
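+
+For example (values are illustrative):
+
+    `$ export KUBE_PATH=$HOME/.kube/edge-admin.conf`
+    `$ export HOST_IP=192.168.121.10`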
+
+2. Customize values.yaml and values-resources.yaml.
+
+ `$ envsubst < values.yaml.example > values.yaml`
+ `$ envsubst < values-resources.yaml.example > values-resources.yaml`
+
+## Create prerequisites to deploy addons
+
+Apply prerequisites.yaml. This creates the controllers, one project,
+one cluster, and the default logical cloud. This step only needs to be
+done once.
+
+ `$ emcoctl apply -f prerequisites.yaml -v values.yaml`
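+
+To verify what was created, the same file and values can be queried; a
+sketch, assuming `emcoctl get` accepts the same flags as `apply`:
+
+    `$ emcoctl get -f prerequisites.yaml -v values.yaml`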
+
+## Deploying addons
+
+Apply composite-app.yaml. This deploys the addons listed in the `Apps`
+value of the given values file.
+
+ `$ emcoctl apply -f ../output/composite-app.yaml -v values.yaml`
+ `$ emcoctl apply -f ../output/composite-app.yaml -v values-resources.yaml`
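+
+Once instantiation completes, the addons run as pods on the edge
+cluster and can be inspected with kubectl (namespaces and pod names
+vary by addon):
+
+    `$ KUBECONFIG=$KUBE_PATH kubectl get pods --all-namespaces`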
+
+## Cleanup
+
+1. Delete addons.
+
+ `$ emcoctl delete -f ../output/composite-app.yaml -v values-resources.yaml`
+ `$ emcoctl delete -f ../output/composite-app.yaml -v values.yaml`
+
+2. Cleanup prerequisites.
+
+ `$ emcoctl delete -f prerequisites.yaml -v values.yaml`
+
+#### NOTE: Known issue: deletion of the resources sometimes fails when some resources cannot be deleted before others; this is a timing issue. If that happens, retry the delete and it should succeed.
--- /dev/null
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
+
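+# Rendered with a values file (-v), this template creates the rsync, gac,
+# ovnaction, and dtc controllers, one project, one cluster provider and
+# cluster, and the default logical cloud, then instantiates the logical
+# cloud.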
+---
+#create project
+version: emco/v2
+resourceContext:
+ anchor: projects
+metadata:
+ name: {{ .ProjectName }}
+---
+#creating controller entries
+version: emco/v2
+resourceContext:
+ anchor: controllers
+metadata:
+ name: rsync
+spec:
+ host: {{ .HostIP }}
+ port: {{ .RsyncPort }}
+
+---
+#creating controller entries
+version: emco/v2
+resourceContext:
+ anchor: controllers
+metadata:
+ name: gac
+spec:
+ host: {{ .HostIP }}
+ port: {{ .GacPort }}
+ type: "action"
+ priority: 1
+
+---
+#creating controller entries
+version: emco/v2
+resourceContext:
+ anchor: controllers
+metadata:
+ name: ovnaction
+spec:
+ host: {{ .HostIP }}
+ port: {{ .OvnPort }}
+ type: "action"
+ priority: 1
+
+---
+#creating controller entries
+version: emco/v2
+resourceContext:
+ anchor: controllers
+metadata:
+ name: dtc
+spec:
+ host: {{ .HostIP }}
+ port: {{ .DtcPort }}
+ type: "action"
+ priority: 1
+
+---
+#creating cluster provider
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers
+metadata:
+ name: {{ .ClusterProvider }}
+
+---
+#creating cluster
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/{{ .ClusterProvider }}/clusters
+metadata:
+ name: {{ .Cluster1 }}
+file:
+ {{ .KubeConfig }}
+
+---
+#Add label cluster
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/{{ .ClusterProvider }}/clusters/{{ .Cluster1 }}/labels
+label-name: {{ .ClusterLabel }}
+
+---
+#create default logical cloud with admin permissions
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/logical-clouds
+metadata:
+ name: {{ .LogicalCloud }}
+spec:
+ level: "0"
+
+---
+#add cluster reference to logical cloud
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/logical-clouds/{{ .LogicalCloud }}/cluster-references
+metadata:
+ name: lc-cl-1
+spec:
+ cluster-provider: {{ .ClusterProvider }}
+ cluster-name: {{ .Cluster1 }}
+ loadbalancer-ip: "0.0.0.0"
+
+---
+#instantiate logical cloud
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/logical-clouds/{{ .LogicalCloud }}/instantiate
+
--- /dev/null
+HostIP: $HOST_IP
+KubeConfig: $KUBE_PATH
+PackagesPath: $PWD/../output/packages
+ProjectName: proj1
+RsyncPort: 30441
+GacPort: 30493
+OvnPort: 30473
+DtcPort: 30483
+ClusterProvider: provider1
+Cluster1: cluster1
+ClusterLabel: edge-cluster
+LogicalCloud: default
+CompositeApp: addon-resources
+CompositeProfile: addon-resources-profile
+DeploymentIntentGroup: addon-resources-deployment-intent-group
+DeploymentIntent: addon-resources-deployment-intent
+GenericPlacementIntent: addon-resources-placement-intent
+Apps:
+- sriov-network
--- /dev/null
+HostIP: $HOST_IP
+KubeConfig: $KUBE_PATH
+PackagesPath: $PWD/../output/packages
+ProjectName: proj1
+RsyncPort: 30441
+GacPort: 30493
+OvnPort: 30473
+DtcPort: 30483
+ClusterProvider: provider1
+Cluster1: cluster1
+ClusterLabel: edge-cluster
+LogicalCloud: default
+CompositeApp: addons
+CompositeProfile: addons-profile
+DeploymentIntentGroup: addons-deployment-intent-group
+DeploymentIntent: addons-deployment-intent
+GenericPlacementIntent: addons-placement-intent
+Apps:
+- multus-cni
+- ovn4nfv
+- node-feature-discovery
+- sriov-network-operator
+- qat-device-plugin
+- cpu-manager
--- /dev/null
+---
+version: v1
+type:
+ values: "override_values.yaml"
--- /dev/null
+---
+version: v1
+type:
+ values: "override_values.yaml"
--- /dev/null
+---
+version: v1
+type:
+ values: "override_values.yaml"
--- /dev/null
+---
+version: v1
+type:
+ values: "override_values.yaml"
--- /dev/null
+---
+version: v1
+type:
+ values: "override_values.yaml"
--- /dev/null
+---
+version: v1
+type:
+ values: "override_values.yaml"
--- /dev/null
+---
+version: v1
+type:
+ values: "override_values.yaml"
--- /dev/null
+# Installing KUD addons with emcoctl
+
+1. Customize values.yaml and values-resources.yaml as needed
+
+To create a customized profile for a specific addon, edit the profile
+as needed and repackage it; for example, for cpu-manager:
+
+```
+ tar -czf /opt/kud/multi-cluster/addons/cpu-manager.tar.gz -C /opt/kud/multi-cluster/addons/cpu-manager/helm .
+  tar -czf /opt/kud/multi-cluster/addons/cpu-manager_profile.tar.gz -C /opt/kud/multi-cluster/addons/cpu-manager/profile .
+```
+
+2. Create prerequisites to deploy addons
+
+Apply prerequisites.yaml. This step is optional: if the prerequisite
+resources already exist in the cluster, it is sufficient to customize
+values.yaml with the values of those resources. The supplied
+prerequisites.yaml creates controllers, one project, one cluster, and
+one logical cloud.
+
+    `$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f prerequisites.yaml -v values.yaml`
+
+3. Deploy addons
+
+Apply composite-app.yaml. This deploys the addons listed in the `Apps`
+value of the given values file.
+
+    `$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values.yaml`
+    `$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values-resources.yaml`
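+
+Once instantiation completes, the addons run as pods on the edge
+cluster; they can be inspected with the kubeconfig copied into the
+artifacts directory (namespaces and pod names vary by addon):
+
+    `$ KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf kubectl get pods --all-namespaces`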
+
+# Uninstalling KUD addons with emcoctl
+
+1. Delete addons
+
+    `$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values-resources.yaml`
+    `$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values.yaml`
+
+2. Cleanup prerequisites
+
+    `$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f prerequisites.yaml -v values.yaml`
+
+#### NOTE: Known issue: deletion of the resources sometimes fails when some resources cannot be deleted before others; this is a timing issue. If that happens, retry the delete and it should succeed.
--- /dev/null
+HostIP: ${HOST_IP}
+KubeConfig: ${KUBE_PATH}
+PackagesPath: ${PACKAGES_PATH}
+ProjectName: proj1
+RsyncPort: 30441
+GacPort: 30493
+OvnPort: 30473
+DtcPort: 30483
+ClusterProvider: provider1
+Cluster1: cluster1
+ClusterLabel: edge-cluster
+LogicalCloud: default
+Apps:
+- sriov-network
+CompositeApp: addon-resources
+CompositeProfile: addon-resources-profile
+DeploymentIntentGroup: addon-resources-deployment-intent-group
+DeploymentIntent: addon-resources-deployment-intent
+GenericPlacementIntent: addon-resources-placement-intent
--- /dev/null
+HostIP: ${HOST_IP}
+KubeConfig: ${KUBE_PATH}
+PackagesPath: ${PACKAGES_PATH}
+ProjectName: proj1
+RsyncPort: 30441
+GacPort: 30493
+OvnPort: 30473
+DtcPort: 30483
+ClusterProvider: provider1
+Cluster1: cluster1
+ClusterLabel: edge-cluster
+LogicalCloud: default
+Apps:
+- multus-cni
+- ovn4nfv
+- node-feature-discovery
+- sriov-network-operator
+- qat-device-plugin
+- cpu-manager
+CompositeApp: addons
+CompositeProfile: addons-profile
+DeploymentIntentGroup: addons-deployment-intent-group
+DeploymentIntent: addons-deployment-intent
+GenericPlacementIntent: addons-placement-intent
set -ex
INSTALLER_DIR="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")"
+KUD_ADDONS=""
function install_prerequisites {
#install package for docker images
find /etc/apt/sources.list.d -maxdepth 1 -name '*jonathonf*' -delete || true
apt-get update
apt-get install -y curl vim wget git \
- software-properties-common python-pip sudo
+ software-properties-common python-pip sudo gettext-base
add-apt-repository -y ppa:longsleep/golang-backports
apt-get update
apt-get install -y golang-go rsync
tee $cluster_log/setup-kud.log
# The order of KUD_ADDONS is important: some plugins (sriov, qat)
# require nfd to be enabled.
- for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat cmk $plugins_name}; do
+ for addon in $KUD_ADDONS $plugins_name; do
echo "Deploying $addon using configure-$addon.yml playbook.."
ansible-playbook $verbose -i \
$kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-${addon}.yml | \
echo "Run the test cases if testing_enabled is set to true."
if [[ "${testing_enabled}" == "true" ]]; then
failed_kud_tests=""
- for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat cmk $plugins_name}; do
+ for addon in $KUD_ADDONS $plugins_name; do
pushd $kud_tests
bash ${addon}.sh || failed_kud_tests="${failed_kud_tests} ${addon}"
case $addon in
;;
"emco" )
echo "Test the emco plugin installation"
- for functional_test in plugin_fw_v2; do
- bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
- done
+ # TODO plugin_fw_v2 requires virtlet and a patched multus to succeed
+ # for functional_test in plugin_fw_v2; do
+ # bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
+ # done
;;
esac
popd
echo "Add-ons deployment complete..."
}
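+# master_ip() - Returns the IP address of the cluster master, parsed from the
+# server URL of the current kubeconfig context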
+function master_ip {
+ kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | awk -F '[:/]' '{print $4}'
+}
+
+# Copy installation artifacts to be usable in host running Ansible
+function install_host_artifacts {
+ local -r cluster_name=$1
+ local -r host_dir="/opt/kud/multi-cluster"
+ local -r host_addons_dir="${host_dir}/addons"
+ local -r host_artifacts_dir="${host_dir}/${cluster_name}/artifacts"
+
+ for addon in cpu-manager multus-cni node-feature-discovery ovn4nfv qat-device-plugin sriov-network sriov-network-operator; do
+ mkdir -p ${host_addons_dir}/${addon}/{helm,profile}
+ cp -r ${kud_infra_folder}/helm/${addon} ${host_addons_dir}/${addon}/helm
+ cp -r ${kud_infra_folder}/profiles/${addon}/* ${host_addons_dir}/${addon}/profile
+ tar -czf ${host_addons_dir}/${addon}.tar.gz -C ${host_addons_dir}/${addon}/helm .
+ tar -czf ${host_addons_dir}/${addon}_profile.tar.gz -C ${host_addons_dir}/${addon}/profile .
+ done
+
+ mkdir -p ${host_addons_dir}/tests
+ for test in _common _common_test _functions multus ovn4nfv nfd sriov-network qat cmk; do
+ cp ${kud_tests}/${test}.sh ${host_addons_dir}/tests
+ done
+
+ mkdir -p ${host_artifacts_dir}
+ cp -rf ${kud_inventory_folder}/artifacts/* ${host_artifacts_dir}
+
+ mkdir -p ${host_artifacts_dir}/addons
+ cp ${kud_infra_folder}/emco/examples/prerequisites.yaml ${host_artifacts_dir}/addons
+ cp ${kud_infra_folder}/emco/composite-app.yaml ${host_artifacts_dir}/addons
+ for template in addons/*.tmpl; do
+ CLUSTER_NAME="${cluster_name}" \
+ HOST_IP="$(master_ip)" \
+ KUBE_PATH="${host_artifacts_dir}/admin.conf" \
+ PACKAGES_PATH="${host_addons_dir}" \
+ envsubst <${template} >${host_artifacts_dir}/${template%.tmpl}
+ done
+}
+
# _print_kubernetes_info() - Prints the login Kubernetes information
function _print_kubernetes_info {
if ! $(kubectl version &>/dev/null); then
KUBE_EDITOR="sed -i \"s|nodePort\: .*|nodePort\: $node_port|g\"" \
kubectl -n kube-system edit service kubernetes-dashboard
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}')
-
printf "Kubernetes Info\n===============\n" > $k8s_info_file
- echo "Dashboard URL: https:$master_ip:$node_port" >> $k8s_info_file
+ echo "Dashboard URL: https://$(master_ip):$node_port" >> $k8s_info_file
echo "Admin user: kube" >> $k8s_info_file
echo "Admin password: secret" >> $k8s_info_file
}
fi
echo "installed the addons"
- # Copy installation artifacts to be usable in host running Ansible
- cp -rf $kud_inventory_folder/artifacts \
- /opt/kud/multi-cluster/$cluster_name/
+ install_host_artifacts $1
_print_kubernetes_info
}
KUBE_EDITOR="sed -i \"s|type\: ClusterIP|type\: NodePort|g\"" kubectl -n kube-system edit service kubernetes-dashboard
KUBE_EDITOR="sed -i \"s|nodePort\: .*|nodePort\: $node_port|g\"" kubectl -n kube-system edit service kubernetes-dashboard
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | awk -F ":" '{print $2}')
+ master_ip=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | awk -F '[:/]' '{print $4}')
printf "Kubernetes Info\n===============\n" > $k8s_info_file
- echo "Dashboard URL: https:$master_ip:$node_port" >> $k8s_info_file
+ echo "Dashboard URL: https://$master_ip:$node_port" >> $k8s_info_file
echo "Admin user: kube" >> $k8s_info_file
echo "Admin password: secret" >> $k8s_info_file
}
echo -e "${RED} $msg ---------------------------------------${NC}"
}
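+# ssh_cluster() - Runs the given command on the cluster master node over SSH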
+function ssh_cluster {
+    master_ip=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | awk -F '[:/]' '{print $4}')
+ ssh -o StrictHostKeyChecking=no ${master_ip} -- "$@"
+}
+
function get_ovn_central_address {
#Reuse OVN_CENTRAL_ADDRESS if available (bypassable by --force flag)
if [[ "${1:-}" != "--force" ]] && [[ -n "${OVN_CENTRAL_ADDRESS:-}" ]]; then
function generate_CRD_for_macvlan_cni {
local csar_id=$1
- local master_name=`route | grep 'default' | awk '{print $8}' |head -n 1`
+ local master_name=$(ssh_cluster route | grep 'default' | awk '{print $8}' |head -n 1)
_checks_args $csar_id
pushd ${CSAR_DIR}/${csar_id}
function generate_CRD_for_ipvlan_cni {
local csar_id=$1
- local master_name=`route | grep 'default' | awk '{print $8}' |head -n 1`
+ local master_name=$(ssh_cluster route | grep 'default' | awk '{print $8}' |head -n 1)
_checks_args $csar_id
pushd ${CSAR_DIR}/${csar_id}