2 #############################################################################
3 # Copyright © 2019 Bell.
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
16 #############################################################################
18 # This installation is for an RKE install of kubernetes
19 # after this run the standard oom install
20 # this installation can be run on any ubuntu 16.04 VM, RHEL 7.6 (root only), physical or cloud azure/aws host
21 # https://wiki.onap.org/display/DW/OOM+RKE+Kubernetes+Deployment
22 # source from https://jira.onap.org/browse/OOM-1598
25 # RKE 0.1.16 Kubernetes 1.11.6, kubectl 1.11.6, Helm 2.9.1, Docker 18.06
26 # single node install, HA pending
32 sudo ./rke_setup.sh -b dublin -s rke.onap.cloud -e onap -l amdocs -v true
34 -b [branch] : branch = master or dublin (required)
35 -s [server] : server = IP or DNS name (required)
36 -e [environment] : use the default (onap)
37 -k [key] : ssh key name
38 -l [username] : login username account (use ubuntu for example)
# Pin the kubectl version to the ONAP branch being installed: casablanca
# tracks kubectl 1.11.3, later branches (dublin/master) track 1.11.6.
# NOTE(review): this chunk is sampled — the elif/else and closing fi of this
# block are not visible here; do not assume structure beyond these lines.
45 if [ "$BRANCH" == "casablanca" ]; then
48 KUBECTL_VERSION=1.11.3
# presumably the non-casablanca default — confirm against the full file
54 KUBECTL_VERSION=1.11.6
59 # copy your private ssh key and cluster.yml file to the vm
61 #sudo cp ~/.ssh/onap_rsa .
62 #sudo chmod 777 onap_rsa
63 #scp onap_rsa ubuntu@192.168.241.132:~/
65 #sudo chmod 400 onap_rsa
66 #sudo cp onap_rsa ~/.ssh
67 # make sure public key is insetup correctly in
68 # sudo vi ~/.ssh/authorized_keys
70 echo "please supply your ssh key as provided by the -k keyname - it must be be chmod 400 and chown user:user in ~/.ssh/"
71 echo "The RKE version specific cluster.yaml is already integrated in this script for 0.1.15/0.1.16 no need for below generation..."
72 echo "rke config --name cluster.yml"
74 echo "address: $SERVER"
75 echo "user: $USERNAME"
76 echo "ssh_key_path: $SSHPATH_PREFIX/$SSHKEY"
# Choose the hyperkube image tag and pause (pod-infra) container to match the
# RKE release: 0.1.16 pairs with k8s 1.11.6 and the rancher pause image,
# otherwise fall back to 1.11.3 and the gcr.io mirror of pause.
# NOTE(review): sampled chunk — the else/fi lines of this block are not
# visible here.
81 if [ "$RKE_VERSION" == "0.1.16" ]; then
83 HYPERCUBE=1.11.6-rancher1
84 POD_INFRA_CONTAINER=rancher/pause-amd64:3.1
88 HYPERCUBE=1.11.3-rancher1
89 POD_INFRA_CONTAINER=gcr.io.google_containers/pause-amd64:3.1
# Generate the RKE cluster.yml for this single-node install. The heredoc
# delimiter EOF is unquoted, so $SERVER/$USERNAME/$SSHKEY/$RKETOOLS/$HYPERCUBE
# and friends expand into the written file. NOTE(review): sampled chunk — many
# generated lines (nodes/services sections) and the closing EOF are not
# visible here.
92 cat > cluster.yml <<EOF
93 # generated from rke_setup.sh
102 hostname_override: ""
104 docker_socket: /var/run/docker.sock
106 ssh_key_path: $SSHPATH_PREFIX/$SSHKEY
127 service_cluster_ip_range: 10.43.0.0/16
128 service_node_port_range: ""
129 pod_security_policy: false
135 cluster_cidr: 10.42.0.0/16
136 service_cluster_ip_range: 10.43.0.0/16
148 cluster_domain: cluster.local
149 infra_container_image: ""
150 cluster_dns_server: 10.43.0.10
165 etcd: rancher/coreos-etcd:v3.2.18
166 alpine: rancher/rke-tools:v$RKETOOLS
167 nginx_proxy: rancher/rke-tools:v$RKETOOLS
168 cert_downloader: rancher/rke-tools:v$RKETOOLS
169 kubernetes_services_sidecar: rancher/rke-tools:v$RKETOOLS
170 kubedns: rancher/k8s-dns-kube-dns-amd64:1.14.10
171 dnsmasq: rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10
172 kubedns_sidecar: rancher/k8s-dns-sidecar-amd64:1.14.10
173 kubedns_autoscaler: rancher/cluster-proportional-autoscaler-amd64:1.0.0
174 kubernetes: rancher/hyperkube:v$HYPERCUBE
175 flannel: rancher/coreos-flannel:v0.10.0
176 flannel_cni: rancher/coreos-flannel-cni:v0.3.0
177 calico_node: rancher/calico-node:v3.1.3
178 calico_cni: rancher/calico-cni:v3.1.3
179 calico_controllers: ""
180 calico_ctl: rancher/calico-ctl:v2.0.0
181 canal_node: rancher/calico-node:v3.1.3
182 canal_cni: rancher/calico-cni:v3.1.3
183 canal_flannel: rancher/coreos-flannel:v0.10.0
184 wave_node: weaveworks/weave-kube:2.1.2
185 weave_cni: weaveworks/weave-npc:2.1.2
186 pod_infra_container: $POD_INFRA_CONTAINER
187 ingress: rancher/nginx-ingress-controller:0.16.2-rancher1
188 ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.4
189 metrics_server: rancher/metrics-server-amd64:v0.2.1
# NOTE(review): this uses $SSHPATH while the node-level entry above uses
# $SSHPATH_PREFIX/$SSHKEY — $SSHPATH is not set anywhere visible in this
# chunk; confirm against the full file (possible empty expansion).
190 ssh_key_path: $SSHPATH
191 ssh_agent_auth: false
195 ignore_docker_version: false
196 kubernetes_version: "$KUBERNETES_VERSION"
197 private_registries: []
# Announce the resolved tool versions before doing anything destructive.
221 echo "Installing on ${SERVER} for ${BRANCH}: RKE: ${RKE_VERSION} Kubectl: ${KUBECTL_VERSION} Helm: ${HELM_VERSION} Docker: ${DOCKER_VERSION} username: ${USERNAME}"
# NOTE(review): known shell pitfall — 'sudo echo' elevates only echo; the
# '>> /etc/hosts' redirection still runs as the invoking user and fails for
# non-root. The usual fix is: echo "..." | sudo tee -a /etc/hosts
222 sudo echo "127.0.0.1 ${SERVER}" >> /etc/hosts
# Install the Docker version matched to this RKE release via Rancher's
# convenience script, then let $USERNAME run docker without sudo (takes
# effect on next login).
223 echo "Install docker - If you must install as non-root - comment out the docker install below - run it separately, run the user mod, logout/login and continue this script"
224 curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
225 sudo usermod -aG docker $USERNAME
# Fetch the RKE binary and place it on the PATH.
228 sudo wget https://github.com/rancher/rke/releases/download/v$RKE_VERSION/rke_linux-amd64
229 mv rke_linux-amd64 rke
231 sudo mv ./rke /usr/local/bin/rke
# make is needed by the OOM build steps; apt-get here is Ubuntu-specific
# (RHEL uses 'yum groupinstall "Development Tools"' per the message).
233 echo "Install make - required for beijing+ - installed via yum groupinstall Development Tools in RHEL"
235 sudo apt-get install make -y
# Install the branch-matched kubectl.
237 sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
238 sudo chmod +x ./kubectl
239 sudo mv ./kubectl /usr/local/bin/kubectl
# Install the Helm client from the release tarball.
241 wget http://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz
242 sudo tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
243 sudo mv linux-amd64/helm /usr/local/bin/helm
# Bring the cluster up with the cluster.yml generated earlier (the actual
# 'rke up' invocation is not visible in this sampled chunk).
245 echo "Bringing RKE up - using supplied cluster.yml"
247 echo "wait 2 extra min for the cluster"
# Install the RKE-generated kubeconfig as the default for kubectl.
# (Typo "cluter" is inside a runtime string — left untouched.)
251 echo "copy kube_config_cluter.yaml generated - to ~/.kube/config"
252 sudo cp kube_config_cluster.yml ~/.kube/config
253 # avoid using sudo for kubectl
# NOTE(review): 777 is world-writable; 600 would suffice for a kubeconfig.
254 sudo chmod 777 ~/.kube/config
255 echo "Verify all pods up on the kubernetes system - will return localhost:8080 until a host is added"
256 echo "kubectl get pods --all-namespaces"
257 kubectl get pods --all-namespaces
# Bootstrap Helm 2 server side: tiller serviceaccount + cluster-admin
# binding, then helm init and wait for the tiller deployment to roll out.
258 echo "install tiller/helm"
259 kubectl -n kube-system create serviceaccount tiller
260 kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
261 helm init --service-account tiller
262 kubectl -n kube-system rollout status deploy/tiller-deploy
263 echo "upgrade server side of helm in kubernetes"
# The following root/non-root branches are heavily sampled: each 'if' below
# is missing its else/fi lines in this view — only one arm of each pair of
# root vs sudo invocations is visible. Do not infer pairing beyond that.
264 if [ "$USERNAME" == "root" ]; then
271 if [ "$USERNAME" == "root" ]; then
274 sudo helm init --upgrade
278 echo "verify both versions are the same below"
279 if [ "$USERNAME" == "root" ]; then
284 echo "start helm server"
285 if [ "$USERNAME" == "root" ]; then
# Register the local chart repo served on 8879 (root uses plain helm,
# non-root path uses sudo).
292 echo "add local helm repo"
293 if [ "$USERNAME" == "root" ]; then
294 helm repo add local http://127.0.0.1:8879
297 sudo helm repo add local http://127.0.0.1:8879
# Post-install hints for exposing the grafana dashboard after OOM deploy.
300 echo "To enable grafana dashboard - do this after running cd.sh which brings up onap - or you may get a 302xx port conflict"
301 echo "kubectl expose -n kube-system deployment monitoring-grafana --type=LoadBalancer --name monitoring-grafana-client"
302 echo "to get the nodeport for a specific VM running grafana"
303 echo "kubectl get services --all-namespaces | grep graf"
# Final status dump of the cluster.
307 kubectl get services --all-namespaces
308 kubectl get pods --all-namespaces
# Default directory holding the ssh key named via -k.
317 SSHPATH_PREFIX=~/.ssh
# Option spec: -b -s -e -u -l -k each take an argument; -v does not.
# NOTE(review): the usage example above passes "-v true", which conflicts
# with 'v' (no colon) here — confirm intended behavior against the full file.
# The loop body and 'done' are not visible in this sampled chunk.
319 while getopts ":b:s:e:u:l:k:v" PARAM; do
# Required-argument guard (its body/fi is not visible here).
350 if [[ -z $BRANCH ]]; then
# Entry point: run the full install with the parsed settings.
# NOTE(review): arguments are unquoted — empty optional values (e.g. an
# unset $SSHKEY or $VALIDATE) shift positional parameters inside
# install_onap; quoting each "$VAR" would make the call robust.
355 install_onap $BRANCH $SERVER $ENVIRON $USERNAME $SSHPATH_PREFIX $SSHKEY $VALIDATE