--- /dev/null
+# yamllint configuration for this repository/scenario.
+# Extends the bundled "default" preset, tightens brace/bracket spacing to
+# hard errors, and disables rules that molecule/Ansible files routinely
+# trip over (long Jinja expressions, "yes"/"no" truthy values).
+extends: default
+
+rules:
+  braces:
+    max-spaces-inside: 1
+    level: error
+  brackets:
+    max-spaces-inside: 1
+    level: error
+  # Jinja-heavy lines (no_log, docker_host defaults) exceed 80 chars.
+  line-length: disable
+  # Ansible conventionally uses truthy-looking values in places.
+  truthy: disable
--- /dev/null
+---
+# Molecule destroy sequence: tear down all platform containers in parallel,
+# wait for the async jobs to finish, then remove the scenario's networks.
+- name: Destroy
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  no_log: "{{ not (lookup('env', 'MOLECULE_DEBUG') | bool or molecule_yml.provisioner.log|default(false) | bool) }}"
+  tasks:
+    - name: Destroy molecule instance(s)
+      docker_container:
+        name: "{{ item.name }}"
+        docker_host: "{{ item.docker_host | default(lookup('env', 'DOCKER_HOST') or 'unix://var/run/docker.sock') }}"
+        state: absent
+        force_kill: "{{ item.force_kill | default(true) }}"
+        # Modification: we want to clean up old volumes.
+        keep_volumes: false
+      register: server
+      with_items: "{{ molecule_yml.platforms }}"
+      # Fire-and-forget: poll 0 starts each removal asynchronously; the
+      # async_status task below collects the results.
+      async: 7200
+      poll: 0
+
+    - name: Wait for instance(s) deletion to complete
+      async_status:
+        jid: "{{ item.ansible_job_id }}"
+      register: docker_jobs
+      until: docker_jobs.finished
+      retries: 300
+      with_items: "{{ server.results }}"
+
+    - name: Delete docker network(s)
+      docker_network:
+        name: "{{ item }}"
+        # NOTE(review): item is a plain network name here (output of the
+        # molecule_get_docker_networks filter), so item.docker_host is always
+        # undefined and the default branch applies — confirm this is intended.
+        docker_host: "{{ item.docker_host | default(lookup('env', 'DOCKER_HOST') or 'unix://var/run/docker.sock') }}"
+        state: absent
+      with_items: "{{ molecule_yml.platforms | molecule_get_docker_networks }}"
--- /dev/null
+---
+dependency:
+  name: galaxy
+driver:
+  name: docker
+lint:
+  name: yamllint
+platforms:
+  # Combined infrastructure + control-plane container.
+  - name: infrastructure-server
+    image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+    pre_build_image: true
+    privileged: true
+    override_command: false
+    restart_policy: unless-stopped
+    volumes:
+      - /var/lib/kubelet
+      - /var/lib/docker
+    env:
+      container: docker
+    groups:
+      - infrastructure
+      - kubernetes-control-plane
+    networks:
+      - name: rke
+    purge_networks: true
+
+  - name: kubernetes-node-1
+    image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+    pre_build_image: true
+    privileged: true
+    override_command: false
+    restart_policy: unless-stopped
+    volumes:
+      - /var/lib/kubelet
+      - /var/lib/docker
+    env:
+      container: docker
+    groups:
+      - kubernetes
+    networks:
+      - name: rke
+    purge_networks: true
+
+  - name: kubernetes-node-2
+    image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos}:${PREBUILD_DISTRO_VERSION:-centos7.6}
+    pre_build_image: true
+    privileged: true
+    override_command: false
+    restart_policy: unless-stopped
+    volumes:
+      - /var/lib/kubelet
+      - /var/lib/docker
+    env:
+      container: docker
+    groups:
+      - kubernetes
+    networks:
+      - name: rke
+    purge_networks: true
+
+provisioner:
+  name: ansible
+  env:
+    ANSIBLE_ROLES_PATH: ../../../../test/roles
+    ANSIBLE_LIBRARY: ../../../../library
+  inventory:
+    links:
+      group_vars: ../../../../group_vars
+  options:
+    e: "app_data_path=/opt/onap"
+  lint:
+    name: ansible-lint
+scenario:
+  name: default
+verifier:
+  name: testinfra
+  lint:
+    name: flake8
--- /dev/null
+---
+# Share each host's primary IPv4 address as cluster_ip, then run the rke
+# role in its three modes: config (infra), node (all k8s hosts), deploy.
+- name: "Set cluster_ip"
+  hosts: all
+  tasks:
+    - name: "Set cluster_ip fact"
+      set_fact:
+        cluster_ip: "{{ ansible_default_ipv4.address }}"
+
+- name: Configure kubernetes cluster (RKE)
+  hosts: infrastructure
+  roles:
+    - role: rke
+      vars:
+        mode: config
+
+- name: Prepare kubernetes nodes (RKE)
+  hosts:
+    - kubernetes
+    - kubernetes-control-plane
+  roles:
+    - role: rke
+      vars:
+        mode: node
+
+- name: Deploy kubernetes cluster (RKE)
+  hosts: infrastructure
+  roles:
+    - role: rke
+      vars:
+        mode: deploy
--- /dev/null
+---
+# Molecule prepare step: common host preparation everywhere, then
+# infrastructure-only preparation.
+- name: "Prepare hosts"
+  hosts: all
+  roles:
+    - role: prepare-rke
+      vars:
+        mode: all
+    - prepare-docker-dind
+
+- name: "Infra specific preparations"
+  hosts: infrastructure
+  roles:
+    - role: prepare-rke
+      vars:
+        mode: infra
--- /dev/null
+import os
+import pytest
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+ os.environ['MOLECULE_INVENTORY_FILE']).get_hosts(
+ 'kubernetes-control-plane')
+
+
+@pytest.mark.parametrize('container_name', [
+ 'kube-apiserver', 'kube-controller-manager', 'kube-scheduler', 'kubelet'])
+def test_container_running(host, container_name):
+ assert host.docker(container_name).is_running
--- /dev/null
+import os
+import pytest
+import json
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+ os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('infrastructure')
+
+
+@pytest.mark.parametrize('filename', [
+ '/root/.kube/config',
+ '/opt/onap/cluster/cluster.yml',
+ '/opt/onap/cluster/cluster.rkestate'])
+def test_file_existence(host, filename):
+ assert host.file(filename).exists
+
+
+def test_rke_in_path(host):
+ assert host.find_command('rke') == '/usr/local/bin/rke'
+
+
+def test_rke_version_works(host):
+ # Note that we need to cd to the cluster data dir first, really.
+ assert host.run('cd /opt/onap/cluster && rke version').rc == 0
+
+
+def test_nodes_ready(host):
+ # Retrieve all node names.
+ nodecmdres = host.run('kubectl get nodes -o name')
+ assert nodecmdres.rc == 0
+ nodes = nodecmdres.stdout.split('\n')
+ for node in nodes:
+ assert host.run(
+ 'kubectl wait --timeout=0 --for=condition=ready ' + node).rc == 0
+
+
+def test_pods_ready(host):
+ # Retrieve all pods from all namespaces.
+ # Because we need pod and namespace name, we get full json representation.
+ podcmdres = host.run('kubectl get pods --all-namespaces -o json')
+ assert podcmdres.rc == 0
+ pods = json.loads(podcmdres.stdout)['items']
+ for pod in pods:
+ # Each pod may be either created by a job or not.
+ # In job case they should already be completed
+ # when we are here so we ignore them.
+ namespace = pod['metadata']['namespace']
+ podname = pod['metadata']['name']
+ condition = 'Ready'
+ if len(pod['metadata']['ownerReferences']) == 1 and pod[
+ 'metadata']['ownerReferences'][0]['kind'] == 'Job':
+ continue
+ assert host.run(
+ 'kubectl wait --timeout=120s --for=condition=' + condition + ' -n ' +
+ namespace + ' pods/' + podname).rc == 0
--- /dev/null
+import os
+import pytest
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+ os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('kubernetes')
+
+
+@pytest.mark.parametrize('container_name', [
+ 'etcd', 'kubelet', 'kube-proxy'])
+def test_container_running(host, container_name):
+ assert host.docker(container_name).is_running
template:
src: cluster.yml.j2
dest: "{{ cluster_config_dir }}/cluster.yml"
+ register: cluster_yml
- name: Prepare rke addon manifest (dashboard)
template:
---
+- name: "Check if rke is deployed"
+ command: "rke version"
+ args:
+ chdir: "{{ cluster_config_dir }}"
+ failed_when: false
+ changed_when: false
+ register: rke_deployed
+
- name: Run rke up
command: "{{ rke_bin_dir }}/rke up --config cluster.yml"
args:
chdir: "{{ cluster_config_dir }}"
+ when: rke_deployed.rc != 0 or cluster_yml.changed # noqa 503
- name: Ensure .kube directory is present
file:
--- /dev/null
+---
+# The rke release to download (tag v{{ rke_version }} on rancher/rke).
+rke_version: 0.2.0
+# The kubectl release to install (v{{ kubectl_version }}).
+kubectl_version: 1.13.5
--- /dev/null
+---
+# This is needed because login from non-root users is blocked by default;
+# starting systemd-user-sessions lifts that block.
+- name: "Allow non root logins"
+  service:
+    name: systemd-user-sessions
+    state: started
--- /dev/null
+---
+- name: "Ensure {{ app_data_path }} exists"
+  file:
+    path: "{{ app_data_path }}/downloads"
+    state: directory
+
+# NOTE(review): the downloaded binary is staged under downloads/ without an
+# executable mode — presumably a later (infra) task installs it to
+# /usr/local/bin/rke with the right permissions; confirm before changing.
+- name: "Install rke-{{ rke_version }}"
+  get_url:
+    url: "https://github.com/rancher/rke/releases/download/v{{ rke_version }}/rke_linux-amd64"
+    dest: "{{ app_data_path }}/downloads/rke"
+
+- name: "Install kubectl-{{ kubectl_version }}"
+  get_url:
+    url: "https://storage.googleapis.com/kubernetes-release/release/v{{ kubectl_version }}/bin/linux/amd64/kubectl"
+    dest: "/usr/local/bin/kubectl"
+    # Quoted so YAML does not reinterpret the octal literal.
+    mode: "0755"
--- /dev/null
+---
+# Dispatch to a mode-specific task file; "mode" is supplied by the calling
+# play (role vars), so "{{ mode }}.yml" must exist for every mode used.
+- include_tasks: "{{ mode }}.yml"