--- /dev/null
+---
+# OpenStack-specific configuration running on instances.
+
+# Look up this host's volume list from the heat stack outputs gathered on
+# localhost; defaults to an empty list when the host has no volumes attached.
+- name: "get volume info"
+  set_fact:
+    volumes: "{{ (hostvars['localhost'].heat_stack.stack.outputs | selectattr('output_key', 'equalto', 'volumes') | list).0.output_value[inventory_hostname] | default([]) }}"
+
+# Each entry is a [volume_id, mountpoint] pair; partition, format and mount
+# each one via the per-volume task file.
+- name: "Configure volumes"
+  include_tasks: configure/volume.yml
+  vars:
+    volume_id: "{{ item[0] }}"
+    mountpoint: "{{ item[1] }}"
+  loop: "{{ volumes }}"
 
--- /dev/null
+---
+# Configure a single OpenStack volume: wait for the device node, partition
+# it, create an xfs filesystem and mount it. Expects `volume_id` and
+# `mountpoint` to be supplied by the caller.
+
+# Virtio exposes the volume under /dev/disk/by-id using only the first
+# 20 characters of the volume id.
+- name: "Set volume path"
+  set_fact:
+    volume_path: "/dev/disk/by-id/virtio-{{ volume_id | truncate(20, True, '') }}"
+
+- name: "Set partition path"
+  set_fact:
+    partition_path: "{{ volume_path }}-part1"
+
+- name: "Wait for volume"
+  # We do not use a plain wait, because we want to trigger udev
+  # (workaround for some bugs). `test -b` is POSIX, unlike the bashism
+  # `[[ -b ... ]]`, so this works even when the remote /bin/sh is not bash.
+  shell: "udevadm trigger && udevadm settle && test -b {{ volume_path }}"
+  register: result
+  retries: 30
+  delay: 10
+  until: result.rc == 0
+
+- name: "Partition volume"
+  parted:
+    device: "{{ volume_path }}"
+    number: 1
+    label: msdos
+    # `flags` takes a list of partition flags.
+    flags: [ boot ]
+    part_type: primary
+    state: present
+
+# parted may return before the kernel/udev expose the partition node.
+- name: "Wait for partition to appear"
+  stat:
+    path: "{{ partition_path }}"
+    follow: true
+  register: part_stat
+  delay: 1
+  retries: 5
+  until: part_stat.stat.isblk is defined and part_stat.stat.isblk
+
+- name: "Create xfs filesystem on volume"
+  filesystem:
+    dev: "{{ partition_path }}"
+    fstype: xfs
+
+- name: "Ensure that the mountpoint exists"
+  file:
+    path: "{{ mountpoint }}"
+    owner: root
+    group: root
+    # Quoted so YAML does not reinterpret the octal literal.
+    mode: "0755"
+    state: directory
+
+- name: "Mount filesystem"
+  mount:
+    src: "{{ partition_path }}"
+    path: "{{ mountpoint }}"
+    fstype: xfs
+    state: mounted
 
--- /dev/null
+# Tasks for stack redeployment.
+# Delete the heat stack before deployment; state=absent tears down the
+# existing stack and all of its resources (destructive).
+- name: "delete deployment to force redeploy"
+  os_stack:
+    auth: "{{ os_auth }}"
+    auth_type: token
+    name: "{{ stack_name }}"
+    state: absent
+# Deploy heat stack with infrastructure. The floating ip parameters are
+# looked up by address in `floating_ips_by_address`; the registered
+# `heat_stack` result is consumed later (e.g. when registering node
+# instances from the stack outputs).
+- name: "Deploy the infrastructure via heat"
+  os_stack:
+    auth: "{{ os_auth }}"
+    auth_type: token
+    name: "{{ stack_name }}"
+    template: "heat/installer.yaml"
+    state: present
+    environment:
+      - "heat/installer.env"
+    parameters:
+      num_nodes: "{{ num_nodes }}"
+      public_network_name: "{{ public_network }}"
+      external_subnet_cidr: "{{ external_subnet_cidr }}"
+      subnet_cidr: "{{ subnet_cidr }}"
+      subnet_range_start: "{{ subnet_range_start }}"
+      subnet_range_end: "{{ subnet_range_end }}"
+      router_addr: "{{ router_addr }}"
+      auth_key: "{{ auth_public_key }}"
+      image_name: "{{ image_name }}"
+      node_flavor_name: "{{ node_flavor_name }}"
+      infra_flavor_name: "{{ infra_flavor_name }}"
+      installer_flavor_name: "{{ installer_flavor_name }}"
+      node_ip: "{{ floating_ips_by_address[first_node_ip].id }}"
+      infra_ip: "{{ floating_ips_by_address[infra_ip].id }}"
+      installer_ip: "{{ floating_ips_by_address[installer_ip].id }}"
+    wait: true
+  register: heat_stack
 
--- /dev/null
+---
+# This mode expects some variables, and deploys infrastructure on OpenStack.
+# Execute prerequisites (auth token, floating ip lookup, ssh public key).
+- include_tasks: deploy/prereq.yml
+# Deploy stack (registers `heat_stack` with the stack outputs).
+- include_tasks: deploy/heat.yml
+# Register instances in inventory (adds node hosts to the `nodes` group).
+- include_tasks: deploy/register_instances.yml
 
--- /dev/null
+---
+# Prerequisite tasks before stack deployment.
+
+# Authenticate to the cloud with username/password; the os_auth module
+# sets the `auth_token` fact used below. NOTE(review): consider
+# `no_log: true` here so the password never appears in verbose output.
+- name: "authenticate to cloud"
+  os_auth:
+    auth:
+      auth_url: "{{ os_auth_url }}"
+      username: "{{ os_username }}"
+      password: "{{ os_password }}"
+      domain_name: "{{ os_domain_name }}"
+      project_name: "{{ os_project_name }}"
+      project_domain_name: "{{ os_domain_name }}"
+
+# Will use the token from this point on.
+- name: "set token"
+  set_fact:
+    os_auth:
+      auth_url: "{{ os_auth_url }}"
+      token: "{{ auth_token }}"
+      project_name: "{{ os_project_name }}"
+      project_domain_name: "{{ os_domain_name }}"
+
+# Retrieve floating ip info (sets the `openstack_floating_ips` fact).
+- name: "get floating ip facts"
+  os_floating_ips_facts:
+    auth: "{{ os_auth }}"
+    auth_type: token
+    network: "{{ public_network }}"
+
+# Group floating ips by ip address to allow looking them up.
+- name: "group floating ips by address"
+  set_fact:
+    floating_ips_by_address: "{{ floating_ips_by_address | default({}) | combine({item.floating_ip_address: item}) }}"
+  loop: "{{ query('items', openstack_floating_ips) }}"
+
+- name: "fail if required floating ips do not exist"
+  fail:
+    msg: "The required floating ips do not exist"
+  when: "(not (first_node_ip in floating_ips_by_address)
+    or not (infra_ip in floating_ips_by_address)
+    or not (installer_ip in floating_ips_by_address))"
+
+# Get a ssh public key to be passed to heat; requires ssh-keygen with the
+# -y option. The command is read-only, so never report "changed".
+- name: "Retrieve public key from ssh private key"
+  command: "ssh-keygen -y -f {{ hostvars['installer'].ansible_private_key_file }}"
+  register: public_key_generation
+  changed_when: false
+
+- name: "Set public key fact"
+  set_fact:
+    auth_public_key: "{{ public_key_generation.stdout }}"
 
--- /dev/null
+# Register instances as hosts in inventory.
+# Installer and infra are statically registered.
+# Register node instances dynamically: one inventory host per address in
+# the heat stack's `node_ips` output, named node0, node1, ...
+- name: "Register node instances"
+  add_host:
+    name: "node{{ node_index }}"
+    groups: nodes
+    ansible_host: "{{ node_address }}"
+  loop: "{{ (heat_stack.stack.outputs | selectattr('output_key', 'equalto', 'node_ips') | list).0.output_value }}"
+  loop_control:
+    loop_var: node_address
+    index_var: node_index
 
--- /dev/null
+- include_tasks: "{{ mode }}/main.yml"