type: string
constraints:
- custom_constraint: ip_addr
+ dns_nameservers:
+ label: "dns resolvers"
+ description: "List of dns resolvers"
+ type: comma_delimited_list
public_network_name:
label: "name of the public network"
description: "Name of the public, internet facing network, also allowing access to company internal hosts"
constraints:
- range: { min: 1 }
description: "must be a positive number"
+ use_volume_for_nfs:
+ type: boolean
+ label: "use volume for nfs storage"
+ description: "Indicates whether a cinder volume should be used for nfs storage or not. If not checked, the nfs would be stored in the root disk"
+ demo_network:
+ label: "demo net id"
+ type: string
+ description: "specifies id of network used for demo usecases"
+ default: ""
+ docker_storage_size:
+ label: "nodes' docker storage size"
+ type: number
+ description: "Size of the volume for the docker storage on nodes"
+conditions:
+ #Condition for nfs volume usage.
+ use_volume_for_nfs: { get_param: use_volume_for_nfs }
resources:
# Security group used to secure access to instances.
secgroup:
allocation_pools:
- { start: { get_param: subnet_range_start }, end: { get_param: subnet_range_end } }
gateway_ip: { get_param: router_addr }
+ dns_nameservers: { get_param: dns_nameservers }
ip_version: 4
#A port connected to the private network, taken by router.
routerport:
data: { num_nodes: { get_param: num_nodes } }
#This is number of all nodes + 2 (infra instance and installer)
expression: "$.data.num_nodes + 2"
+ #Affinity Policy - nodes spread onto as many physical machines as possible (aka. .anti-affinity.).
+ anti_affinity_group:
+ type: OS::Nova::ServerGroup
+ properties:
+ name: k8s nodes on separate computes
+ policies:
+ - anti-affinity
#Resource group to deploy n nodes using node template for each, each node numbered starting from 0.
nodes:
type: OS::Heat::ResourceGroup
flavor_name: { get_param: node_flavor_name }
notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
security_group: { get_resource: secgroup }
+ demo_network: { get_param: demo_network }
+ docker_storage_size: { get_param: docker_storage_size }
+ scheduler_hints:
+ group: { get_resource: anti_affinity_group }
depends_on: [routercon, instance_wait_handle]
#Nfs storage volume for first node.
nfs_storage:
type: OS::Cinder::Volume
+ condition: use_volume_for_nfs
properties:
name: nfs_storage
size: 50
#Attachment of volume to first node.
nfs_storage_attachment:
type: OS::Cinder::VolumeAttachment
+ condition: use_volume_for_nfs
properties:
instance_uuid: { get_attr: [nodes, "resource.0"] }
volume_id: { get_resource: nfs_storage }
image_name: { get_param: image_name }
notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
security_group: { get_resource: secgroup }
+ scheduler_hints: {}
+ demo_network: { get_param: demo_network }
depends_on: [instance_wait_handle]
#Volume attachment for infra node.
resources_storage_attachment:
subnet: { get_resource: privsubnet }
notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
security_group: { get_resource: secgroup }
+ scheduler_hints: {}
depends_on: instance_wait_handle
#Floating ip for installer.
installer_fip_assoc:
properties:
floatingip_id: { get_param: installer_ip }
port_id: { get_attr: [installer, port_id] }
+ #Map of node volumes, taken from volumes output param.
+ node_volumes:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ #We need yaql transformation to be done on the volume map.
+ value:
+ yaql:
+ data:
+ #This is a map of node number to value of "volumes" attribute, that contains
+ #a list of volumes written as pairs [volumeid, mountpoint].
+ volumes: { get_attr: [nodes, attributes, volumes] }
+ #We need yaql expressions to transform node numbers to node names in the form "node0" and similar.
+ #However we don't need anything more complicated.
+ expression: "$.data.volumes?.items()?.toDict('node'+str($[0]), $[1])"
+ #List of infra specific volumes (not a map as above).
+ infra_volumes:
+ type: OS::Heat::Value
+ properties:
+ value:
+ - [{ get_resource: resources_storage }, "/opt/onap"]
+ #Contains node0 specific volume list.
+ node0_volumes:
+ type: OS::Heat::Value
+ properties:
+ #Note that it returns an empty list if nfs volume is disabled.
+ value:
+ if:
+ - use_volume_for_nfs
+ - - [{ get_resource: nfs_storage }, "/dockerdata-nfs"]
+ - []
#Output values
outputs:
+ network_name:
+ value: {get_attr: [privnet, name] }
+ description: "Name of private network"
+ network_id:
+ value: { get_resource: privnet }
+ description: "ID of private network"
+ subnet_id:
+ value: { get_resource: privsubnet }
+ description: "ID of private subnet"
installer_ip:
value: { get_attr: [installer, ip] }
description: "Internal ip of installer instance"
volumes:
description: "map of volumes per each instance"
value:
+ #Can do deep merging only with yaql.
yaql:
data:
- resources_volid: { get_resource: resources_storage }
- nfs_volid: { get_resource: nfs_storage }
- docker_volids: { get_attr: [nodes, docker_storage_id] }
- #This is going to create a map, where keys are instance names, and values are lists of
- #pairs of volume ids and their mount points.
- #This is done by merging few generated maps together, base map is taken by
- #enumerating over docker storage volumes and transforming them into a map like
- #{"node0"=>["volid","/var/lib/docker"],...], node1=>...}
- expression: 'dict($.data.docker_volids.enumerate().select(["node"+str($[0]), [[$[1], "/var/lib/docker"]]])).mergeWith({"infra" => [[$.data.resources_volid, "/opt/onap"]], "node0" => [[$.data.nfs_volid, "/dockerdata-nfs"]]})'
+ node_volumes: { get_attr: [node_volumes, value]}
+ infra_volumes: { infra: { get_attr: [infra_volumes, value] }}
+ node0_volumes: {node0: { get_attr: [node0_volumes, value] }}
+ expression: "$.data.node_volumes?.mergeWith($.data.infra_volumes)?.mergeWith($.data.node0_volumes)"