1 # This is the environment Heat template, compatible with OpenStack Ocata.
2 heat_template_version: 2017-02-24
3 description: "Heat template for deploying onap env"
6 label: "Auth public key"
7 description: "The public key used to authenticate to instances"
10 label: "name of node flavor"
11 description: "The name of the flavor used to create kubernetes nodes"
14 - custom_constraint: nova.flavor
15 description: "need to specify a valid flavor"
17 label: "name of infra flavor"
18 description: "flavor used to create infra instance"
21 - custom_constraint: nova.flavor
22 description: "need to specify a valid flavor"
23 installer_flavor_name:
24 label: "name of installer flavor"
25 description: "flavor used to create installer instance"
28 - custom_constraint: nova.flavor
29 description: "need to specify a valid flavor"
32 description: "name of the image from which to create all instances, should be rhel/centos 7.9 image"
35 - custom_constraint: glance.image
36 description: "must specify a valid image name"
38 label: "availability zone"
39 description: "availability zone to use for scheduling instances"
42 label: "private subnet cidr"
43 description: "Cidr of a private subnet instances will be connected to"
46 - custom_constraint: net_cidr
48 label: "subnet dhcp allocation range start"
49 description: "Start of range of dhcp allocatable ips on private subnet"
52 - custom_constraint: ip_addr
54 label: "end of subnet dhcp allocation range"
55 description: "End of private subnet's dhcp allocation range"
58 - custom_constraint: ip_addr
60 label: "ip address of router"
61 description: "IP address of the router allowing access to other networks incl. company network"
64 - custom_constraint: ip_addr
66 label: "dns resolvers"
67 description: "List of dns resolvers"
68 type: comma_delimited_list
70 label: "name of the public network"
71 description: "Name of the public, internet facing network, also allowing access to company internal hosts"
74 - custom_constraint: neutron.network
75 description: "Must specify a valid network name or id"
77 label: "external subnet cidr"
78 description: "The CIDR of the external subnet, that should be accessible from instances, even when internet access is cut. Putting 0.0.0.0/0 here means access to internet."
81 - custom_constraint: net_cidr
83 label: "floating ip of the installer"
84 description: "a pre-allocated floating ip that will be associated with the installer instance"
87 label: "floating ip of the infra"
88 description: "a pre-allocated floating ip that will be associated with the infrastructure instance"
91 label: "floating ip of the first node"
92 description: "a pre-allocated floating ip that will be associated with the first kubernetes node and allow accessing onap"
96 description: "the number of kubernetes nodes to create, min 1"
100 description: "must be a positive number"
103 label: "use volume for nfs storage"
104 description: "Indicates whether a cinder volume should be used for nfs storage or not. If not checked, the nfs would be stored in the root disk"
108 description: "specifies id of network used for demo usecases"
111 label: "nodes' docker storage size"
113 description: "Size of the volume for the docker storage on nodes"
115 # Condition for NFS volume usage.
116 use_volume_for_nfs: { get_param: use_volume_for_nfs }
118 # Security group used to secure access to instances.
120 type: OS::Neutron::SecurityGroup
123 # Egress rule allowing access to external_subnet_cidr.
126 remote_ip_prefix: { get_param: external_subnet_cidr }
127 # Ingress rule, allowing also inbound access by external network.
130 remote_ip_prefix: { get_param: external_subnet_cidr }
131 # Allow outbound communication with the internal subnet.
134 remote_ip_prefix: { get_param: subnet_cidr }
135 # Allow inbound communication from internal network.
138 remote_ip_prefix: { get_param: subnet_cidr }
139 # Allow outbound access to 169.254.0.0/16, mainly for metadata. We do not need inbound.
142 remote_ip_prefix: 169.254.0.0/16
143 # A network that our test environment will be connected to.
145 type: OS::Neutron::Net
146 #Subnet that instances will live in.
148 type: OS::Neutron::Subnet
150 network: { get_resource: privnet }
151 cidr: { get_param: subnet_cidr }
153 - { start: { get_param: subnet_range_start }, end: { get_param: subnet_range_end } }
154 gateway_ip: { get_param: router_addr }
155 dns_nameservers: { get_param: dns_nameservers }
157 #A port connected to the private network, taken by router.
159 type: OS::Neutron::Port
161 network: { get_resource: privnet }
163 - { subnet: { get_resource: privsubnet }, ip_address: { get_param: router_addr } }
164 security_groups: [{ get_resource: secgroup }]
165 #This is a router, routing between us and the internet.
166 #It has an external gateway to public network.
168 type: OS::Neutron::Router
170 external_gateway_info:
171 network: { get_param: public_network_name }
172 #This is a router interface connecting it to our private subnet's router port.
174 type: OS::Neutron::RouterInterface
176 router: { get_resource: router }
177 port: { get_resource: routerport }
179 #Key used to authenticate to instances as root.
181 type: OS::Nova::KeyPair
183 name: { get_param: "OS::stack_name" }
184 public_key: { get_param: auth_key }
185 # Wait-condition handle used by instances to signal that they have started up.
186 instance_wait_handle:
187 type: OS::Heat::WaitConditionHandle
188 #Monitor waiting for all instances to start.
190 type: OS::Heat::WaitCondition
192 handle: { get_resource: instance_wait_handle }
196 data: { num_nodes: { get_param: num_nodes } }
197 # This is the number of all nodes + 2 (the infra instance and the installer).
198 expression: "$.data.num_nodes + 2"
199 # Affinity policy - nodes are spread onto as many physical machines as possible (aka "anti-affinity").
201 type: OS::Nova::ServerGroup
203 name: k8s nodes on separate computes
206 #Resource group to deploy n nodes using node template for each, each node numbered starting from 0.
208 type: OS::Heat::ResourceGroup
210 count: { get_param: num_nodes }
215 key_name: { get_resource: key }
216 image_name: { get_param: image_name }
217 network: { get_resource: privnet }
218 subnet: { get_resource: privsubnet }
219 flavor_name: { get_param: node_flavor_name }
220 availability_zone: { get_param: availability_zone }
221 notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
222 security_group: { get_resource: secgroup }
223 demo_network: { get_param: demo_network }
224 docker_storage_size: { get_param: docker_storage_size }
226 group: { get_resource: anti_affinity_group }
227 depends_on: [routercon, instance_wait_handle]
228 #Nfs storage volume for first node.
230 type: OS::Cinder::Volume
231 condition: use_volume_for_nfs
235 #Attachment of volume to first node.
236 nfs_storage_attachment:
237 type: OS::Cinder::VolumeAttachment
238 condition: use_volume_for_nfs
240 instance_uuid: { get_attr: [nodes, "resource.0"] }
241 volume_id: { get_resource: nfs_storage }
242 # Floating IP association for the first node only.
244 type: OS::Neutron::FloatingIPAssociation
246 floatingip_id: { get_param: node_ip }
247 port_id: { get_attr: ["nodes", "resource.0.port_id"] }
248 # OpenStack volume used for storing resources.
250 type: "OS::Cinder::Volume"
252 name: "resources_storage"
254 # Infrastructure instance, created using the instance.yaml subtemplate.
256 type: "instance.yaml"
259 network: { get_resource: privnet }
260 subnet: { get_resource: privsubnet }
261 key_name: { get_resource: key }
262 flavor_name: { get_param: infra_flavor_name }
263 availability_zone: { get_param: availability_zone }
264 image_name: { get_param: image_name }
265 notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
266 security_group: { get_resource: secgroup }
268 demo_network: { get_param: demo_network }
269 depends_on: [instance_wait_handle]
270 #Volume attachment for infra node.
271 resources_storage_attachment:
272 type: OS::Cinder::VolumeAttachment
274 volume_id: { get_resource: resources_storage }
275 instance_uuid: { get_resource: infra }
276 # Floating IP association for the infra instance.
278 type: OS::Neutron::FloatingIPAssociation
280 floatingip_id: { get_param: infra_ip }
281 port_id: { get_attr: ["infra", "port_id"] }
282 # Small installer VM with access to the other instances, used to install ONAP.
284 type: "instance.yaml"
286 instance_name: installer
287 image_name: { get_param: image_name }
288 flavor_name: { get_param: installer_flavor_name }
289 availability_zone: { get_param: availability_zone }
290 key_name: { get_resource: key }
291 network: { get_resource: privnet }
292 subnet: { get_resource: privsubnet }
293 notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
294 security_group: { get_resource: secgroup }
296 depends_on: instance_wait_handle
297 # Floating IP association for the installer.
299 type: OS::Neutron::FloatingIPAssociation
301 floatingip_id: { get_param: installer_ip }
302 port_id: { get_attr: [installer, port_id] }
303 #Map of node volumes, taken from volumes output param.
305 type: OS::Heat::Value
308 #We need yaql transformation to be done on the volume map.
312 #This is a map of node number to value of "volumes" attribute, that contains
313 #a list of volumes written as pairs [volumeid, mountpoint].
314 volumes: { get_attr: [nodes, attributes, volumes] }
315 #We need yaql expressions to transform node numbers to node names in the form "node0" and similar.
316 #However we don't need anything more complicated.
317 expression: "$.data.volumes?.items()?.toDict('node'+str($[0]), $[1])"
318 #List of infra specific volumes (not a map as above).
320 type: OS::Heat::Value
323 - [{ get_resource: resources_storage }, "/opt/onap"]
324 #Contains node0 specific volume list.
326 type: OS::Heat::Value
328 #Note that it returns an empty list if nfs volume is disabled.
332 - - [{ get_resource: nfs_storage }, "/dockerdata-nfs"]
337 value: {get_attr: [privnet, name] }
338 description: "Name of private network"
340 value: { get_resource: privnet }
341 description: "ID of private network"
343 value: { get_resource: privsubnet }
344 description: "ID of private subnet"
346 value: { get_attr: [installer, ip] }
347 description: "Internal ip of installer instance"
349 value: { get_attr: [infra, ip] }
350 description: "Internal ip of infra instance"
352 value: { get_attr: [nodes, ip] }
353 description: "Serialized json list of node internal ips starting at node0"
355 description: "map of volumes per each instance"
357 # Deep merging of the volume maps can only be done with yaql.
360 node_volumes: { get_attr: [node_volumes, value]}
361 infra_volumes: { infra: { get_attr: [infra_volumes, value] }}
362 node0_volumes: {node0: { get_attr: [node0_volumes, value] }}
363 expression: "$.data.node_volumes?.mergeWith($.data.infra_volumes)?.mergeWith($.data.node0_volumes)"