#This is the environment heat template, compatible with OpenStack Ocata.
heat_template_version: 2017-02-24
description: "Heat template for deploying the ONAP environment"
    label: "Auth public key"
    description: "The public key used to authenticate to instances"
    label: "name of node flavor"
    description: "The name of the flavor used to create Kubernetes nodes"
      - custom_constraint: nova.flavor
        description: "must specify a valid flavor"
    label: "name of infra flavor"
    description: "Flavor used to create the infra instance"
      - custom_constraint: nova.flavor
        description: "must specify a valid flavor"
  installer_flavor_name:
    label: "name of installer flavor"
    description: "Flavor used to create the installer instance"
      - custom_constraint: nova.flavor
        description: "must specify a valid flavor"
    description: "Name of the image from which to create all instances; should be a RHEL/CentOS 7.9 image"
      - custom_constraint: glance.image
        description: "must specify a valid image name"
    label: "private subnet cidr"
    description: "CIDR of the private subnet instances will be connected to"
      - custom_constraint: net_cidr
    label: "subnet dhcp allocation range start"
    description: "Start of the DHCP allocation range on the private subnet"
      - custom_constraint: ip_addr
    label: "end of subnet dhcp allocation range"
    description: "End of the private subnet's DHCP allocation range"
      - custom_constraint: ip_addr
    label: "ip address of router"
    description: "IP address of the router allowing access to other networks, including the company network"
      - custom_constraint: ip_addr
    label: "dns resolvers"
    description: "List of DNS resolvers"
    type: comma_delimited_list
    label: "name of the public network"
    description: "Name of the public, internet-facing network, which also allows access to company internal hosts"
      - custom_constraint: neutron.network
        description: "Must specify a valid network name or id"
    label: "external subnet cidr"
    description: "The CIDR of the external subnet that should remain accessible from instances even when internet access is cut off. Setting this to 0.0.0.0/0 allows full internet access."
      - custom_constraint: net_cidr
    label: "floating ip of the installer"
    description: "A pre-allocated floating IP that will be associated with the installer instance"
    label: "floating ip of the infra"
    description: "A pre-allocated floating IP that will be associated with the infrastructure instance"
    label: "floating ip of the first node"
    description: "A pre-allocated floating IP that will be associated with the first Kubernetes node, allowing access to ONAP"
    description: "The number of Kubernetes nodes to create, minimum 1"
        description: "must be a positive number"
    label: "use volume for nfs storage"
    description: "Indicates whether a Cinder volume should be used for NFS storage. If not checked, the NFS data is stored on the root disk."
    description: "ID of the network used for demo use cases"
    label: "nodes' docker storage size"
    description: "Size of the volume for the Docker storage on nodes"
  #Condition for NFS volume usage.
  use_volume_for_nfs: { get_param: use_volume_for_nfs }
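  #Resources marked below with "condition: use_volume_for_nfs" (the NFS volume
  #and its attachment) are only created when this condition evaluates to true.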
  # Security group used to secure access to instances.
    type: OS::Neutron::SecurityGroup
        # Egress rule allowing access to external_subnet_cidr.
          remote_ip_prefix: { get_param: external_subnet_cidr }
        # Ingress rule, also allowing inbound access from the external network.
          remote_ip_prefix: { get_param: external_subnet_cidr }
        # Allow outbound communication with the internal subnet.
          remote_ip_prefix: { get_param: subnet_cidr }
        # Allow inbound communication from the internal network.
          remote_ip_prefix: { get_param: subnet_cidr }
        # Allow outbound access to 169.254.0.0/16, mainly for metadata; inbound is not needed.
          remote_ip_prefix: 169.254.0.0/16
  #A network that our test environment will be connected to.
    type: OS::Neutron::Net
  #Subnet that instances will live in.
    type: OS::Neutron::Subnet
      network: { get_resource: privnet }
      cidr: { get_param: subnet_cidr }
        - { start: { get_param: subnet_range_start }, end: { get_param: subnet_range_end } }
      gateway_ip: { get_param: router_addr }
      dns_nameservers: { get_param: dns_nameservers }
  #A port connected to the private network, used by the router.
    type: OS::Neutron::Port
      network: { get_resource: privnet }
        - { subnet: { get_resource: privsubnet }, ip_address: { get_param: router_addr } }
      security_groups: [{ get_resource: secgroup }]
  #This is a router, routing between us and the internet.
  #It has an external gateway to the public network.
    type: OS::Neutron::Router
      external_gateway_info:
        network: { get_param: public_network_name }
  #This is a router interface connecting it to our private subnet's router port.
    type: OS::Neutron::RouterInterface
      router: { get_resource: router }
      port: { get_resource: routerport }
  #Key used to authenticate to instances as root.
    type: OS::Nova::KeyPair
      name: { get_param: "OS::stack_name" }
      public_key: { get_param: auth_key }
  #Handle used by instances to signal that they have started up.
  instance_wait_handle:
    type: OS::Heat::WaitConditionHandle
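  #The handle's curl_cli attribute expands to a pre-signed curl command, which
  #is passed to each instance as notify_command below. As an illustration, an
  #instance could signal success with something like:
  #  ${notify_command} --data-binary '{"status": "SUCCESS"}'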
  #Monitor waiting for all instances to start.
    type: OS::Heat::WaitCondition
      handle: { get_resource: instance_wait_handle }
          data: { num_nodes: { get_param: num_nodes } }
          #This is the number of all nodes + 2 (the infra and installer instances).
          expression: "$.data.num_nodes + 2"
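          #For example, with num_nodes set to 3 the wait condition expects
          #3 + 2 = 5 success signals before the stack is considered created.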
  #Affinity policy: spread nodes across as many physical machines as possible (anti-affinity).
    type: OS::Nova::ServerGroup
      name: k8s nodes on separate computes
  #Resource group deploying num_nodes nodes from the node template, numbered starting at 0.
    type: OS::Heat::ResourceGroup
      count: { get_param: num_nodes }
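      #Each group member is rendered from the node subtemplate referenced in
      #resource_def; OS::Heat::ResourceGroup substitutes the member's index
      #(0, 1, ...) for %index% there, which is presumably how the nodes get
      #their distinct names (node0, node1, ...).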
          key_name: { get_resource: key }
          image_name: { get_param: image_name }
          network: { get_resource: privnet }
          subnet: { get_resource: privsubnet }
          flavor_name: { get_param: node_flavor_name }
          notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
          security_group: { get_resource: secgroup }
          demo_network: { get_param: demo_network }
          docker_storage_size: { get_param: docker_storage_size }
            group: { get_resource: anti_affinity_group }
    depends_on: [routercon, instance_wait_handle]
  #NFS storage volume for the first node.
    type: OS::Cinder::Volume
    condition: use_volume_for_nfs
  #Attachment of the volume to the first node.
  nfs_storage_attachment:
    type: OS::Cinder::VolumeAttachment
    condition: use_volume_for_nfs
      instance_uuid: { get_attr: [nodes, "resource.0"] }
      volume_id: { get_resource: nfs_storage }
  #Floating IP association for the first node only.
    type: OS::Neutron::FloatingIPAssociation
      floatingip_id: { get_param: node_ip }
      port_id: { get_attr: ["nodes", "resource.0.port_id"] }
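      #The attribute path "resource.0.port_id" addresses member 0 of the group
      #and reads that member's port_id output, i.e. the first node's port.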
  #OpenStack volume used for storing resources.
    type: "OS::Cinder::Volume"
      name: "resources_storage"
  #The infrastructure instance, created from a subtemplate.
    type: "instance.yaml"
      network: { get_resource: privnet }
      subnet: { get_resource: privsubnet }
      key_name: { get_resource: key }
      flavor_name: { get_param: infra_flavor_name }
      image_name: { get_param: image_name }
      notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
      security_group: { get_resource: secgroup }
      demo_network: { get_param: demo_network }
    depends_on: [instance_wait_handle]
  #Volume attachment for the infra node.
  resources_storage_attachment:
    type: OS::Cinder::VolumeAttachment
      volume_id: { get_resource: resources_storage }
      instance_uuid: { get_resource: infra }
  #Floating IP association for the infra instance.
    type: OS::Neutron::FloatingIPAssociation
      floatingip_id: { get_param: infra_ip }
      port_id: { get_attr: ["infra", "port_id"] }
  #Small installer VM with access to the other instances, used to install ONAP.
    type: "instance.yaml"
      instance_name: installer
      image_name: { get_param: image_name }
      flavor_name: { get_param: installer_flavor_name }
      key_name: { get_resource: key }
      network: { get_resource: privnet }
      subnet: { get_resource: privsubnet }
      notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
      security_group: { get_resource: secgroup }
    depends_on: instance_wait_handle
  #Floating IP for the installer.
    type: OS::Neutron::FloatingIPAssociation
      floatingip_id: { get_param: installer_ip }
      port_id: { get_attr: [installer, port_id] }
  #Map of node volumes, built from the nodes' "volumes" output attribute.
    type: OS::Heat::Value
      #We need a yaql transformation to be done on the volume map.
          #This is a map from node number to the value of the "volumes" attribute,
          #which contains a list of volumes written as [volume_id, mountpoint] pairs.
            volumes: { get_attr: [nodes, attributes, volumes] }
          #We need yaql expressions to transform node numbers into node names of the
          #form "node0"; nothing more complicated is required.
          expression: "$.data.volumes?.items()?.toDict('node'+str($[0]), $[1])"
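          #Illustrative example: given volumes = {"0": [["<vol-id>", "/dockerdata-nfs"]]},
          #the expression yields {"node0": [["<vol-id>", "/dockerdata-nfs"]]}.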
  #List of infra-specific volumes (not a map as above).
    type: OS::Heat::Value
        - [{ get_resource: resources_storage }, "/opt/onap"]
  #Contains the node0-specific volume list.
    type: OS::Heat::Value
      #Note that it resolves to an empty list if the NFS volume is disabled.
          - - [{ get_resource: nfs_storage }, "/dockerdata-nfs"]
    value: { get_attr: [privnet, name] }
    description: "Name of the private network"
    value: { get_resource: privnet }
    description: "ID of the private network"
    value: { get_resource: privsubnet }
    description: "ID of the private subnet"
    value: { get_attr: [installer, ip] }
    description: "Internal IP of the installer instance"
    value: { get_attr: [infra, ip] }
    description: "Internal IP of the infra instance"
    value: { get_attr: [nodes, ip] }
    description: "Serialized JSON list of node internal IPs, starting at node0"
    description: "Map of volumes per instance"
    #Deep merging can only be done with yaql.
          node_volumes: { get_attr: [node_volumes, value] }
          infra_volumes: { infra: { get_attr: [infra_volumes, value] } }
          node0_volumes: { node0: { get_attr: [node0_volumes, value] } }
        expression: "$.data.node_volumes?.mergeWith($.data.infra_volumes)?.mergeWith($.data.node0_volumes)"
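        #Illustrative result, assuming one node and the NFS volume enabled:
        #  {"node0": [..node volumes.., [<nfs-vol-id>, "/dockerdata-nfs"]],
        #   "infra": [[<resources-vol-id>, "/opt/onap"]]}
        #mergeWith() merges the maps recursively, combining lists that share a key.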