# Merge "config for 5.0.0 policy-handler new PDP API"
# [dcaegen2/deployments.git] / bootstrap / installer-docker.sh-template
#!/bin/bash
#
# ============LICENSE_START==========================================
# ===================================================================
# Copyright © 2017-2018 AT&T Intellectual Property. All rights reserved.
# ===================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END============================================
#
20
21 # URLs for artifacts needed for installation
22 DESIGTYPES=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/type_files/dnsdesig/dns_types.yaml
23 DESIGPLUG=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/dnsdesig-1.0.0-py27-none-any.wgn
24 SSHKEYTYPES=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/type_files/sshkeyshare/sshkey_types.yaml
25 SSHKEYPLUG=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/sshkeyshare-1.0.0-py27-none-any.wgn
26 OSPLUGINZIP=https://github.com/cloudify-cosmo/cloudify-openstack-plugin/archive/1.4.zip
27 OSPLUGINWGN=https://github.com/cloudify-cosmo/cloudify-openstack-plugin/releases/download/2.2.0/cloudify_openstack_plugin-2.2.0-py27-none-linux_x86_64-centos-Core.wgn
28
29 PLATBPSRC=https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.blueprints/releases/blueprints
30 DOCKERBP=DockerBP.yaml
31 CBSBP=config_binding_service.yaml
32 PGBP=pgaas-onevm.yaml
33 CDAPBP=cdapbp7.yaml
34 CDAPBROKERBP=cdap_broker.yaml
35 INVBP=inventory.yaml
36 DHBP=DeploymentHandler.yaml
37 PHBP=policy_handler.yaml
38 VESBP=ves.yaml
39 TCABP=tca.yaml
40 HRULESBP=holmes-rules.yaml
41 HENGINEBP=holmes-engine.yaml
42 PRHBP=prh.yaml
43 HVVESBP=hv-ves.yaml
44
45 DOCKERBPURL="${PLATBPSRC}/${DOCKERBP}"
46 CBSBPURL="${PLATBPSRC}/${CBSBP}"
47 PGBPURL="${PLATBPSRC}/${PGBP}"
48 CDAPBPURL="${PLATBPSRC}/${CDAPBP}"
49 CDAPBROKERBPURL="${PLATBPSRC}/${CDAPBROKERBP}"
50 INVBPURL="${PLATBPSRC}/${INVBP}"
51 DHBPURL="${PLATBPSRC}/${DHBP}"
52 PHBPURL="${PLATBPSRC}/${PHBP}"
53 VESBPURL="${PLATBPSRC}/${VESBP}"
54 TCABPURL="${PLATBPSRC}/${TCABP}"
55 HRULESBPURL="${PLATBPSRC}/${HRULESBP}"
56 HENGINEBPURL="${PLATBPSRC}/${HENGINEBP}"
57 PRHBPURL="${PLATBPSRC}/${PRHBP}"
58 HVVESBPURL="${PLATBPSRC}/${HVVESBP}"
59
60 LOCATIONID=$(printenv LOCATION)
61
62 # Make sure ssh doesn't prompt for new host or choke on a new host with an IP it's seen before
63 SSHOPTS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
64 STARTDIR=$(pwd)
65
66 # clear out files for writing out floating IP addresses
67 rm -f "$STARTDIR"/config/runtime.ip.consul
68 rm -f "$STARTDIR"/config/runtime.ip.cm
69
70
71 SSHUSER=centos
72 PVTKEY=./config/key
73 INPUTS=./config/inputs.yaml
74
75 if [ "$LOCATION" = "" ]
76 then
77         echo 'Environment variable LOCATION not set.  Should be set to location ID for this installation.'
78         exit 1
79 fi
80
81 set -e
82 set -x
83
84 # Docker workaround for SSH key
85 # In order for the container to be able to access the key when it's mounted from the Docker host,
86 # the key file has to be world-readable.   But ssh itself will not work with a private key that's world readable.
87 # So we make a copy and change permissions on the copy.
88 # NB -- the key on the Docker host has to be world-readable, which means that, from the host machine, you
89 # can't use it with ssh.  It needs to be a world-readable COPY.
90 PVTKEY=./key600
91 cp ./config/key ${PVTKEY}
92 chmod 600 ${PVTKEY}
93
94 # Create a virtual environment
95 virtualenv dcaeinstall
96 source dcaeinstall/bin/activate
97
98 # forcing pip version (pip>=10.0.0 no longer support use wheel)
99 pip install pip==9.0.3 
100
101 # Install Cloudify
102 pip install cloudify==3.4.0
103
104 # Install the Cloudify OpenStack plugin 
105 wget -qO- ${OSPLUGINZIP} > openstack.zip
106 pip install openstack.zip
107
108 # Spin up a VM
109
110 # Get the Designate and SSH key type files and plugins
111 mkdir types
112 wget -qO- ${DESIGTYPES} > types/dns_types.yaml
113 wget -qO- ${SSHKEYTYPES} > types/sshkey_types.yaml
114
115 wget -O dnsdesig.wgn ${DESIGPLUG}
116 wget -O sshkeyshare.wgn ${SSHKEYPLUG}
117
118 wagon install -s dnsdesig.wgn
119 wagon install -s sshkeyshare.wgn
120
121 ## Fix up the inputs file to get the private key locally
122 sed -e "s#key_filename:.*#key_filename: $PVTKEY#" < ${INPUTS} > /tmp/local_inputs
123
124 # Now install the VM
125 # Don't exit on error after this point--keep container running so we can do uninstalls after a failure
126 set +e
127 if wget -O /tmp/centos_vm.yaml "${PLATBPSRC}"/centos_vm.yaml; then
128   mv -f /tmp/centos_vm.yaml ./blueprints/
129   echo "Succeeded in getting the newest centos_vm.yaml"
130 else
131   echo "Failed to update centos_vm.yaml, using default version"
132   rm -f /tmp/centos_vm.yaml
133 fi
134 set -e
135 cfy local init --install-plugins -p ./blueprints/centos_vm.yaml -i /tmp/local_inputs -i "datacenter=$LOCATION"
136 cfy local execute -w install --task-retries=10
137 PUBIP=$(cfy local outputs | grep -Po '"public_ip": "\K.*?(?=")')
138
139 # wait till the cloudify manager's sshd ready
140 while ! nc -z -v -w5 ${PUBIP} 22; do echo "."; done
141 sleep 10
142
143 echo "Installing Cloudify Manager on ${PUBIP}."
144 PVTIP=$(ssh $SSHOPTS -i "$PVTKEY" "$SSHUSER"@"$PUBIP" 'echo PVTIP=`curl --silent http://169.254.169.254/2009-04-04/meta-data/local-ipv4`' | grep PVTIP | sed 's/PVTIP=//')
145 if [ "$PVTIP" = "" ]
146 then
147     echo Cannot access specified machine at $PUBIP using supplied credentials
148     exit
149 fi
150
151
152 # Copy private key onto Cloudify Manager VM
153 PVTKEYPATH=$(cat ${INPUTS} | grep "key_filename" | cut -d "'" -f2)
154 PVTKEYNAME=$(basename $PVTKEYPATH)
155 PVTKEYDIR=$(dirname $PVTKEYPATH)
156 scp  $SSHOPTS -i $PVTKEY $PVTKEY $SSHUSER@$PUBIP:/tmp/$PVTKEYNAME
157 ssh -t $SSHOPTS -i $PVTKEY $SSHUSER@$PUBIP sudo mkdir -p $PVTKEYDIR
158 ssh -t  $SSHOPTS -i $PVTKEY $SSHUSER@$PUBIP sudo mv /tmp/$PVTKEYNAME $PVTKEYPATH
159
160 ESMAGIC=$(uuidgen -r)
161 WORKDIR=$HOME/cmtmp
162 BSDIR=$WORKDIR/cmbootstrap
163 PVTKEY2=$BSDIR/id_rsa.cfybootstrap
164 TMPBASE=$WORKDIR/tmp
165 TMPDIR=$TMPBASE/lib
166 SRCS=$WORKDIR/srcs.tar
167 TOOL=$WORKDIR/tool.py
168 rm -rf $WORKDIR
169 mkdir -p $BSDIR $TMPDIR/cloudify/wheels $TMPDIR/cloudify/sources $TMPDIR/manager
170 chmod 700 $WORKDIR
171 cp "$PVTKEY" $PVTKEY2
172 cat >$TOOL <<!EOF
173 #!/usr/local/bin/python
174 #
175 import yaml
176 import sys
177 bsdir = sys.argv[1]
178 with open(bsdir + '/simple-manager-blueprint-inputs.yaml', 'r') as f:
179   inpyaml = yaml.load(f)
180 with open(bsdir + '/simple-manager-blueprint.yaml', 'r') as f:
181   bpyaml = yaml.load(f)
182 for param, value in bpyaml['inputs'].items():
183   if value.has_key('default') and not inpyaml.has_key(param):
184     inpyaml[param] = value['default']
185 print inpyaml['manager_resources_package']
186 !EOF
187
188 #
189 #       Try to disable attempt to download virtualenv when not needed
190 #
191 ssh $SSHOPTS -t -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "echo y; mkdir -p /root/.virtualenv; echo '"'"'[virtualenv]'"'"' >/root/.virtualenv/virtualenv.ini; echo no-download=true >>/root/.virtualenv/virtualenv.ini"'
192
193 # Gather installation artifacts
194 # from documentation, URL for manager blueprints archive
195 BSURL=https://github.com/cloudify-cosmo/cloudify-manager-blueprints/archive/3.4.tar.gz
196 BSFILE=$(basename $BSURL)
197
198 umask 022
199 wget -qO- $BSURL >$BSDIR/$BSFILE
200 cd $BSDIR
201 tar xzvf $BSFILE
202 MRPURL=$(python $TOOL $BSDIR/cloudify-manager-blueprints-3.4)
203 MRPFILE=$(basename $MRPURL)
204 wget -qO- $MRPURL >$TMPDIR/cloudify/sources/$MRPFILE
205
206 tar cf $SRCS -C $TMPDIR cloudify
207 rm -rf $TMPBASE
208 #
209 # Load required package files onto VM
210 #
211 scp $SSHOPTS -i $PVTKEY2 $SRCS $SSHUSER@$PUBIP:/tmp/.
212 ssh -t $SSHOPTS -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "cd /opt; tar xf /tmp/srcs.tar; chown -R root:root /opt/cloudify /opt/manager; rm -rf /tmp/srcs.tar"'
213 #
214 #       Install config file -- was done by DCAE controller.  What now?
215 #
216 ssh $SSHOPTS -t -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc '"'"'mkdir -p /opt/dcae; if [ -f /tmp/cfy-config.txt ]; then cp /tmp/cfy-config.txt /opt/dcae/config.txt && chmod 644 /opt/dcae/config.txt; fi'"'"
217 cd $WORKDIR
218
219 #
220 #       Check for and set up https certificate information
221 #
222 rm -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.key $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.crt
223 ssh -t $SSHOPTS -i $PVTKEY2 $SSHUSER@$PUBIP 'sudo bash -xc "openssl pkcs12 -in /opt/app/dcae-certificate/certificate.pkcs12 -passin file:/opt/app/dcae-certificate/.password -nodes -chain"' | awk 'BEGIN{x="/dev/null";}/-----BEGIN CERTIFICATE-----/{x="'$BSDIR'/cloudify-manager-blueprints-3.4/resources/ssl/server.crt";}/-----BEGIN PRIVATE KEY-----/{x="'$BSDIR'/cloudify-manager-blueprints-3.4/resources/ssl/server.key";}{print >x;}/-----END /{x="/dev/null";}'
224 USESSL=false
225 if [ -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.key -a -f $BSDIR/cloudify-manager-blueprints-3.4/resources/ssl/server.crt ]
226 then
227         USESSL=true
228 fi
229 #
230 #       Set up configuration for the bootstrap
231 #
232 export CLOUDIFY_USERNAME=admin CLOUDIFY_PASSWORD=encc0fba9f6d618a1a51935b42342b17658
233 cd $BSDIR/cloudify-manager-blueprints-3.4
234 cp simple-manager-blueprint.yaml bootstrap-blueprint.yaml
235 ed bootstrap-blueprint.yaml <<'!EOF'
236 /^node_types:/-1a
237   plugin_resources:
238     description: >
239       Holds any archives that should be uploaded to the manager.
240     default: []
241   dsl_resources:
242     description: >
243       Holds a set of dsl required resources
244     default: []
245 .
246 /^        upload_resources:/a
247           plugin_resources: { get_input: plugin_resources }
248 .
249 w
250 q
251 !EOF
252
253 sed <simple-manager-blueprint-inputs.yaml >bootstrap-inputs.yaml \
254         -e "s;.*public_ip: .*;public_ip: '$PUBIP';" \
255         -e "s;.*private_ip: .*;private_ip: '$PVTIP';" \
256         -e "s;.*ssh_user: .*;ssh_user: '$SSHUSER';" \
257         -e "s;.*ssh_key_filename: .*;ssh_key_filename: '$PVTKEY2';" \
258         -e "s;.*elasticsearch_java_opts: .*;elasticsearch_java_opts: '-Des.cluster.name=$ESMAGIC';" \
259         -e "/ssl_enabled: /s/.*/ssl_enabled: $USESSL/" \
260         -e "/security_enabled: /s/.*/security_enabled: $USESSL/" \
261         -e "/admin_password: /s/.*/admin_password: '$CLOUDIFY_PASSWORD'/" \
262         -e "/admin_username: /s/.*/admin_username: '$CLOUDIFY_USERNAME'/" \
263         -e "s;.*manager_resources_package: .*;manager_resources_package: 'http://169.254.169.254/nosuchthing/$MRPFILE';" \
264         -e "s;.*ignore_bootstrap_validations: .*;ignore_bootstrap_validations: true;" \
265
266 # Add plugin resources
267 # TODO Maintain plugin list as updates/additions occur
268 cat >>bootstrap-inputs.yaml <<'!EOF'
269 plugin_resources:
270   - 'http://repository.cloudifysource.org/org/cloudify3/wagons/cloudify-openstack-plugin/1.4/cloudify_openstack_plugin-1.4-py27-none-linux_x86_64-centos-Core.wgn'
271   - 'http://repository.cloudifysource.org/org/cloudify3/wagons/cloudify-fabric-plugin/1.4.1/cloudify_fabric_plugin-1.4.1-py27-none-linux_x86_64-centos-Core.wgn'
272   - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/dnsdesig-1.0.0-py27-none-any.wgn'
273   - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/sshkeyshare-1.0.0-py27-none-any.wgn'
274   - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.ccsdk.platform.plugins/releases/plugins/pgaas-1.0.0-py27-none-any.wgn'
275   - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/cdapcloudify/cdapcloudify-14.2.5-py27-none-any.wgn'
276   - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/dcaepolicyplugin/dcaepolicyplugin-1.0.0-py27-none-any.wgn'
277   - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/dockerplugin/dockerplugin-2.4.0-py27-none-any.wgn'
278   - 'https://nexus.onap.org/service/local/repositories/raw/content/org.onap.dcaegen2.platform.plugins/releases/plugins/relationshipplugin/relationshipplugin-1.0.0-py27-none-any.wgn'
279 !EOF
280 #
281 #       And away we go
282 #
283 cfy init -r
284 cfy bootstrap --install-plugins -p bootstrap-blueprint.yaml -i bootstrap-inputs.yaml
285 rm -f resources/ssl/server.key
286
287 # Install Consul VM via a blueprint
288 cd $STARTDIR
289 mkdir consul
290 cd consul
291 cfy init -r
292 cfy use -t ${PUBIP}
293 echo "Deploying Consul VM"
294
295 set +e
296 if wget -O /tmp/consul_cluster.yaml "${PLATBPSRC}"/consul_cluster.yaml; then
297   mv -f /tmp/consul_cluster.yaml ../blueprints/
298   echo "Succeeded in getting the newest consul_cluster.yaml"
299 else
300   echo "Failed to update consul_cluster.yaml, using default version"
301   rm -f /tmp/consul_cluster.yaml
302 fi
303 set -e
304 cfy install -p ../blueprints/consul_cluster.yaml -d consul -i ../${INPUTS} -i "datacenter=$LOCATION"
305
306 # Get the floating IP for one member of the cluster
307 # Needed for instructing the Consul agent on CM host to join the cluster
308 CONSULIP=$(cfy deployments outputs -d consul | grep -Po 'Value: \K.*')
309 echo Consul deployed at $CONSULIP
310
311 # Wait for Consul API to come up
312 until curl http://$CONSULIP:8500/v1/agent/services
313 do
314    echo Waiting for Consul API
315    sleep 60
316 done
317
318 # Wait for a leader to be elected
319 until [[ "$(curl -Ss http://$CONSULIP:8500/v1/status/leader)" != '""' ]]
320 do
321         echo Waiting for leader
322         sleep 30
323 done
324
325 # Instruct the client-mode Consul agent running on the CM to join the cluster
326 curl http://$PUBIP:8500/v1/agent/join/$CONSULIP
327
328 # Register Cloudify Manager in Consul via the local agent on CM host
329
330 REGREQ="
331 {
332   \"Name\" : \"cloudify_manager\",
333   \"ID\" : \"cloudify_manager\",
334   \"Tags\" : [\"http://${PUBIP}/api/v2.1\"],
335   \"Address\": \"${PUBIP}\",
336   \"Port\": 80,
337   \"Check\" : {
338     \"Name\" : \"cloudify_manager_health\",
339     \"Interval\" : \"300s\",
340     \"HTTP\" : \"http://${PUBIP}/api/v2.1/status\",
341     \"Status\" : \"passing\",
342     \"DeregisterCriticalServiceAfter\" : \"30m\"
343   }
344 }
345 "
346
347 curl -X PUT -H 'Content-Type: application/json' --data-binary "$REGREQ" http://$PUBIP:8500/v1/agent/service/register
348 # Make Consul address available to plugins on Cloudify Manager
349 # TODO probably not necessary anymore
350 ENVINI=$(mktemp)
351 cat <<!EOF > $ENVINI
352 [$LOCATION]
353 CONSUL_HOST=$CONSULIP
354 CONFIG_BINDING_SERVICE=config_binding_service
355 !EOF
356 scp $SSHOPTS -i ../$PVTKEY $ENVINI $SSHUSER@$PUBIP:/tmp/env.ini
357 ssh -t $SSHOPTS -i ../$PVTKEY $SSHUSER@$PUBIP sudo mv /tmp/env.ini /opt/env.ini
358 rm $ENVINI
359
360
361 ##### INSTALLATION OF PLATFORM COMPONENTS
362
363 # Get component blueprints
364 wget -P ./blueprints/docker/ ${DOCKERBPURL}
365 wget -P ./blueprints/cbs/ ${CBSBPURL}
366 wget -P ./blueprints/pg/ ${PGBPURL}
367 wget -P ./blueprints/cdap/ ${CDAPBPURL}
368 wget -P ./blueprints/cdapbroker/ ${CDAPBROKERBPURL}
369 wget -P ./blueprints/inv/ ${INVBPURL}
370 wget -P ./blueprints/dh/ ${DHBPURL}
371 wget -P ./blueprints/ph/ ${PHBPURL}
372 wget -P ./blueprints/ves/ ${VESBPURL}
373 wget -P ./blueprints/tca/ ${TCABPURL}
374 wget -P ./blueprints/hrules/ ${HRULESBPURL}
375 wget -P ./blueprints/hengine/ ${HENGINEBPURL}
376 wget -P ./blueprints/prh/ ${PRHBPURL}
377 wget -P ./blueprints/hv-ves/ ${HVVESBPURL}
378
379
380 # Set up the credentials for access to the Docker registry
381 curl -X PUT -H "Content-Type: application/json" --data-binary '[{"username":"docker", "password":"docker", "registry": "nexus3.onap.org:10001"}]'  http://${CONSULIP}:8500/v1/kv/docker_plugin/docker_logins
382
383 # Install platform Docker host
384 # Note we're still in the "consul" directory, which is init'ed for talking to CM
385
386 set +e
387 # Docker host for platform containers
388 cfy install -v -p ./blueprints/docker/${DOCKERBP} -b DockerBP -d DockerPlatform -i ../${INPUTS} -i "registered_dockerhost_name=platform_dockerhost" -i "registrator_image=onapdcae/registrator:v7" -i "location_id=${LOCATION}" -i "node_name=dokp00" -i "target_datacenter=${LOCATION}"
389
390 # Docker host for service containers
391 cfy deployments create -b DockerBP -d DockerComponent -i ../${INPUTS} -i "registered_dockerhost_name=component_dockerhost" -i "location_id=${LOCATION}" -i "registrator_image=onapdcae/registrator:v7" -i "node_name=doks00" -i "target_datacenter=${LOCATION}"
392 cfy executions start -d DockerComponent -w install
393
394 # wait for the extended platform VMs settle
395 #sleep 180
396
397
398 # CDAP cluster
399 cfy install -p ./blueprints/cdap/${CDAPBP} -b cdapbp7 -d cdap7 -i ../config/cdapinputs.yaml -i "location_id=${LOCATION}"
400
401 # config binding service
402 cfy install -p ./blueprints/cbs/${CBSBP} -b config_binding_service -d config_binding_service -i "location_id=${LOCATION}"
403
404
405 # Postgres
406 cfy install -p ./blueprints/pg/${PGBP} -b pgaas -d pgaas  -i ../${INPUTS}
407
408
409 # Inventory
410 cfy install -p ./blueprints/inv/${INVBP} -b PlatformServicesInventory -d PlatformServicesInventory -i "location_id=${LOCATION}" -i ../config/invinputs.yaml
411
412
413 # Deployment Handler DH
414 cat >../dhinputs <<EOL
415 application_config:
416   cloudify:
417     protocol: "http"
418   inventory:
419     protocol: "http"
420 EOL
421 cfy install -p ./blueprints/dh/${DHBP} -b DeploymentHandlerBP -d DeploymentHandler -i "location_id=${LOCATION}"  -i ../dhinputs
422
423
424 # Policy Handler PH
425 cfy install -p ./blueprints/ph/${PHBP} -b policy_handler_BP -d policy_handler -i 'policy_handler_image=nexus3.onap.org:10001/onap/org.onap.dcaegen2.platform.policy-handler:1.1-latest' -i "location_id=${LOCATION}" -i ../config/phinputs.yaml
426
427
428 # Wait for the CDAP cluster to be registered in Consul
429 echo "Waiting for CDAP cluster to register"
430 until curl -Ss http://${CONSULIP}:8500/v1/catalog/service/cdap | grep cdap
431 do 
432     echo -n .
433     sleep 30
434 done
435 echo "CDAP cluster registered"
436
437
438 # CDAP Broker
439 cfy install -p ./blueprints/cdapbroker/${CDAPBROKERBP} -b cdapbroker -d cdapbroker -i "location_id=${LOCATION}"
440
441
442 # VES
443 cfy install -p ./blueprints/ves/${VESBP} -b ves -d ves -i ../config/vesinput.yaml
444
445
446 # TCA
447 cfy install -p ./blueprints/tca/${TCABP} -b tca -d tca -i ../config/tcainputs.yaml
448
449 # Holmes
450 cfy install -p ./blueprints/hrules/${HRULESBP} -b hrules -d hrules -i ../config/hr-ip.yaml
451 cfy install -p ./blueprints/hengine/${HENGINEBP} -b hengine -d hengine -i ../config/he-ip.yaml
452
453 # PRH
454 cfy install -p ./blueprints/prh/${PRHBP} -b prh -d prh -i ../config/prh-input.yaml
455
456 # HV-VES
457 cfy install -p ./blueprints/hv-ves/${HVVESBP} -b hv-ves -d hv-ves
458
459
460 # write out IP addresses
461 echo "$CONSULIP" > "$STARTDIR"/config/runtime.ip.consul
462 echo "$PUBIP" > "$STARTDIR"/config/runtime.ip.cm
463
464
465 # Keep the container up
466 rm -f /tmp/ready_to_exit
467 while [ ! -e /tmp/ready_to_exit ]
468 do
469     sleep 30
470 done