Fixed CDR_IMAGE_VERSION in portal script
[demo.git] / boot / dcae2_vm_init.sh
index 0d9bebc..5fd2d66 100755 (executable)
@@ -344,7 +344,7 @@ verify_multicloud_registration()
 }
 
 
-register_dns_zone()
+register_dns_zone_proxied_designate()
 {
     local CLOUD_OWNER='pod25' 
     local CLOUD_REGION
@@ -426,6 +426,92 @@ register_dns_zone()
 }
 
 
+register_dns_zone_designate()
+{
+    # Register a DNS zone directly against an OpenStack Designate v2 API
+    # endpoint discovered from the Keystone service catalog.
+    # $1 (optional): zone label; defaults to /opt/config/dcae_zone.txt
+    local HEADER_CONTENT_TYPE_JSON="Content-Type: application/json"
+    local HEADER_ACCEPT_JSON="Accept: application/json"
+    local HEADER_TOKEN
+    local DCAE_ZONE
+    local DCAE_DOMAIN
+    local ZONE_NAME
+    local ZONE_ID
+    local KEYSTONE_URL
+    local API_ENDPOINT
+    local API_DATA
+    local TENANT_NAME
+    local TENANT_ID
+    local ZONE_PROJECT_ID
+    local USERNAME
+    local PASSWORD
+
+    if [ -z "$1" ]; then DCAE_ZONE="$(cat /opt/config/dcae_zone.txt)"; else DCAE_ZONE="$1"; fi
+    DCAE_DOMAIN="$(cat /opt/config/dcae_domain.txt)"
+    # Designate zone names must be fully qualified (trailing dot)
+    ZONE_NAME="${DCAE_ZONE}.${DCAE_DOMAIN}."
+
+    TENANT_NAME="$(cat /opt/config/tenant_name.txt)"
+    TENANT_ID="$(cat /opt/config/tenant_id.txt)"
+
+    # Normalize the Keystone URL: append /v2.0 when no API version is present
+    # NOTE(review): the token request below always uses the v2.0-style
+    # /tokens payload; a /v3 Keystone URL is passed through unchanged and
+    # will not accept it -- confirm whether v3 support is actually needed
+    KEYSTONE_URL="$(cat /opt/config/openstack_keystone_url.txt)"
+    if [[ "$KEYSTONE_URL" == */v3 ]]; then
+        echo "$KEYSTONE_URL"
+    elif [[ "$KEYSTONE_URL" == */v2.0 ]]; then
+        echo "$KEYSTONE_URL"
+    else
+        KEYSTONE_URL="${KEYSTONE_URL}/v2.0"
+        echo "$KEYSTONE_URL"
+    fi
+
+    USERNAME="$(cat /opt/config/openstack_user.txt)"
+    PASSWORD="$(cat /opt/config/openstack_password.txt)"
+
+    # Authenticate with Keystone to obtain a scoped token
+    API_ENDPOINT="${KEYSTONE_URL}/tokens"
+    API_DATA="{\"auth\": {\"project\": \"${TENANT_NAME}\", \"tenantId\": \"${TENANT_ID}\", \"passwordCredentials\": {\"username\": \"${USERNAME}\", \"password\": \"${PASSWORD}\"}}}"
+
+    echo "===> Getting token from ${API_ENDPOINT}"
+    RESP=$(curl -s -v -H "${HEADER_CONTENT_TYPE_JSON}" -d "${API_DATA}" "${API_ENDPOINT}")
+
+    # jq -r prints the literal string "null" when the key is absent, so test
+    # for that as well as the empty string
+    TOKEN="$(echo "${RESP}" | jq -r .access.token.id)"
+    if [ -z "$TOKEN" ] || [ "$TOKEN" == "null" ]; then
+        echo "Fail to acquire token for creating DNS zone.  Exit"
+        exit 1
+    fi
+    HEADER_TOKEN="X-Auth-Token: ${TOKEN}"
+
+    # Locate the Designate endpoint in the service catalog
+    DESIGNATE_URL=$(echo "${RESP}" | jq -r '.access.serviceCatalog[] | select(.name=="designate") | .endpoints[0].publicURL')
+    if [ -z "${DESIGNATE_URL}" ] || [ "${DESIGNATE_URL}" == "null" ]; then
+        echo "Fail to find Designate API endpoint.  Exit"
+        exit 1
+    fi
+
+    API_ENDPOINT="${DESIGNATE_URL}/v2/zones"
+    echo "===> Register DNS zone $ZONE_NAME at Designate API endpoint ${API_ENDPOINT}"
+
+    # Look up the zone; create it only when it does not already exist
+    RESP=$(curl -v -s -H "$HEADER_TOKEN" "$API_ENDPOINT")
+    ZONE_ID=$(echo "$RESP" | jq -r --arg ZONE_NAME "$ZONE_NAME" '.zones[] |select(.name==$ZONE_NAME) |.id')
+    if [ -z "$ZONE_ID" ]; then
+        echo "======> Zone ${ZONE_NAME} does not exist.  Create"
+        API_DATA="{\"name\": \"${ZONE_NAME}\", \"email\": \"dcae@onap.org\", \"type\": \"PRIMARY\", \"ttl\": 7200, \"description\": \"DCAE DNS zone created for ONAP deployment $DCAE_ZONE\"}"
+        RESP=$(curl -s -v -X POST -H "$HEADER_ACCEPT_JSON" -H "$HEADER_CONTENT_TYPE_JSON" -H "$HEADER_TOKEN" -d "$API_DATA" "$API_ENDPOINT")
+        # -r so the id is unquoted; "null" means the create call failed
+        ZONE_ID=$(echo "$RESP" | jq -r .id)
+
+        if [ -z "$ZONE_ID" ] || [ "$ZONE_ID" == "null" ]; then
+            echo "Fail to create DNS zone ${ZONE_NAME}.  Exit"
+            exit 1
+        fi
+    else
+        # Zone exists: verify ownership so later recordset writes will succeed
+        echo "======> Zone ${ZONE_NAME} already exists."
+        API_ENDPOINT="${DESIGNATE_URL}/v2/zones/${ZONE_ID}"
+        RESP=$(curl -s -v -H "$HEADER_ACCEPT_JSON" -H "$HEADER_TOKEN" "$API_ENDPOINT")
+        ZONE_PROJECT_ID=$(echo "$RESP" | jq -r .project_id)
+        if [ "$ZONE_PROJECT_ID" != "noauth-project" ] && [ "$ZONE_PROJECT_ID" != "$TENANT_ID" ]; then
+            echo "======> Zone ${ZONE_NAME} owned by other projects, may have problem creating records"
+        else
+            echo "======> Zone ${ZONE_NAME} okay to create new records"
+        fi
+    fi
+}
+
 delete_dns_zone()
 {
     local CLOUD_OWNER='pod25'
@@ -509,9 +595,28 @@ list_dns_zone()
     curl -v -s  -H "Content-Type: application/json" -H "X-Auth-Token: $TOKEN" -X GET "${MULTICLOUD_PLUGIN_ENDPOINT}/dns-delegate/v2/zones/${ZONEID}/recordsets"
 }
 
+################################## start of vm_init #####################################
 
+# prepare the configurations needed by DCAEGEN2 installer
+rm -rf /opt/app/config
+mkdir -p /opt/app/config
 
 
+# private key
+sed -e 's/\\n/\n/g' /opt/config/priv_key | sed -e 's/^[ \t]*//g; s/[ \t]*$//g' > /opt/app/config/key
+chmod 777 /opt/app/config/key
+
+# move keystone url file
+#cp /opt/config/keystone_url.txt /opt/app/config/keystone_url.txt
+
+
+URL_ROOT='nexus.onap.org/service/local/repositories/raw/content'
+REPO_BLUEPRINTS='org.onap.dcaegen2.platform.blueprints'
+REPO_DEPLOYMENTS='org.onap.dcaegen2.deployments'
+if [ -e /opt/config/dcae_deployment_profile.txt ]; then
+  DEPLOYMENT_PROFILE=$(cat /opt/config/dcae_deployment_profile.txt)
+fi
+DEPLOYMENT_PROFILE=${DEPLOYMENT_PROFILE:-R1}
 
 NEXUS_USER=$(cat /opt/config/nexus_username.txt)
 NEXUS_PASSWORD=$(cat /opt/config/nexus_password.txt)
@@ -522,16 +627,46 @@ ZONE=$(cat /opt/config/rand_str.txt)
 MYFLOATIP=$(cat /opt/config/dcae_float_ip.txt)
 MYLOCALIP=$(cat /opt/config/dcae_ip_addr.txt)

+
 # start docker image pulling while we are waiting for A&AI to come online
 docker login -u "$NEXUS_USER" -p "$NEXUS_PASSWORD" "$NEXUS_DOCKER_REPO"
-docker pull "$NEXUS_DOCKER_REPO/onap/org.onap.dcaegen2.deployments.bootstrap:$DOCKER_VERSION" && docker pull nginx &

-#########################################
-# Wait for then register with A&AI
-########################################

-DNSAAS_PROXYED=$(tr '[:upper:]' '[:lower:]' < /opt/config/dnsaas_config_enabled.txt)
-if [ "$DNSAAS_PROXYED" == 'true' ]; then
+# R1 profile: blueprint-driven deployment through the DCAE bootstrap container
+if [ "$DEPLOYMENT_PROFILE" == "R1" ]; then
+  RELEASE_TAG='releases'
+  # download blueprint input template files
+  rm -rf /opt/app/inputs-templates
+  mkdir -p /opt/app/inputs-templates
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_BLUEPRINTS}/${RELEASE_TAG}/input-templates/inputs.yaml
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_BLUEPRINTS}/${RELEASE_TAG}/input-templates/cdapinputs.yaml
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_BLUEPRINTS}/${RELEASE_TAG}/input-templates/phinputs.yaml
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_BLUEPRINTS}/${RELEASE_TAG}/input-templates/dhinputs.yaml
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_BLUEPRINTS}/${RELEASE_TAG}/input-templates/invinputs.yaml
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_BLUEPRINTS}/${RELEASE_TAG}/input-templates/vesinput.yaml
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_BLUEPRINTS}/${RELEASE_TAG}/input-templates/tcainputs.yaml
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_BLUEPRINTS}/${RELEASE_TAG}/input-templates/he-ip.yaml
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_BLUEPRINTS}/${RELEASE_TAG}/input-templates/hr-ip.yaml

+  # generate blueprint input files
+  pip install --upgrade jinja2
+  wget https://${URL_ROOT}/${REPO_DEPLOYMENTS}/${RELEASE_TAG}/scripts/detemplate-bpinputs.py \
+    && \
+    (python detemplate-bpinputs.py /opt/config /opt/app/inputs-templates /opt/app/config; \
+     rm detemplate-bpinputs.py)

+  # Run docker containers
+  cd /opt


+  docker pull "$NEXUS_DOCKER_REPO/onap/org.onap.dcaegen2.deployments.bootstrap:$DOCKER_VERSION" \
+    && docker pull nginx &

+  #########################################
+  # Wait for then register with A&AI
+  ########################################

+  DNSAAS_PROXYED=$(tr '[:upper:]' '[:lower:]' < /opt/config/dnsaas_config_enabled.txt)
+  if [ "$DNSAAS_PROXYED" == 'true' ]; then
    echo "Using proxyed DNSaaS service, performing additional registration and configuration"
    wait_for_aai_ready

@@ -541,49 +676,204 @@ if [ "$DNSAAS_PROXYED" == 'true' ]; then
    verify_multicloud_registration

    wait_for_multicloud_ready
-    register_dns_zone "$ZONE" 
+    register_dns_zone_proxied_designate "$ZONE" 
    echo "Registration and configuration for proxying DNSaaS completed."
-else
-    echo "Using proxyed DNSaaS service, performing additional registration and configuration"
-fi
+  else
+    echo "Using Designate DNSaaS service, performing additional registration and configuration"
+    register_dns_zone_designate "$ZONE" 
+  fi

+  #########################################
+  # Start DCAE Bootstrap container
+  #########################################

+  chmod 777 /opt/app/config
+  rm -f /opt/config/runtime.ip.consul
+  rm -f /opt/config/runtime.ip.cm


+  #docker login -u "$NEXUS_USER" -p "$NEXUS_PASSWORD" "$NEXUS_DOCKER_REPO"
+  #docker pull "$NEXUS_DOCKER_REPO/onap/org.onap.dcaegen2.deployments.bootstrap:$DOCKER_VERSION"
+  docker run -d --name boot -v /opt/app/config:/opt/app/installer/config -e "LOCATION=$ZONE" "$NEXUS_DOCKER_REPO/onap/org.onap.dcaegen2.deployments.bootstrap:$DOCKER_VERSION"

-
-#########################################
-# Start DCAE Bootstrap container
-#########################################

-chmod 777 /opt/app/config
-rm -f /opt/config/runtime.ip.consul
-rm -f /opt/config/runtime.ip.cm
+  # waiting for bootstrap to complete then starting nginx for proxying healthcheck calls
+  # NOTE(review): blocks indefinitely until the bootstrap container writes this
+  # file -- there is no timeout
+  echo "Waiting for Consul to become accessible"
+  while [ ! -f /opt/app/config/runtime.ip.consul ]; do echo "."; sleep 30; done


-#docker login -u "$NEXUS_USER" -p "$NEXUS_PASSWORD" "$NEXUS_DOCKER_REPO"
-#docker pull "$NEXUS_DOCKER_REPO/onap/org.onap.dcaegen2.deployments.bootstrap:$DOCKER_VERSION"
-docker run -d --name boot -v /opt/app/config:/opt/app/installer/config -e "LOCATION=$ZONE" "$NEXUS_DOCKER_REPO/onap/org.onap.dcaegen2.deployments.bootstrap:$DOCKER_VERSION"
-
-
-# waiting for bootstrap to complete then starting nginx for proxying healthcheck calls
-echo "Waiting for Consul to become accessible"
-while [ ! -f /opt/app/config/runtime.ip.consul ]; do echo "."; sleep 30; done
-
+  # start proxy for consul's health check
+  CONSULIP=$(head -1 /opt/app/config/runtime.ip.consul | sed 's/[[:space:]]//g')
+  echo "Consul is available at $CONSULIP" 
+fi
 
+# R2 profiles (R2, R2MIN, R2PLUS, ...): docker-compose-based deployment
+if [[ $DEPLOYMENT_PROFILE == R2* ]]; then
+  RELEASE_TAG='R2'
+  # tolerate individual download/chmod failures while fetching artifacts
+  set +e
+  rm -rf /opt/app/inputs-templates
+  mkdir -p /opt/app/inputs-templates
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_DEPLOYMENTS}/${RELEASE_TAG}/heat/docker-compose-1.yaml
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_DEPLOYMENTS}/${RELEASE_TAG}/heat/docker-compose-2.yaml
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_DEPLOYMENTS}/${RELEASE_TAG}/heat/docker-compose-3.yaml
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_DEPLOYMENTS}/${RELEASE_TAG}/heat/docker-compose-4.yaml
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_DEPLOYMENTS}/${RELEASE_TAG}/heat/register.sh
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_DEPLOYMENTS}/${RELEASE_TAG}/heat/setup.sh
+  wget -P /opt/app/inputs-templates https://${URL_ROOT}/${REPO_DEPLOYMENTS}/${RELEASE_TAG}/heat/teardown.sh

+  # render the downloaded templates into /opt/app/config using values
+  # from /opt/config
+  pip install --upgrade jinja2
+  wget https://${URL_ROOT}/${REPO_DEPLOYMENTS}/${RELEASE_TAG}/scripts/detemplate-bpinputs.py \
+    && \
+    (python detemplate-bpinputs.py /opt/config /opt/app/inputs-templates /opt/app/config; \
+     rm detemplate-bpinputs.py)

+  if [ -e /opt/app/config/register.sh ]; then
+    chmod +x /opt/app/config/register.sh
+  fi
+  if [ -e /opt/app/config/setup.sh ]; then
+    chmod +x /opt/app/config/setup.sh
+  fi
+  if [ -e /opt/app/config/build-plugins.sh ]; then
+    chmod +x /opt/app/config/build-plugins.sh
+  fi
+  set -e

+  cd /opt/app/config
+  # deploy essentials
+  /opt/docker/docker-compose -f docker-compose-1.yaml up -d
+  echo "Waiting for Consul to come up ready"
+  # NOTE(review): these readiness waits have no timeout -- the script blocks
+  # indefinitely if a dependency never binds its port
+  while ! nc -z localhost 8500; do sleep 1; done
+  echo "Waiting for DB to come up ready"
+  while ! nc -z localhost 5432; do sleep 1; done
+  echo "Waiting for CBS to come up ready"
+  while ! nc -z localhost 10000; do sleep 1; done
+  echo "All dependencies are up, proceed to the next phase"
+  sleep 30

+  echo "Setup CloudifyManager and Registrator"
+  ./setup.sh
+  sleep 10
+  ./register.sh

+  echo "Bring up DCAE MIN service components for R2 use cases"
+  /opt/docker/docker-compose -f docker-compose-2.yaml up -d

+  # platform components only for the full R2 and R2PLUS profiles
+  if [[ "$DEPLOYMENT_PROFILE" == "R2" || "$DEPLOYMENT_PROFILE" == "R2PLUS" ]]; then
+    echo "Bring up DCAE platform components"
+    /opt/docker/docker-compose -f docker-compose-3.yaml up -d

+    if [ "$DEPLOYMENT_PROFILE" == "R2PLUS" ]; then
+      echo "Bring up additional (plus) DCAE service components"
+      /opt/docker/docker-compose -f docker-compose-4.yaml up -d
+    fi
+  fi

+  # start proxy for consul's health check
+  CONSULIP=$(cat /opt/config/dcae_ip_addr.txt)
+  echo "Consul is available at $CONSULIP"
+fi
 
 cat >./nginx.conf <<EOL
 server {
     listen 80;
     server_name dcae.simpledemo.onap.org;
+    root /www/healthcheck;
+
     location /healthcheck {
+        try_files /services.yaml =404;
+    }
+    location /R1 {
         proxy_pass http://${CONSULIP}:8500/v1/health/state/passing;
     }
+    location /R2MIN{
+        try_files /r2mvp_healthy.yaml =404;
+    }
+    location /R2 {
+        try_files /r2_healthy.yaml =404;
+    }
+    location /R2PLUS {
+        try_files /r2plus_healthy.yaml =404;
+    }
 }
 EOL
-docker run --name dcae-proxy -p 8080:80 -v "$(pwd)/nginx.conf:/etc/nginx/conf.d/default.conf" -d nginx
-echo "Healthcheck API available at http://${MYFLOATIP}:8080/healthcheck"
-echo "                          or http://${MYLOCALIP}:8080/healthcheck"
+
+HEALTHPORT=8000
+docker run -d \
+--name dcae-health \
+-p ${HEALTHPORT}:80 \
+-v "$(pwd)/nginx.conf:/etc/nginx/conf.d/default.conf" \
+-v "/tmp/healthcheck:/www/healthcheck" \
+--label "SERVICE_80_NAME=dcae-health" \
+--label "SERVICE_80_CHECK_HTTP=/healthcheck" \
+--label "SERVICE_80_CHECK_INTERVAL=15s" \
+--label "SERVICE_80_CHECK_INITIAL_STATUS=passing" \
+ nginx
+
+echo "Healthcheck API available at http://${MYFLOATIP}:${HEALTHPORT}/healthcheck"
+echo "                             http://${MYFLOATIP}:${HEALTHPORT}/R1"
+echo "                             http://${MYFLOATIP}:${HEALTHPORT}/R2MIN"
+echo "                             http://${MYFLOATIP}:${HEALTHPORT}/R2PLUS"
+
+# run forever for updating health status based on consul
+set +e
+while :
+do
+  rm -rf /tmp/healthcheck/*
+  # all registered services
+  SERVICES=$(curl -s http://consul:8500/v1/agent/services |jq '. | to_entries[] | .value.Service')
+  # passing services
+  SERVICES=$(curl -s http://consul:8500/v1/health/state/passing | jq '.[] | .ServiceName')
+
+  # remove empty lines/entries
+  SERVICES=$(echo "$SERVICES" | sed '/^\s*\"\"\s*$/d' |sed '/^\s*$/d')
+
+  SERVICES_JSON=$(echo "$SERVICES" | sed 's/\"$/\",/g' | sed '$ s/.$//')
+
+  echo "$(date): running healthy services:"
+  echo ">>> " $SERVICES
+  PLT_CONSUL=$(echo "$SERVICES" |grep consul)
+  PLT_CBS=$(echo "$SERVICES" |grep "config_binding_service")
+  MVP_PG_HOLMES=$(echo "$SERVICES" |grep "pgHolmes")
+  MVP_VES=$(echo "$SERVICES" |grep "mvp.*ves")
+  MVP_TCA=$(echo "$SERVICES" |grep "mvp.*tca")
+  MVP_HR=$(echo "$SERVICES" |grep "mvp.*holmes-rule")
+  MVP_HE=$(echo "$SERVICES" |grep "mvp.*holmes-engine")
+
+  PLT_CM=$(echo "$SERVICES" |grep "cloudify.*manager")
+  PLT_DH=$(echo "$SERVICES" |grep "deployment.*handler")
+  PLT_PH=$(echo "$SERVICES" |grep "policy.*handler")
+  PLT_SCH=$(echo "$SERVICES" |grep "service.*change.*handler")
+  PLT_INV=$(echo "$SERVICES" |grep "inventory")
+  PLT_PG_INVENTORY=$(echo "$SERVICES" |grep "pgInventory")
+
+  PLUS_MHB=$(echo "$SERVICES" |grep "heartbeat")
+  PLUS_PRH=$(echo "$SERVICES" |grep "prh")
+  PLUS_MPR=$(echo "$SERVICES" |grep "mapper")
+  PLUS_TRAP=$(echo "$SERVICES" |grep "snmptrap")
+
+  DATA="{\"healthy\" : \"$(date)\", \"healthy_services\": [${SERVICES_JSON}]}"
+  if [[ -n "$PLT_CONSUL" && -n "$PLT_CBS" && -n "$MVP_PG_HOLMES" && -n "$MVP_VES" && \
+        -n "$MVP_TCA" && -n "$MVP_HR" && -n "$MVP_HE" ]]; then
+    echo "${DATA}" > /tmp/healthcheck/r2mvp_healthy.yaml
+    echo "${DATA}" > /tmp/healthcheck/services.yaml
+    echo ">>>>>> enough services satisfying R2MIN service deployment"
+  else
+    echo ">>>>>> not enough services satisfying R2MIN service deployment"
+  fi
+
+  if [[ -n "$PLT_CONSUL" && -n "$PLT_CBS" && -n "$PLT_CM" && -n "$PLT_DH" && \
+        -n "$PLT_PH" && -n "$PLT_SCH" && -n "$PLT_INV" && -n "$PLT_PG_INVENTORY" ]]; then
+    echo ">>>>>> enough services satisfying R2 platform deployment"
+    echo "${DATA}" > /tmp/healthcheck/r2_healthy.yaml
+
+    if [[ -n "$PLUS_MHB" && -n "$PLUS_PRH" && -n "$PLUS_MPR" && -n "$PLUS_TRAP" ]]; then
+      echo ">>>>>> enough services satisfying R2PLUS deployment"
+      echo "${DATA}" > /tmp/healthcheck/r2plus_healthy.yaml
+    else
+      echo ">>>>>> not enough services satisfying R2PLUS service deployment"
+    fi
+  else
+    echo ">>>>>> not enough services satisfying R2 platform or R2PLUS service deployment"
+  fi
+  sleep 60
+done
+