--- /dev/null
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: appc-db
+ namespace: "{{ .Values.nsPrefix }}-appc"
+ labels:
+ name: appc-db
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteMany
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/appc/data
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: appc-db
+ namespace: "{{ .Values.nsPrefix }}-appc"
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 2Gi
+ selector:
+ matchLabels:
+ name: appc-db
restartPolicy: Always
volumes:
- name: appc-data
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/appc/data
+ persistentVolumeClaim:
+ claimName: appc-db
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
--- /dev/null
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+
+# Ignore config Docker image files
+docker
+createConfig.sh
\ No newline at end of file
--- /dev/null
+apiVersion: v1
+description: ONAP configuration pod
+name: config
+version: 1.1.0
create_configuration() {
create_namespace $1
- kubectl --namespace $1 create -f pod-config-init.yaml
+ helm install . --name "$1-config" --namespace $1 --set nsPrefix=$1
}
#MAINs
from ubuntu:16.04
+ENV no_proxy "localhost,127.0.0.1,.cluster.local,$KUBERNETES_SERVICE_HOST"
+# Setup Corporate proxy
+ENV https_proxy ${HTTPS_PROXY}
+ENV http_proxy ${HTTP_PROXY}
+
+# Additional packages
+RUN apt-get update
+RUN apt-get install -y openssl vim-common
RUN mkdir -p /opt/config/src/
-VOLUME /config-init/
COPY onap-cfg.tar.gz /tmp/
RUN tar -zxvf /tmp/onap-cfg.tar.gz -C /opt/config/src/
#!/bin/bash
+
+echo "Validating onap-parameters.yaml has been populated"
+[[ -z "$OPENSTACK_UBUNTU_14_IMAGE" ]] && { echo "Error: OPENSTACK_UBUNTU_14_IMAGE must be set in onap-parameters.yaml"; exit 1; }
+[[ -z "$OPENSTACK_PUBLIC_NET_ID" ]] && { echo "Error: OPENSTACK_PUBLIC_NET_ID must be set in onap-parameters.yaml"; exit 1; }
+[[ -z "$OPENSTACK_OAM_NETWORK_ID" ]] && { echo "Error: OPENSTACK_OAM_NETWORK_ID must be set in onap-parameters.yaml"; exit 1; }
+[[ -z "$OPENSTACK_OAM_SUBNET_ID" ]] && { echo "Error: OPENSTACK_OAM_SUBNET_ID must be set in onap-parameters.yaml"; exit 1; }
+[[ -z "$OPENSTACK_OAM_NETWORK_CIDR" ]] && { echo "Error: OPENSTACK_OAM_NETWORK_CIDR must be set in onap-parameters.yaml"; exit 1; }
+[[ -z "$OPENSTACK_USERNAME" ]] && { echo "Error: OPENSTACK_USERNAME must be set in onap-parameters.yaml"; exit 1; }
+[[ -z "$OPENSTACK_API_KEY" ]] && { echo "Error: OPENSTACK_API_KEY must be set in onap-parameters.yaml"; exit 1; }
+[[ -z "$OPENSTACK_REGION" ]] && { echo "Error: OPENSTACK_REGION must be set in onap-parameters.yaml"; exit 1; }
+[[ -z "$OPENSTACK_KEYSTONE_URL" ]] && { echo "Error: OPENSTACK_KEYSTONE_URL must be set in onap-parameters.yaml"; exit 1; }
+[[ -z "$OPENSTACK_FLAVOUR_MEDIUM" ]] && { echo "Error: OPENSTACK_FLAVOUR_MEDIUM must be set in onap-parameters.yaml"; exit 1; }
+[[ -z "$OPENSTACK_SERVICE_TENANT_NAME" ]] && { echo "Error: OPENSTACK_SERVICE_TENANT_NAME must be set in onap-parameters.yaml"; exit 1; }
+[[ -z "$DMAAP_TOPIC" ]] && { echo "Error: DMAAP_TOPIC must be set in onap-parameters.yaml"; exit 1; }
+[[ -z "$DEMO_ARTIFACTS_VERSION" ]] && { echo "Error: DEMO_ARTIFACTS_VERSION must be set in onap-parameters.yaml"; exit 1; }
+[[ -z "$OPENSTACK_TENANT_NAME" ]] && { echo "Error: OPENSTACK_TENANT_NAME must be set in onap-parameters.yaml"; exit 1; }
+
#make NAMESPACE directory
+echo "Creating $NAMESPACE directory if it doesn't exist"
mkdir -p /config-init/$NAMESPACE/
#unzip the configs in the NAMESPACEs directory ensuring no overwriting of files
+echo "Installing configuration files"
cp -vnpr /opt/config/src/* /config-init/$NAMESPACE/
#ensure db directories exist.
mkdir -p /config-init/$NAMESPACE/aai/elasticsearch/es-data/
mkdir -p /config-init/$NAMESPACE/aai/search-data-service/logs/
mkdir -p /config-init/$NAMESPACE/aai/data-router/logs/
+mkdir -p /config-init/$NAMESPACE/mso/mariadb/data
+echo "Setting permissions to container writeable directories"
chmod -R 777 /config-init/$NAMESPACE/sdc/logs/
chmod -R 777 /config-init/$NAMESPACE/portal/logs/
chmod -R 777 /config-init/$NAMESPACE/aai/aai-config/
chmod -R 777 /config-init/$NAMESPACE/aai/elasticsearch/es-data/
chmod -R 777 /config-init/$NAMESPACE/aai/search-data-service/logs/
chmod -R 777 /config-init/$NAMESPACE/aai/data-router/logs/
+chmod -R 777 /config-init/$NAMESPACE/policy/mariadb/
+echo "Substituting configuration parameters"
# replace the default 'onap' namespace qualification of K8s hostnames within the config files
find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/\.onap-/\.$NAMESPACE-/g" {} \;
+# set the ubuntu 14 image
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/UBUNTU_14_IMAGE_NAME_HERE/$OPENSTACK_UBUNTU_14_IMAGE/g" {} \;
+# set the openstack public network uuid
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/OPENSTACK_PUBLIC_NET_ID_HERE/$OPENSTACK_PUBLIC_NET_ID/g" {} \;
+
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/OPENSTACK_NETWORK_ID_WITH_ONAP_ROUTE_HERE/$OPENSTACK_OAM_NETWORK_ID/g" {} \;
+
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/OPENSTACK_SUBNET_ID_WITH_ONAP_ROUTE_HERE/$OPENSTACK_OAM_SUBNET_ID/g" {} \;
+
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s,NETWORK_CIDR_WITH_ONAP_ROUTE_HERE,$OPENSTACK_OAM_NETWORK_CIDR,g" {} \;
+
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/OPENSTACK_USERNAME_HERE/$OPENSTACK_USERNAME/g" {} \;
+
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/OPENSTACK_PASSWORD_HERE/$OPENSTACK_API_KEY/g" {} \;
+
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/OPENSTACK_REGION_HERE/$OPENSTACK_REGION/g" {} \;
+
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s,OPENSTACK_KEYSTONE_IP_HERE,$OPENSTACK_KEYSTONE_URL,g" {} \;
+
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/OPENSTACK_FLAVOUR_MEDIUM_HERE/$OPENSTACK_FLAVOUR_MEDIUM/g" {} \;
+
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/DMAAP_TOPIC_HERE/$DMAAP_TOPIC/g" {} \;
+
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/OPENSTACK_SERVICE_TENANT_NAME_HERE/$OPENSTACK_SERVICE_TENANT_NAME/g" {} \;
+
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/DEMO_ARTIFACTS_VERSION_HERE/$DEMO_ARTIFACTS_VERSION/g" {} \;
+
+# SDNC/Robot preload files manipulation
+OPENSTACK_OAM_NETWORK_CIDR_PREFIX=`cut -d. -f1-3 <<<"$OPENSTACK_OAM_NETWORK_CIDR"`
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE/$OPENSTACK_OAM_NETWORK_CIDR_PREFIX/g" {} \;
+
+# MSO post install steps to encrypt openstack password
+MSO_ENCRYPTION_KEY=$(cat /config-init/$NAMESPACE/mso/mso/encryption.key)
+OPENSTACK_API_ENCRYPTED_KEY=`echo -n "$OPENSTACK_API_KEY" | openssl aes-128-ecb -e -K $MSO_ENCRYPTION_KEY -nosalt | xxd -c 25 -p`
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/OPENSTACK_ENCRYPTED_PASSWORD_HERE/$OPENSTACK_API_ENCRYPTED_KEY/g" {} \;
+
+find /config-init/$NAMESPACE/ -type f -exec sed -i -e "s/OPENSTACK_TENANT_NAME_HERE/$OPENSTACK_TENANT_NAME/g" {} \;
+
+echo "Done!"
\ No newline at end of file
--- /dev/null
+0
+25
+ECOMP-PORTAL-OUTBOX-VID1 0 0
+PDPD-CONFIGURATION 0 2
+msgrtr.apinode.metrics.dmaap 1 26
+unauthenticated.SEC_MEASUREMENT_OUTPUT 1 1
+APPC-TEST2 0 0
+unauthenticated.TCA_EVENT_OUTPUT 1 1
+APPC-TEST1 0 0
+APPC-CL 0 2
+ECOMP-PORTAL-INBOX 0 0
+APPC-CL 1 0
+APPC-TEST2 1 1
+unauthenticated.TCA_EVENT_OUTPUT 0 1
+unauthenticated.SEC_MEASUREMENT_OUTPUT 0 1
+SDC-DISTR-NOTIF-TOPIC-SDC-OPENSOURCE-ENV1 0 0
+POLICY-CL-MGT 1 1
+PDPD-CONFIGURATION 1 0
+DCAE-CL-EVENT 1 1
+msgrtr.apinode.metrics.dmaap 0 4
+ECOMP-PORTAL-OUTBOX-APP1 0 0
+ECOMP-PORTAL-OUTBOX-SDC1 0 0
+POLICY-CL-MGT 0 1
+SDC-DISTR-STATUS-TOPIC-SDC-OPENSOURCE-ENV1 0 0
+DCAE-CL-EVENT 0 1
+ECOMP-PORTAL-OUTBOX-DBC1 0 0
+ECOMP-PORTAL-OUTBOX-POL1 0 0
--- /dev/null
+0
+25
+ECOMP-PORTAL-OUTBOX-VID1 0 0
+PDPD-CONFIGURATION 0 2
+msgrtr.apinode.metrics.dmaap 1 26
+unauthenticated.SEC_MEASUREMENT_OUTPUT 1 1
+APPC-TEST2 0 0
+unauthenticated.TCA_EVENT_OUTPUT 1 1
+APPC-TEST1 0 0
+APPC-CL 0 2
+ECOMP-PORTAL-INBOX 0 0
+APPC-CL 1 0
+APPC-TEST2 1 1
+unauthenticated.TCA_EVENT_OUTPUT 0 1
+unauthenticated.SEC_MEASUREMENT_OUTPUT 0 1
+SDC-DISTR-NOTIF-TOPIC-SDC-OPENSOURCE-ENV1 0 0
+POLICY-CL-MGT 1 1
+PDPD-CONFIGURATION 1 0
+DCAE-CL-EVENT 1 1
+msgrtr.apinode.metrics.dmaap 0 4
+ECOMP-PORTAL-OUTBOX-APP1 0 0
+ECOMP-PORTAL-OUTBOX-SDC1 0 0
+POLICY-CL-MGT 0 1
+SDC-DISTR-STATUS-TOPIC-SDC-OPENSOURCE-ENV1 0 0
+DCAE-CL-EVENT 0 1
+ECOMP-PORTAL-OUTBOX-DBC1 0 0
+ECOMP-PORTAL-OUTBOX-POL1 0 0
--- /dev/null
+#!/bin/bash
+
+if [[ -z "$KAFKA_PORT" ]]; then
+ export KAFKA_PORT=9092
+fi
+if [[ -z "$KAFKA_ADVERTISED_PORT" ]]; then
+ export KAFKA_ADVERTISED_PORT=$(docker port `hostname` $KAFKA_PORT | sed -r "s/.*:(.*)/\1/g")
+fi
+if [[ -z "$KAFKA_BROKER_ID" ]]; then
+ # By default auto allocate broker ID
+ #export KAFKA_BROKER_ID=-1
+ export KAFKA_BROKER_ID=1
+fi
+#if [[ -z "$KAFKA_LOG_DIRS" ]]; then
+ #export KAFKA_LOG_DIRS="/kafka/kafka-logs-$HOSTNAME"
+ export KAFKA_LOG_DIRS="/kafka/kafka-logs"
+#fi
+if [[ -z "$KAFKA_ZOOKEEPER_CONNECT" ]]; then
+ export KAFKA_ZOOKEEPER_CONNECT=$(env | grep ZK.*PORT_2181_TCP= | sed -e 's|.*tcp://||' | paste -sd ,)
+fi
+
+if [[ -n "$KAFKA_HEAP_OPTS" ]]; then
+ sed -r -i "s/(export KAFKA_HEAP_OPTS)=\"(.*)\"/\1=\"$KAFKA_HEAP_OPTS\"/g" $KAFKA_HOME/bin/kafka-server-start.sh
+ unset KAFKA_HEAP_OPTS
+fi
+
+if [[ -z "$KAFKA_ADVERTISED_HOST_NAME" && -n "$HOSTNAME_COMMAND" ]]; then
+ export KAFKA_ADVERTISED_HOST_NAME=$(eval $HOSTNAME_COMMAND)
+fi
+
+for VAR in `env`
+do
+ if [[ $VAR =~ ^KAFKA_ && ! $VAR =~ ^KAFKA_HOME ]]; then
+ kafka_name=`echo "$VAR" | sed -r "s/KAFKA_(.*)=.*/\1/g" | tr '[:upper:]' '[:lower:]' | tr _ .`
+ env_var=`echo "$VAR" | sed -r "s/(.*)=.*/\1/g"`
+ if egrep -q "(^|^#)$kafka_name=" $KAFKA_HOME/config/server.properties; then
+ sed -r -i "s@(^|^#)($kafka_name)=(.*)@\2=${!env_var}@g" $KAFKA_HOME/config/server.properties #note that no config values may contain an '@' char
+ else
+ echo "$kafka_name=${!env_var}" >> $KAFKA_HOME/config/server.properties
+ fi
+ fi
+done
+
+if [[ -n "$CUSTOM_INIT_SCRIPT" ]] ; then
+ eval $CUSTOM_INIT_SCRIPT
+fi
+
+
+KAFKA_PID=0
+
+# see https://medium.com/@gchudnov/trapping-signals-in-docker-containers-7a57fdda7d86#.bh35ir4u5
+term_handler() {
+ echo 'Stopping Kafka....'
+ if [ $KAFKA_PID -ne 0 ]; then
+ kill -s TERM "$KAFKA_PID"
+ wait "$KAFKA_PID"
+ fi
+ echo 'Kafka stopped.'
+ exit
+}
+
+
+# Capture kill requests to stop properly
+trap "term_handler" SIGHUP SIGINT SIGTERM
+create-topics.sh &
+$KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties &
+KAFKA_PID=$!
+
+wait "$KAFKA_PID"
--- /dev/null
+###############################################################################
+##
+## Cambria API Server config
+##
+## - Default values are shown as commented settings.
+##
+
+###############################################################################
+##
+## HTTP service
+##
+## - 3904 is standard as of 7/29/14.
+#
+## Zookeeper Connection
+##
+## Both Cambria and Kafka make use of Zookeeper.
+##
+config.zk.servers=zookeeper.onap-dcae:2181
+#config.zk.servers=172.17.0.1:2181
+#dmaap.onap-dcae:2181
+#10.208.128.229:2181
+#config.zk.root=/fe3c/cambria/config
+
+
+###############################################################################
+##
+## Kafka Connection
+##
+## Items below are passed through to Kafka's producer and consumer
+## configurations (after removing "kafka.")
+## if you want to change request.required.acks it can take this one value
+#kafka.metadata.broker.list=localhost:9092,localhost:9093
+kafka.metadata.broker.list=kafka.onap-dcae:9092
+#kafka.metadata.broker.list=172.17.0.1:9092
+#dmaap.onap-dcae:9092
+#10.208.128.229:9092
+##kafka.request.required.acks=-1
+#kafka.client.zookeeper=${config.zk.servers}
+consumer.timeout.ms=100
+zookeeper.connection.timeout.ms=6000
+zookeeper.session.timeout.ms=6000
+zookeeper.sync.time.ms=2000
+auto.commit.interval.ms=1000
+fetch.message.max.bytes=1000000
+auto.commit.enable=false
+
+
+###############################################################################
+##
+## Secured Config
+##
+## Some data stored in the config system is sensitive -- API keys and secrets,
+## for example. to protect it, we use an encryption layer for this section
+## of the config.
+##
+## The key is a base64-encoded AES key. This must be created/configured for
+## each installation.
+#cambria.secureConfig.key=
+##
+## The initialization vector is a 16-byte value specific to the secured store.
+## This must be created/configured for each installation.
+#cambria.secureConfig.iv=
+
+## Southfield Sandbox
+cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
+cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
+authentication.adminSecret=fe3cCompound
+#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
+#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
+
+
+###############################################################################
+##
+## Consumer Caching
+##
+## Kafka expects live connections from the consumer to the broker, which
+## obviously doesn't work over connectionless HTTP requests. The Cambria
+## server proxies HTTP requests into Kafka consumer sessions that are kept
+## around for later re-use. Not doing so is costly for setup per request,
+## which would substantially impact a high volume consumer's performance.
+##
+## This complicates Cambria server failover, because we often need server
+## A to close its connection before server B brings up the replacement.
+##
+
+## The consumer cache is normally enabled.
+#cambria.consumer.cache.enabled=true
+
+## Cached consumers are cleaned up after a period of disuse. The server inspects
+## consumers every sweepFreqSeconds and will clean up any connections that are
+## dormant for touchFreqMs.
+#cambria.consumer.cache.sweepFreqSeconds=15
+#cambria.consumer.cache.touchFreqMs=120000
+
+## The cache is managed through ZK. The default value for the ZK connection
+## string is the same as config.zk.servers.
+#cambria.consumer.cache.zkConnect=${config.zk.servers}
+
+##
+## Shared cache information is associated with this node's name. The default
+## name is the hostname plus the HTTP service port this host runs on. (The
+## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
+## which is not always adequate.) You can set this value explicitly here.
+##
+#cambria.api.node.identifier=<use-something-unique-to-this-instance>
+
+###############################################################################
+##
+## Metrics Reporting
+##
+## This server can report its metrics periodically on a topic.
+##
+#metrics.send.cambria.enabled=true
+#metrics.send.cambria.topic=cambria.apinode.metrics #msgrtr.apinode.metrics.dmaap
+#metrics.send.cambria.sendEverySeconds=60
+
+cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
+
+##############################################################################
+# Maximum request content length in bytes.
+# NOTE(review): the original comment said "100mb", but 10000 bytes is ~10 KB —
+# confirm whether this value was meant to be 104857600 (100 MB).
+maxcontentlength=10000
+
+
+##############################################################################
+#AAF Properties
+msgRtr.namespace.aaf=org.openecomp.dcae.dmaap.mtnje2.mr.topic
+msgRtr.topicfactory.aaf=org.openecomp.dcae.dmaap.topicFactory|:org.openecomp.dcae.dmaap.mtnje2.mr.topic:
+enforced.topic.name.AAF=org.openecomp
+forceAAF=false
+transidUEBtopicreqd=false
+defaultNSforUEB=org.openecomp.dmaap.mr.ueb
+##############################################################################
+#Mirror Maker Agent
+msgRtr.mirrormakeradmin.aaf=org.openecomp.dmaap.mr.dev.mirrormaker|*|admin
+msgRtr.mirrormakeruser.aaf=org.openecomp.dmaap.mr.dev.mirrormaker|*|user
+msgRtr.mirrormakeruser.aaf.create=org.openecomp.dmaap.mr.dev.topicFactory|:org.openecomp.dmaap.mr.dev.topic:
+msgRtr.mirrormaker.timeout=15000
+msgRtr.mirrormaker.topic=org.openecomp.dmaap.mr.prod.mm.agent
+msgRtr.mirrormaker.consumergroup=mmagentserver
+msgRtr.mirrormaker.consumerid=1
--- /dev/null
+basic_realm=openecomp.org
+basic_warn=TRUE
+
+cadi_loglevel=DEBUG
+#cadi_keyfile=target/swm/package/nix/dist_files/appl/${artifactId}/etc/keyfile2
+cadi_keyfile=/appl/dmaapMR1/etc/keyfile
+# Configure AAF
+aaf_url=https://DME2RESOLVE/service=org.openecomp.authz.AuthorizationService/version=2.0/envContext=DEV/routeOffer=BAU_SE
+
+aaf_id=dgl@openecomp.org
+aaf_password=enc:f2u5br1mh29M02-
+aaf_timeout=5000
+aaf_clean_interval=1200000
+aaf_user_expires=60000
+aaf_high_count=1000000
+
+
+# The following properties are being set by the AJSC Container and should NOT need to be set here.
+AFT_LATITUDE=33.823589
+AFT_LONGITUDE=-84.366982
+AFT_ENVIRONMENT=AFTUAT
--- /dev/null
+_sNOLphPzrU7L0L3oWv0pYwgV_ddGF1XoBsQEIAp34jfP-fGJFPfFYaMpDEZ3gwH59rNw6qyMZHk
+k-4irklvVcWk36lC3twNvc0DueRCVrws1bkuhOLCXdxHJx-YG-1xM8EJfRmzh79WPlPkbAdyPmFF
+Ah44V0GjAnInPOFZA6MHP9rNx9B9qECHRfmvzU13vJCcgTsrmOr-CEiWfRsnzPjsICxpq9OaVT_D
+zn6rNaroGm1OiZNCrCgvRkCUHPOOCw3j9G1GeaImoZNYtozbz9u4sj13PU-MxIIAa64b1bMMMjpz
+Upc8lVPI4FnJKg6axMmEGn5zJ6JUq9mtOVyPj__2GEuDgpx5H4AwodXXVjFsVgR8UJwI_BvS2JVp
+JoQk0J1RqXmAXVamlsMAfzmmbARXgmrBfnuhveZnh9ymFVU-YZeujdANniXAwBGI7c6hG_BXkH7i
+Eyf4Fn41_SV78PskP6qgqJahr9r3bqdjNbKBztIKCOEVrE_w3IM5r02l-iStk_NBRkj6cq_7VCpG
+afxZ2CtZMwuZMiypO_wOgbdpCSKNzsL-NH2b4b08OlKiWb263gz634KJmV5WEfCl-6eH-JUFbWOS
+JwQfActLNT2ZQPl2MyZQNBzJEWoJRgS6k7tPRO-zqeUtYYHGHVMCxMuMHGQcoilNNHEFeBCG_fBh
+yAKb9g9F86Cbx9voMLiyTX2T3rwVHiSJFOzfNxGmfN5JWOthIun_c5hEY1tLQ15BomzkDwk7BAj7
+VbRCrVD45B6xrmSTMBSWYmLyr6mnQxQqeh9cMbD-0ZAncE3roxRnRvPKjFFa208ykYUp2V83r_PJ
+fV5I9ZPKSjk9DwFyrjkcQQEYDhdK6IFqcd6nEthjYVkmunu2fsX0bIOm9GGdIbKGqBnpdgBO5hyT
+rBr9HSlZrHcGdti1R823ckDF0Ekcl6kioDr5NLIpLtg9zUEDRm3QrbX2mv5Zs8W0pYnOqglxy3lz
+bJZTN7oR7VasHUtjmp0RT9nLZkUs5TZ6MHhlIq3ZsQ6w_Q9Rv1-ofxfwfCC4EBrWKbWAGCf6By4K
+Ew8321-2YnodhmsK5BrT4zQ1DZlmUvK8BmYjZe7wTljKjgYcsLTBfX4eMhJ7MIW1kpnl8AbiBfXh
+QzN56Mki51Q8PSQWHm0W9tnQ0z6wKdck6zBJ8JyNzewZahFKueDTn-9DOqIDfr3YHvQLLzeXyJ8e
+h4AgjW-hvlLzRGtkCknjLIgXVa3rMTycseAwbW-mgdCqqkw3SdEG8feAcyntmvE8j2jbtSDStQMB
+9JdvyNLuQdNG4pxpusgvVso0-8NQF0YVa9VFwg9U6IPSx5p8FcW68OAHt_fEgT4ZtiH7o9aur4o9
+oYqUh2lALCY-__9QLq1KkNjMKs33Jz9E8LbRerG9PLclkTrxCjYAeUWBjCwSI7OB7xkuaYDSjkjj
+a46NLpdBN1GNcsFFcZ79GFAK0_DsyxGLX8Tq6q0Bvhs8whD8wlSxpTGxYkyqNX-vcb7SDN_0WkCE
+XSdZWkqTHXcYbOvoCOb_e6SFAztuMenuHWY0utX0gBfx_X5lPDFyoYXErxFQHiA7t27keshXNa6R
+ukQRRS8kMjre1U74sc-fRNXkXpl57rG4rgxaEX0eBeowa53KAsVvUAoSac2aC_nfzXrDvoyf9Xi3
+JpEZNhUDLpFCEycV4I7jGQ9wo9qNaosvlsr6kbLDNdb_1xrGVgjT3xEvRNJNPqslSAu-yD-UFhC3
+AmCdYUnugw_eEFqXCHTARcRkdPPvl2XsmEKY2IqEeO5tz4DyXQFaL-5hEVh6lYEU1EOWHk3UGIXe
+Vc5_Ttp82qNLmlJPbZvgmNTJzYTHDQ_27KBcp7IVVZgPDjVKdWqQvZ18KhxvfF3Idgy82LBZniFV
+IbtxllXiPRxoPQriSXMnXjh3XkvSDI2pFxXfEvLRn1tvcFOwPNCz3QfPIzYg8uYXN5bRt3ZOrR_g
+ZhIlrc7HO0VbNbeqEVPKMZ-cjkqGj4VAuDKoQc0eQ6X_wCoAGO78nPpLeIvZPx1X3z5YoqNA
\ No newline at end of file
"asdcAddress": "sdc-be.onap-sdc.svc.cluster.local:8443",
"consumerGroup": "sdc-OpenSource-Env1",
"consumerId": "sdc-COpenSource-Env11",
- "environmentName": "SDC-OpenSource-Env1",
+ "environmentName": "DMAAP_TOPIC_HERE",
"keyStorePassword": "",
"keyStorePath": "",
"password": "613AF3483E695524F9857643B697FA51C7A9A0951094F53791485BF3458F9EADA37DBACCCEBD0CB242B85B4062745247",
"checkrequiredparameters": "true",
"cloud_sites": [{
"aic_version": "2.5",
- "id": "Ottawa",
- "identity_service_id": "KVE5076_OPENSTACK",
- "lcp_clli": "RegionOne",
- "region_id": "RegionOne"
+ "id": "OPENSTACK_REGION_HERE",
+ "identity_service_id": "DEFAULT_KEYSTONE",
+ "lcp_clli": "OPENSTACK_REGION_HERE",
+ "region_id": "OPENSTACK_REGION_HERE"
}],
"identity_services": [{
- "admin_tenant": "services",
- "dcp_clli": "KVE5076_OPENSTACK",
+ "admin_tenant": "OPENSTACK_SERVICE_TENANT_NAME_HERE",
+ "dcp_clli": "DEFAULT_KEYSTONE",
"identity_authentication_type": "USERNAME_PASSWORD",
"identity_server_type": "KEYSTONE",
- "identity_url": "http://OPENSTACK_KEYSTONE_IP_HERE:5000/v2.0",
+ "identity_url": "OPENSTACK_KEYSTONE_IP_HERE/v2.0",
"member_role": "admin",
- "mso_id": "dev",
- "mso_pass": "dcdc0d9e4d69a667c67725a9e466e6c3",
+ "mso_id": "OPENSTACK_USERNAME_HERE",
+ "mso_pass": "OPENSTACK_ENCRYPTED_PASSWORD_HERE",
"tenant_metadata": "true"
}],
"nwbpelauth": "5119D1AF37F671FC01FFAD2151D93EFB2BBB503E879FD07104D024EDDF118FD1",
--- /dev/null
+default-character-set=latin1
+default-collation=latin1_swedish_ci
--- /dev/null
+ÿlocalhost root \ 1 VUÚXÿae9df72d0f92 root \ 1 VUÚX
\ No newline at end of file
--- /dev/null
+default-character-set=utf8
+default-collation=utf8_general_ci
--- /dev/null
+default-character-set=latin1
+default-collation=latin1_swedish_ci
--- /dev/null
+default-character-set=latin1
+default-collation=latin1_swedish_ci
COMPONENT_X_MX_MB=1024
COMPONENT_X_MS_MB=1024
-REST_PAP_URL=http://pap:9091/pap/
-REST_PDP_ID=http://pdp:8081/pdp/
+REST_PAP_URL=http://pap.onap-policy:9091/pap/
+REST_PDP_ID=http://pdp.onap-policy:8081/pdp/
PDP_HTTP_USER_ID=testpdp
PDP_HTTP_PASSWORD=alpha123
REST_CONFIG_HOME=${{POLICY_HOME}}/servers/pap/webapps/Config/
REST_ACTION_HOME=${{POLICY_HOME}}/servers/pap/webapps/Action/
-REST_CONFIG_URL=http://pap:9091/
+REST_CONFIG_URL=http://pap.onap-policy:9091/
REST_CONFIG_WEBAPPS=${{POLICY_HOME}}/servers/pap/webapps/
# PAP account information
+++ /dev/null
-192.168.141.230
# pap properties
PAP_PDPS=${{POLICY_HOME}}/servers/pap/bin/pdps
-PAP_URL=http://pap:9091/pap/
+PAP_URL=http://pap.onap-policy:9091/pap/
PAP_INITIATE_PDP=true
PAP_HEARTBEAT_INTERVAL=10000
# PDP related properties
-PAP_PDP_URL=http://pdp:8081/pdp/
+PAP_PDP_URL=http://pdp.onap-policy:8081/pdp/
PAP_PDP_HTTP_USER_ID=testpdp
PAP_PDP_HTTP_PASSWORD=alpha123
LOGPARSER_X_MS_MB=1024
LOGPARSER_X_MX_MB=1024
-SERVER=http://pap:9091/pap/
+SERVER=http://pap.onap-policy:9091/pap/
LOGPATH=${{POLICY_HOME}}/servers/pap/logs/pap-rest.log
PARSERLOGPATH=IntegrityMonitor.log
UEB_CLUSTER=dmaap.onap-message-router
-REST_PAP_URL=http://pap:9091/pap/
-REST_PDP_ID=http://pdp:8081/pdp/
+REST_PAP_URL=http://pap.onap-policy:9091/pap/
+REST_PDP_ID=http://pdp.onap-policy:8081/pdp/
REST_PDP_CONFIG=${{POLICY_HOME}}/servers/pdp/bin/config
REST_PDP_WEBAPPS=${{POLICY_HOME}}/servers/pdp/webapps
REST_PDP_REGISTER=true
LOGPARSER_X_MS_MB=1024
LOGPARSER_X_MX_MB=1024
-SERVER=http://pdp:8081/pdp/
+SERVER=http://pdp.onap-policy:8081/pdp/
LOGPATH=${{POLICY_HOME}}/servers/pdp/logs/pdp-rest.log
PARSERLOGPATH=IntegrityMonitor.log
+++ /dev/null
-#! /bin/bash
-
-
-echo "Pushing default policies"
-
-# Sometimes brmsgw gets an error when trying to retrieve the policies on initial push,
-# so for the BRMS policies we will do a push, then delete from the pdp group, then push again.
-# Second push should be successful.
-
-curl -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHJlc3Q6M2MwbXBVI2gwMUBOMWMz' --header 'Environment: TEST' -d '{
- "pdpGroup": "default",
- "policyName": "vFirewall",
- "policyScope": "com",
- "policyType": "MicroService"
-}' 'http://pypdp:8480/PyPDPServer/pushPolicy'
-
-sleep 2
-
-curl -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHJlc3Q6M2MwbXBVI2gwMUBOMWMz' --header 'Environment: TEST' -d '{
- "pdpGroup": "default",
- "policyName": "vLoadBalancer",
- "policyScope": "com",
- "policyType": "MicroService"
-}' 'http://pypdp:8480/PyPDPServer/pushPolicy'
-
-sleep 2
-curl -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHJlc3Q6M2MwbXBVI2gwMUBOMWMz' --header 'Environment: TEST' -d '{
- "pdpGroup": "default",
- "policyName": "BRMSParamvLBDemoPolicy",
- "policyScope": "com",
- "policyType": "BRMS_Param"
-}' 'http://pypdp:8480/PyPDPServer/pushPolicy'
-
-sleep 2
-
-curl -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHJlc3Q6M2MwbXBVI2gwMUBOMWMz' --header 'Environment: TEST' -d '{
- "pdpGroup": "default",
- "policyName": "BRMSParamvFWDemoPolicy",
- "policyScope": "com",
- "policyType": "BRMS_Param"
-}' 'http://pypdp:8480/PyPDPServer/pushPolicy'
-
-sleep 2
-
-curl -X DELETE --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHJlc3Q6M2MwbXBVI2gwMUBOMWMz' --header 'Environment: TEST' -d '{
-"pdpGroup": "default",
-"policyComponent": "PDP",
-"policyName": "com.Config_BRMS_Param_BRMSParamvFWDemoPolicy.1.xml"
-}' 'http://pypdp:8480/PyPDPServer/deletePolicy'
-
-
-
-curl -X DELETE --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHJlc3Q6M2MwbXBVI2gwMUBOMWMz' --header 'Environment: TEST' -d '{
-"pdpGroup": "default",
-"policyComponent": "PDP",
-"policyName": "com.Config_BRMS_Param_BRMSParamvLBDemoPolicy.1.xml"
-}' 'http://pypdp:8480/PyPDPServer/deletePolicy'
-
-sleep 2
-curl -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHJlc3Q6M2MwbXBVI2gwMUBOMWMz' --header 'Environment: TEST' -d '{
- "pdpGroup": "default",
- "policyName": "BRMSParamvLBDemoPolicy",
- "policyScope": "com",
- "policyType": "BRMS_Param"
-}' 'http://pypdp:8480/PyPDPServer/pushPolicy'
-
-sleep 2
-
-curl -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHJlc3Q6M2MwbXBVI2gwMUBOMWMz' --header 'Environment: TEST' -d '{
- "pdpGroup": "default",
- "policyName": "BRMSParamvFWDemoPolicy",
- "policyScope": "com",
- "policyType": "BRMS_Param"
-}' 'http://pypdp:8480/PyPDPServer/pushPolicy'
-
+++ /dev/null
-# pypdp component installation configuration parameters
-
-# tomcat specific parameters
-
-TOMCAT_JMX_PORT=9994
-TOMCAT_SHUTDOWN_PORT=8405
-SSL_HTTP_CONNECTOR_PORT=8480
-SSL_AJP_CONNECTOR_PORT=8384
-SSL_AJP_CONNECTOR_REDIRECT_PORT=8443
-
-TOMCAT_X_MS_MB=1024
-TOMCAT_X_MX_MB=1024
-
-# pypdp parameters
-
-PDP_URL=http://pdp:8081/pdp/,testpdp,alpha123
-PAP_URL=http://pap:9091/pap/,testpap,alpha123
-PYPDP_ID=testrest
-PYPDP_PASSWORD=3c0mpU#h01@N1c3
-
-node_type=pypdp
-# the java property is RESOURCE_NAME (uppercase), but the conf parameter is lowercase
-resource_name=pypdp_1
-
-CLIENT_FILE=client.properties
"pub_key" : "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAqqnA9BAiMLtjOPSYBfhzLu4CiBolWoskDg4KVwhTJVTTeB6CqrQNcadlGXxOHhCYuNCKkUmIVF4WTOisVOJ75Z1c4OMoZLL85xVPKSIeH63kgVugwgPYQu55NbbWX+rsbUha3LnElDhNviMM3iTPbD5nnhKixNERAJMTLKXvZZZGqxW94bREknYPQTT2qrk3YRqwldncopp6Nkgv3AnSJz2gc9tjxnWF0poTQnQm/3D6hiJICrzKfAV0EaPN0KdtYjPhKrYoy6Qb/tKOVaaqsvwfKBJGrT9LfcA7D7M/yj292RT1XN63hI84WC383LsaPJ6eWdDTE6zUP1eGTWCoOw== rsa-key-20161026",
"repo_url_blob" : "https://nexus.onap.org/content/repositories/raw",
"repo_url_artifacts" : "https://nexus.onap.org/content/groups/staging",
- "demo_artifacts_version" : "1.0.0",
+ "demo_artifacts_version" : "DEMO_ARTIFACTS_VERSION_HERE",
"ecomp_private_net_id" : "OPENSTACK_NETWORK_ID_WITH_ONAP_ROUTE_HERE",
"ecomp_private_subnet_id" : "OPENSTACK_SUBNET_ID_WITH_ONAP_ROUTE_HERE",
"ecomp_private_net_cidr" : "NETWORK_CIDR_WITH_ONAP_ROUTE_HERE",
"protected_private_net_cidr" : "192.168.20.0/24",
"vfw_private_ip_0" : "192.168.10.100",
"vfw_private_ip_1" : "192.168.20.100",
- "vfw_private_ip_2" : "192.168.30.5",
+ "vfw_private_ip_2" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.3",
"vpg_private_ip_0" : "192.168.10.200",
- "vpg_private_ip_1" : "192.168.30.3",
+ "vpg_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.4",
"vsn_private_ip_0" : "192.168.20.250",
- "vsn_private_ip_1" : "192.168.30.4",
+ "vsn_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.5",
'vfw_name_0':'vofwl01fwl${hostid}',
'vpg_name_0':'vofwl01pgn${hostid}',
'vsn_name_0':'vofwl01snk${hostid}',
"vlb_private_net_id" : "volb01_private${hostid}",
"vlb_private_net_cidr" : "192.168.30.0/24",
"vlb_private_ip_0" : "192.168.30.100",
- "vlb_private_ip_1" : "192.168.30.4",
+ "vlb_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.6",
"vdns_private_ip_0" : "192.168.30.110",
- "vdns_private_ip_1" : "192.168.30.5",
+ "vdns_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.7",
'vlb_name_0':'vovlblb${hostid}',
'vdns_name_0':'vovlbdns${hostid}',
},
"dnsscaling_preload.template" : {
"vlb_private_net_id" : "volb01_private${hostid}",
"vlb_private_ip_0" : "192.168.30.100",
- "vlb_private_ip_1" : "192.168.30.4",
+ "vlb_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.8",
"vdns_private_ip_0" : "192.168.30.222",
- "vdns_private_ip_1" : "192.168.30.6",
+ "vdns_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.9",
'scaling_vdns_name_0':'vovlbscaling${hostid}',
},
"vvg_preload.template" : {
"protected_private_net_cidr" : "192.168.120.0/24",
"vfw_private_ip_0" : "192.168.110.100",
"vfw_private_ip_1" : "192.168.120.100",
- "vfw_private_ip_2" : "192.168.30.11",
+ "vfw_private_ip_2" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.10",
"vpg_private_ip_0" : "192.168.110.200",
- "vpg_private_ip_1" : "192.168.30.12",
+ "vpg_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.11",
"vsn_private_ip_0" : "192.168.120.250",
- "vsn_private_ip_1" : "192.168.30.13",
+ "vsn_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.12",
'vfw_name_0':'clfwl01fwl${hostid}',
'vpg_name_0':'clfwl01pgn${hostid}',
'vsn_name_0':'clfwl01snk${hostid}',
"vlb_private_net_id" : "cllb01_private${hostid}",
"vlb_private_net_cidr" : "192.168.130.0/24",
"vlb_private_ip_0" : "192.168.130.100",
- "vlb_private_ip_1" : "192.168.30.14",
+ "vlb_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.13",
"vdns_private_ip_0" : "192.168.130.110",
- "vdns_private_ip_1" : "192.168.30.15",
+ "vdns_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.14",
'vlb_name_0':'clvlblb${hostid}',
'vdns_name_0':'clvlbdns${hostid}',
},
"dnsscaling_preload.template" : {
"vlb_private_net_id" : "cllb01_private${hostid}",
"vlb_private_ip_0" : "192.168.130.100",
- "vlb_private_ip_1" : "192.168.30.14",
+ "vlb_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.15",
"vdns_private_ip_0" : "192.168.130.222",
- "vdns_private_ip_1" : "192.168.30.16",
+ "vdns_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.16",
'scaling_vdns_name_0':'clvlbscaling${hostid}',
},
"vvg_preload.template" : {
"protected_private_net_cidr" : "192.168.120.0/24",
"vfw_private_ip_0" : "192.168.110.100",
"vfw_private_ip_1" : "192.168.120.100",
- "vfw_private_ip_2" : "192.168.30.11",
+ "vfw_private_ip_2" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.17",
"vpg_private_ip_0" : "192.168.110.200",
- "vpg_private_ip_1" : "192.168.30.12",
+ "vpg_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.18",
"vsn_private_ip_0" : "192.168.120.250",
- "vsn_private_ip_1" : "192.168.30.13",
+ "vsn_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.19",
'vfw_name_0':'demofwl01fwl',
'vpg_name_0':'demofwl01pgn',
'vsn_name_0':'demofwl01snk',
"vlb_private_net_id" : "demolb_private",
"vlb_private_net_cidr" : "192.168.130.0/24",
"vlb_private_ip_0" : "192.168.130.100",
- "vlb_private_ip_1" : "192.168.30.14",
+ "vlb_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.20",
"vdns_private_ip_0" : "192.168.130.110",
- "vdns_private_ip_1" : "192.168.30.15",
+ "vdns_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.21",
'vlb_name_0':'demovlblb',
'vdns_name_0':'demovlbdns',
},
"dnsscaling_preload.template" : {
"vlb_private_net_id" : "demolb_private",
"vlb_private_ip_0" : "192.168.130.100",
- "vlb_private_ip_1" : "192.168.30.16",
+ "vlb_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.22",
"vdns_private_ip_0" : "192.168.130.222",
- "vdns_private_ip_1" : "192.168.30.17",
+ "vdns_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.23",
'scaling_vdns_name_0':'demovlbscaling',
},
"vvg_preload.template" : {
}
}
}
+
GLOBAL_MSO_USERNAME = "InfraPortalClient"
GLOBAL_MSO_PASSWORD = "password1$"
# openstack info - info to select right info in environment
-GLOBAL_OPENSTACK_TENANT_NAME = ""
+GLOBAL_OPENSTACK_TENANT_NAME = "OPENSTACK_TENANT_NAME_HERE"
# packet generate vnf info - everything is from the private oam network (also called ecomp private network)
GLOBAL_PACKET_GENERATOR_PORT = "8183"
GLOBAL_PACKET_GENERATOR_USERNAME = "admin"
GLOBAL_VID_HEALTH_USERNAME = "Default"
GLOBAL_VID_HEALTH_PASSWORD = "AppPassword!1"
#global selenium info
-GLOBAL_PROXY_WARNING_TITLE=""
-GLOBAL_PROXY_WARNING_CONTINUE_XPATH=""
+GLOBAL_PROXY_WARNING_TITLE = ""
+GLOBAL_PROXY_WARNING_CONTINUE_XPATH = ""
# settings for vm to attach vvg too
-GLOBAL_VVGSERVER_IMAGE = "Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)"
-GLOBAL_VVGSERVER_FLAVOR = "4 GB General Purpose v1"
+GLOBAL_VVGSERVER_IMAGE = "UBUNTU_14_IMAGE_NAME_HERE"
+GLOBAL_VVGSERVER_FLAVOR = "OPENSTACK_FLAVOUR_MEDIUM_HERE"
# dns info
-GLOBAL_DNS_TRAFFIC_DURATION = "600"
\ No newline at end of file
+GLOBAL_DNS_TRAFFIC_DURATION = "600"
GLOBAL_INJECTED_DNS_IP_ADDR = "10.0.100.1"
GLOBAL_INJECTED_DOCKER_VERSION = "1.1-STAGING-latest"
GLOBAL_INJECTED_GERRIT_BRANCH = "master"
-GLOBAL_INJECTED_KEYSTONE = "http://10.100.100.10:5000"
+GLOBAL_INJECTED_KEYSTONE = "OPENSTACK_KEYSTONE_IP_HERE"
GLOBAL_INJECTED_MR_IP_ADDR = "dmaap.onap-message-router"
GLOBAL_INJECTED_MSO_IP_ADDR = "mso.onap-mso"
-GLOBAL_INJECTED_NETWORK = "oam_ecomp_1b5B"
+GLOBAL_INJECTED_NETWORK = "OPENSTACK_OAM_NETWORK_ID_HERE"
GLOBAL_INJECTED_NEXUS_DOCKER_REPO = "nexus3.onap.org:10001"
GLOBAL_INJECTED_NEXUS_PASSWORD = "docker"
GLOBAL_INJECTED_NEXUS_REPO = "https://nexus.onap.org/content/sites/raw"
GLOBAL_INJECTED_NEXUS_USERNAME = "docker"
-GLOBAL_INJECTED_OPENSTACK_PASSWORD = "dev"
-GLOBAL_INJECTED_OPENSTACK_USERNAME = "dev"
+GLOBAL_INJECTED_OPENSTACK_PASSWORD = "OPENSTACK_PASSWORD_HERE"
+GLOBAL_INJECTED_OPENSTACK_USERNAME = "OPENSTACK_USERNAME_HERE"
GLOBAL_INJECTED_POLICY_IP_ADDR = "pypdp.onap-policy"
GLOBAL_INJECTED_POLICY_HEALTHCHECK_IP_ADDR = "drools.onap-policy"
GLOBAL_INJECTED_PORTAL_IP_ADDR = "portalapps.onap-portal"
-GLOBAL_INJECTED_REGION = "RegionOne"
+GLOBAL_INJECTED_REGION = "OPENSTACK_REGION_HERE"
GLOBAL_INJECTED_SDC_FE_IP_ADDR = "sdc-fe.onap-sdc"
GLOBAL_INJECTED_SDC_BE_IP_ADDR = "sdc-be.onap-sdc"
GLOBAL_INJECTED_SDNC_IP_ADDR = "sdnhost.onap-sdnc"
--- /dev/null
+parameters:
+ public_net_id: OPENSTACK_PUBLIC_NET_ID_HERE
+ bono_flavor_name: OPENSTACK_FLAVOUR_MEDIUM_HERE
+ sprout_flavor_name: OPENSTACK_FLAVOUR_MEDIUM_HERE
+ homer_flavor_name: OPENSTACK_FLAVOUR_MEDIUM_HERE
+ homestead_flavor_name: OPENSTACK_FLAVOUR_MEDIUM_HERE
+ ralf_flavor_name: OPENSTACK_FLAVOUR_MEDIUM_HERE
+ ellis_flavor_name: OPENSTACK_FLAVOUR_MEDIUM_HERE
+ dns_flavor_name: OPENSTACK_FLAVOUR_MEDIUM_HERE
+ bono_image_name: UBUNTU_14_IMAGE_NAME_HERE
+ sprout_image_name: UBUNTU_14_IMAGE_NAME_HERE
+ homer_image_name: UBUNTU_14_IMAGE_NAME_HERE
+ homestead_image_name: UBUNTU_14_IMAGE_NAME_HERE
+ ralf_image_name: UBUNTU_14_IMAGE_NAME_HERE
+ ellis_image_name: UBUNTU_14_IMAGE_NAME_HERE
+ dns_image_name: UBUNTU_14_IMAGE_NAME_HERE
+ repo_url: http://repo.cw-ngv.com/stable
+ zone: me.cw-ngv.com
+ dn_range_start: "2425550000"
+ dn_range_length: "10000"
+ dnssec_key: 9FPdYTWhk5+LbhrqtTPQKw==
\ No newline at end of file
--- /dev/null
+parameters:
+ vfw_image_name: UBUNTU_14_IMAGE_NAME_HERE
+ vfw_flavor_name: OPENSTACK_FLAVOUR_MEDIUM_HERE
+ public_net_id: OPENSTACK_PUBLIC_NET_ID_HERE
\ No newline at end of file
--- /dev/null
+parameters:
+ vlb_image_name: UBUNTU_14_IMAGE_NAME_HERE
+ vlb_flavor_name: OPENSTACK_FLAVOUR_MEDIUM_HERE
+ public_net_id: OPENSTACK_PUBLIC_NET_ID_HERE
\ No newline at end of file
--- /dev/null
+parameters:
+ vlb_image_name: UBUNTU_14_IMAGE_NAME_HERE
+ vlb_flavor_name: OPENSTACK_FLAVOUR_MEDIUM_HERE
+ public_net_id: OPENSTACK_PUBLIC_NET_ID_HERE
\ No newline at end of file
+++ /dev/null
-*** Settings ***
-Documentation The main interface for interacting with Openstack Keystone API. It handles low level stuff like managing the authtoken and Openstack required fields
-Library OpenstackLibrary
-Library RequestsLibrary
-Library UUID
-Library Collections
-Library OperatingSystem
-Resource ../global_properties.robot
-Resource ../json_templater.robot
-Resource openstack_common.robot
-
-*** Variables ***
-${OPENSTACK_KEYSTONE_API_VERSION} /v2.0
-${OPENSTACK_KEYSTONE_AUTH_PATH} /tokens
-${OPENSTACK_KEYSTONE_AUTH_BODY_FILE} robot/assets/templates/keystone_get_auth.template
-${OPENSTACK_KEYSTONE_TENANT_PATH} /tenants
-
-*** Keywords ***
-Run Openstack Auth Request
- [Documentation] Runs an Openstack Auth Request and returns the token and service catalog. you need to include the token in future request's x-auth-token headers. Service catalog describes what can be called
- [Arguments] ${alias} ${username}= ${password}=
- ${username} ${password}= Set Openstack Credentials ${username} ${password}
- ${session}= Create Session keystone ${GLOBAL_OPENSTACK_KEYSTONE_SERVER} verify=True
- ${uuid}= Generate UUID
- ${data_template}= OperatingSystem.Get File ${OPENSTACK_KEYSTONE_AUTH_BODY_FILE}
- ${arguments}= Create Dictionary username=${username} password=${password}
- ${data}= Fill JSON Template ${data_template} ${arguments}
- ${data_path}= Catenate ${OPENSTACK_KEYSTONE_API_VERSION}${OPENSTACK_KEYSTONE_AUTH_PATH}
- ${headers}= Create Dictionary Accept=application/json Content-Type=application/json X-TransactionId=${GLOBAL_APPLICATION_ID}-${uuid} X-FromAppId=${GLOBAL_APPLICATION_ID}
- Log Sending authenticate post request ${data_path} with headers ${headers} and data ${data}
- ${resp}= Post Request keystone ${data_path} data=${data} headers=${headers}
- Save Openstack Auth ${alias} ${resp.text}
- Log Received response from keystone ${resp.text}
-
-Get Openstack Tenants
- [Documentation] Returns all the openstack tenant info
- [Arguments] ${alias}
- ${resp}= Internal Get Openstack With Region ${alias} ${GLOBAL_OPENSTACK_KEYSTONE_SERVICE_TYPE} region= url_ext=${OPENSTACK_KEYSTONE_TENANT_PATH} data_path=
- [Return] ${resp.json()}
-
-Get Openstack Tenant
- [Documentation] Returns the openstack tenant info for the specified tenantid
- [Arguments] ${alias} ${tenant_id}
- ${resp}= Internal Get Openstack With Region ${alias} ${GLOBAL_OPENSTACK_KEYSTONE_SERVICE_TYPE} region= url_ext=${OPENSTACK_KEYSTONE_TENANT_PATH} data_path=/${tenant_id}
- [Return] ${resp.json()}
-
-Set Openstack Credentials
- [Arguments] ${username} ${password}
- Return From Keyword If '${username}' != '' ${username} ${password}
- ${user} ${pass}= Get Openstack Credentials
- [Return] ${user} ${pass}
-
-Get Openstack Credentials
- Dictionary Should Contain Key ${GLOBAL_VM_PROPERTIES} openstack_username
- [Return] ${GLOBAL_VM_PROPERTIES['openstack_username']} ${GLOBAL_VM_PROPERTIES['openstack_password']}
\ No newline at end of file
--- /dev/null
+OPENSTACK_UBUNTU_14_IMAGE: "Ubuntu_14.04.5_LTS"
+OPENSTACK_PUBLIC_NET_ID: "e8f51956-00dd-4425-af36-045716781ffc"
+OPENSTACK_OAM_NETWORK_ID: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6"
+OPENSTACK_OAM_SUBNET_ID: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e"
+OPENSTACK_OAM_NETWORK_CIDR: "192.168.30.0/24"
+OPENSTACK_USERNAME: "vnf_user"
+OPENSTACK_API_KEY: "vnf_password"
+OPENSTACK_TENANT_NAME: "vnfs"
+OPENSTACK_REGION: "RegionOne"
+OPENSTACK_KEYSTONE_URL: "http://1.2.3.4:5000"
+OPENSTACK_FLAVOUR_MEDIUM: "m1.medium"
+OPENSTACK_SERVICE_TENANT_NAME: "services"
+DMAAP_TOPIC: "AUTO"
+DEMO_ARTIFACTS_VERSION: "1.1.0-SNAPSHOT"
--- /dev/null
+OPENSTACK_UBUNTU_14_IMAGE: ""
+OPENSTACK_PUBLIC_NET_ID: ""
+OPENSTACK_OAM_NETWORK_ID: ""
+OPENSTACK_OAM_SUBNET_ID: ""
+OPENSTACK_OAM_NETWORK_CIDR: ""
+OPENSTACK_USERNAME: ""
+OPENSTACK_API_KEY: ""
+OPENSTACK_TENANT_NAME: ""
+OPENSTACK_REGION: ""
+OPENSTACK_KEYSTONE_URL: ""
+OPENSTACK_FLAVOUR_MEDIUM: ""
+OPENSTACK_SERVICE_TENANT_NAME: ""
+DMAAP_TOPIC: ""
+DEMO_ARTIFACTS_VERSION: ""
\ No newline at end of file
+++ /dev/null
-apiVersion: v1
-kind: Pod
-metadata:
- name: config-init
-spec:
- containers:
- - name: config-init
- env:
- - name: NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- image: oomk8s/config-init:1.1.0
- imagePullPolicy: Always
- volumeMounts:
- - name: config-init-root
- mountPath: /config-init/
- volumes:
- - name: config-init-root
- hostPath:
- path: /dockerdata-nfs/
- restartPolicy: Never
--- /dev/null
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: global-onap-configmap
+ namespace: "{{ .Values.nsPrefix }}"
+data:
+ {{ (.Files.Glob "onap-parameters.yaml").AsConfig | indent 2 }}
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: {{ .Chart.Name }}
+ namespace: "{{ .Values.nsPrefix }}"
+spec:
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ envFrom:
+ - configMapRef:
+ name: global-onap-configmap
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: NAMESPACE_PREFIX
+ value: {{ .Values.nsPrefix }}
+ - name: DOCKER_SHARE_PATH
+ value: {{ .Values.dockerSharePath }}
+ volumeMounts:
+ - name: config-init-root
+ mountPath: /config-init/
+ volumes:
+ - name: config-init-root
+ hostPath:
+ path: {{ .Values.dockerSharePath }}
+ restartPolicy: Never
\ No newline at end of file
--- /dev/null
+# Default values for config.
+nsPrefix: onap
+dockerSharePath: /dockerdata-nfs
+image:
+ repository: oomk8s/config-init
+ tag: 1.1.0
+ pullPolicy: Always
\ No newline at end of file
hostPath:
path: /var/run/docker.sock
- name: kafka-data
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/
+ persistentVolumeClaim:
+ claimName: message-router-kafka
- name: start-kafka
hostPath:
path: /dockerdata-nfs/{{ .Values.nsPrefix }}/message-router/dcae-startup-vm-message-router/docker_files/start-kafka.sh
--- /dev/null
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: message-router-kafka
+ namespace: "{{ .Values.nsPrefix }}-message-router"
+ labels:
+ name: message-router-kafka
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteMany
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/message-router/dcae-startup-vm-message-router/docker_files/data-kafka/
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: message-router-kafka
+ namespace: "{{ .Values.nsPrefix }}-message-router"
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 2Gi
+ selector:
+ matchLabels:
+ name: message-router-kafka
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: message-router-zookeeper
+ namespace: "{{ .Values.nsPrefix }}-message-router"
+ labels:
+ name: message-router-zookeeper
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteMany
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: message-router-zookeeper
+ namespace: "{{ .Values.nsPrefix }}-message-router"
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 2Gi
+ selector:
+ matchLabels:
+ name: message-router-zookeeper
restartPolicy: Always
volumes:
- name: zookeeper-data
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/message-router/dcae-startup-vm-message-router/docker_files/data-zookeeper
+ persistentVolumeClaim:
+ claimName: message-router-zookeeper
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
name: mso-mariadb-conf
- mountPath: /docker-entrypoint-initdb.d
name: mso-mariadb-docker-entrypoint-initdb
+ - mountPath: /var/lib/mysql
+ name: mso-mariadb-data
ports:
- containerPort: 3306
name: mariadb
- name: mso-mariadb-docker-entrypoint-initdb
hostPath:
path: /dockerdata-nfs/{{ .Values.nsPrefix }}/mso/mariadb/docker-entrypoint-initdb.d
+ - name: mso-mariadb-data
+ persistentVolumeClaim:
+ claimName: mso-db
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
--- /dev/null
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: mso-db
+ namespace: "{{ .Values.nsPrefix }}-mso"
+ labels:
+ name: mso-db
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteMany
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/mso/mariadb/data
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: mso-db
+ namespace: "{{ .Values.nsPrefix }}-mso"
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 2Gi
+ selector:
+ matchLabels:
+ name: mso-db
}
create_onap_helm() {
- helm install ../$2/ --name $1-$2 --namespace $1 --set nsPrefix=$1 --set nodePortPrefix=$3
+ helm install ../$2/ --name $1-$2 --namespace $1 --set nsPrefix=$1,nodePortPrefix=$3
}
"--container-name",
"pap",
"--container-name",
- "pdp",
- "--container-name",
- "pypdp"
+ "pdp"
],
"command": [
"/root/ready.py"
}
}
],
- "image": "{{ .Values.image.readiness }}",
+ "image": "{{ .Values.image.readiness }}:{{ .Values.image.readinessVersion }}",
"imagePullPolicy": "{{ .Values.pullPolicy }}",
"name": "brmsgw-readiness"
}
- /bin/bash
- ./do-start.sh
- brmsgw
- image: {{ .Values.image.policyPe }}
+ image: "{{ .Values.image.policyPe }}:{{ .Values.image.policyPeVersion }}"
imagePullPolicy: {{ .Values.pullPolicy }}
name: brmsgw
volumeMounts:
"--container-name",
"pdp",
"--container-name",
- "pypdp",
- "--container-name",
"brmsgw"
],
"command": [
}
}
],
- "image": "{{ .Values.image.readiness }}",
+ "image": "{{ .Values.image.readiness }}:{{ .Values.image.readinessVersion }}",
"imagePullPolicy": "{{ .Values.pullPolicy }}",
"name": "drools-readiness"
}
- /bin/bash
- -c
- ./do-start.sh
- image: {{ .Values.image.policyDrools }}
+ image: "{{ .Values.image.policyDrools }}:{{ .Values.image.policyDroolsVersion }}"
imagePullPolicy: {{ .Values.pullPolicy }}
name: drools
ports:
- /bin/bash
- -c
- exec bash /tmp/do-start.sh
- image: {{ .Values.image.policyDb }}
+ image: "{{ .Values.image.policyDb }}:{{ .Values.image.policyDbVersion }}"
imagePullPolicy: {{ .Values.pullPolicy }}
name: mariadb
ports:
- containerPort: 3306
+ volumeMounts:
+ - mountPath: /var/lib/mysql
+ name: policy-mariadb-data
readinessProbe:
tcpSocket:
port: 3306
initialDelaySeconds: 5
periodSeconds: 10
+ volumes:
+ - name: policy-mariadb-data
+ persistentVolumeClaim:
+ claimName: policy-db
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
}
}
],
- "image": "{{ .Values.image.readiness }}",
+ "image": "{{ .Values.image.readiness }}:{{ .Values.image.readinessVersion }}",
"imagePullPolicy": "{{ .Values.pullPolicy }}",
"name": "nexus-readiness"
}
- /bin/bash
- -c
- bash -c "/opt/nexus/nexus-2.14.2-01/bin/nexus start && sleep 1000d"
- image: {{ .Values.image.policyNexus }}
+ image: "{{ .Values.image.policyNexus }}:{{ .Values.image.policyNexusVersion }}"
imagePullPolicy: {{ .Values.pullPolicy }}
name: nexus
imagePullSecrets:
}
}
],
- "image": "{{ .Values.image.readiness }}",
+ "image": "{{ .Values.image.readiness }}:{{ .Values.image.readinessVersion }}",
"imagePullPolicy": "{{ .Values.pullPolicy }}",
"name": "pap-readiness"
},
- /bin/bash
- ./do-start.sh
- pap
- image: {{ .Values.image.policyPe }}
+ image: "{{ .Values.image.policyPe }}:{{ .Values.image.policyPeVersion }}"
imagePullPolicy: {{ .Values.pullPolicy }}
name: pap
ports:
}
}
],
- "image": "{{ .Values.image.readiness }}",
+ "image": "{{ .Values.image.readiness }}:{{ .Values.image.readinessVersion }}",
"imagePullPolicy": "{{ .Values.pullPolicy }}",
"name": "pdp-readiness"
}
- /bin/bash
- ./do-start.sh
- pdp
- image: {{ .Values.image.policyPe }}
+ image: "{{ .Values.image.policyPe }}:{{ .Values.image.policyPeVersion }}"
imagePullPolicy: {{ .Values.pullPolicy }}
name: pdp
ports:
+++ /dev/null
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
- name: pypdp
- namespace: "{{ .Values.nsPrefix }}-policy"
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: pypdp
- template:
- metadata:
- labels:
- app: pypdp
- name: pypdp
- annotations:
- pod.beta.kubernetes.io/init-containers: '[
- {
- "args": [
- "--container-name",
- "mariadb",
- "--container-name",
- "nexus",
- "--container-name",
- "pap",
- "--container-name",
- "pdp"
- ],
- "command": [
- "/root/ready.py"
- ],
- "env": [
- {
- "name": "NAMESPACE",
- "valueFrom": {
- "fieldRef": {
- "apiVersion": "v1",
- "fieldPath": "metadata.namespace"
- }
- }
- }
- ],
- "image": "{{ .Values.image.readiness }}",
- "imagePullPolicy": "{{ .Values.pullPolicy }}",
- "name": "pypdp-readiness"
- }
- ]'
- spec:
- containers:
- - command:
- - /bin/bash
- - ./do-start.sh
- - pypdp
- image: {{ .Values.image.policyPe }}
- imagePullPolicy: {{ .Values.pullPolicy }}
- name: pypdp
- ports:
- - containerPort: 8480
- readinessProbe:
- tcpSocket:
- port: 8480
- initialDelaySeconds: 5
- periodSeconds: 10
- volumeMounts:
- - mountPath: /tmp/policy-install/config
- name: pe
- volumes:
- - name: pe
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/policy/opt/policy/config/pe/
- imagePullSecrets:
- - name: "{{ .Values.nsPrefix }}-docker-registry-key"
--- /dev/null
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: policy-db
+ namespace: "{{ .Values.nsPrefix }}-policy"
+ labels:
+ name: policy-db
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteMany
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/policy/mariadb/data/
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: policy-db
+ namespace: "{{ .Values.nsPrefix }}-policy"
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 2Gi
+ selector:
+ matchLabels:
+ name: policy-db
pullPolicy: Always
nodePortPrefix: 302
image:
- readiness: oomk8s/readiness-check:1.0.0
- policyPe: nexus3.onap.org:10001/openecomp/policy/policy-pe:1.0-STAGING-latest
- policyDrools: nexus3.onap.org:10001/openecomp/policy/policy-drools:1.0-STAGING-latest
- policyDb: nexus3.onap.org:10001/openecomp/policy/policy-db:1.0-STAGING-latest
- policyNexus: nexus3.onap.org:10001/openecomp/policy/policy-nexus:1.0-STAGING-latest
+ readiness: oomk8s/readiness-check
+ readinessVersion: 1.0.0
+ policyPe: nexus3.onap.org:10001/openecomp/policy/policy-pe
+ policyPeVersion: 1.1-STAGING-latest
+ policyDrools: nexus3.onap.org:10001/openecomp/policy/policy-drools
+ policyDroolsVersion: 1.1-STAGING-latest
+ policyDb: nexus3.onap.org:10001/openecomp/policy/policy-db
+ policyDbVersion: 1.1-STAGING-latest
+ policyNexus: nexus3.onap.org:10001/openecomp/policy/policy-nexus
+ policyNexusVersion: 1.1-STAGING-latest
ubuntu: ubuntu:16.04
periodSeconds: 10
volumes:
- name: portal-mariadb-data
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/portal/mariadb/data
+ persistentVolumeClaim:
+ claimName: portal-db
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
--- /dev/null
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: portal-db
+ namespace: "{{ .Values.nsPrefix }}-portal"
+ labels:
+ name: portal-db
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteMany
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/portal/mariadb/data
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: portal-db
+ namespace: "{{ .Values.nsPrefix }}-portal"
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 2Gi
+ selector:
+ matchLabels:
+ name: portal-db
volumeMounts:
- name: robot-eteshare
mountPath: /share
- - name: robot-resources-openstack-keystone-int
- mountPath: /var/opt/OpenECOMP_ETE/robot/resources/openstack/keystone_interface.robot
- name: robot-resources-asdc-interface
mountPath: /var/opt/OpenECOMP_ETE/robot/resources/asdc_interface.robot
- name: robot-resources-policy-interface
mountPath: /var/opt/OpenECOMP_ETE/robot/resources/sdngc_interface.robot
- name: lighttpd-authorization
mountPath: /etc/lighttpd/authorization
+ - name: robot-assets-asdc-base-clearwater-env
+ mountPath: /var/opt/OpenECOMP_ETE/robot/assets/asdc/base_clearwater/base_clearwater.env
+ - name: robot-assets-asdc-base-vfw-env
+ mountPath: /var/opt/OpenECOMP_ETE/robot/assets/asdc/base_vfw/base_vfw.env
+ - name: robot-assets-asdc-base-vlb-env
+ mountPath: /var/opt/OpenECOMP_ETE/robot/assets/asdc/base_vlb/base_vlb.env
+ - name: robot-assets-asdc-base-vlb-dns-env
+ mountPath: /var/opt/OpenECOMP_ETE/robot/assets/asdc/base_vlb/dnsscaling.env
ports:
- containerPort: 88
readinessProbe:
- name: robot-eteshare
hostPath:
path: /dockerdata-nfs/{{ .Values.nsPrefix }}/robot/eteshare
- - name: robot-resources-openstack-keystone-int
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/robot/robot/resources/openstack/keystone_interface.robot
- name: robot-resources-asdc-interface
hostPath:
path: /dockerdata-nfs/{{ .Values.nsPrefix }}/robot/robot/resources/asdc_interface.robot
- name: lighttpd-authorization
hostPath:
path: /dockerdata-nfs/{{ .Values.nsPrefix }}/robot/authorization
+ - name: robot-assets-asdc-base-clearwater-env
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/robot/robot/assets/asdc/base_clearwater/base_clearwater.env
+ - name: robot-assets-asdc-base-vfw-env
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/robot/robot/assets/asdc/base_vfw/base_vfw.env
+ - name: robot-assets-asdc-base-vlb-env
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/robot/robot/assets/asdc/base_vlb/base_vlb.env
+ - name: robot-assets-asdc-base-vlb-dns-env
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/robot/robot/assets/asdc/base_vlb/dnsscaling.env
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
periodSeconds: 10
volumes:
- name: sdc-sdc-cs-cs
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/sdc-cs/CS
+ persistentVolumeClaim:
+ claimName: sdc-cs-db
- name: sdc-environments
hostPath:
path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/environments
--- /dev/null
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: sdc-cs-db
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+ labels:
+ name: sdc-cs-db
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteMany
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdc/sdc-cs/CS
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: sdc-cs-db
+ namespace: "{{ .Values.nsPrefix }}-sdc"
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 2Gi
+ selector:
+ matchLabels:
+ name: sdc-cs-db
periodSeconds: 10
volumes:
- name: sdnc-data
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdnc/data
+ persistentVolumeClaim:
+ claimName: sdnc-db
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
--- /dev/null
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: sdnc-db
+ namespace: "{{ .Values.nsPrefix }}-sdnc"
+ labels:
+ name: sdnc-db
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteMany
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdnc/data
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: sdnc-db
+ namespace: "{{ .Values.nsPrefix }}-sdnc"
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 2Gi
+ selector:
+ matchLabels:
+ name: sdnc-db
periodSeconds: 10
volumes:
- name: vid-mariadb-data
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/vid/mariadb/data
+ persistentVolumeClaim:
+ claimName: vid-db
- name: vid-pre-init
hostPath:
path: /dockerdata-nfs/{{ .Values.nsPrefix }}/vid/vid/lf_config/vid-pre-init.sql
--- /dev/null
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: vid-db
+ namespace: "{{ .Values.nsPrefix }}-vid"
+ labels:
+ name: vid-db
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteMany
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /dockerdata-nfs/{{ .Values.nsPrefix }}/vid/mariadb/data
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: vid-db
+ namespace: "{{ .Values.nsPrefix }}-vid"
+spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 2Gi
+ selector:
+ matchLabels:
+ name: vid-db