relationships:
- type: cloudify.relationships.connected_to
target: k8s_master
-
- kube2msb:
- type: cloudify.nodes.Helm
- properties:
- args:
- - { get_input: namespace_perfix }
- - kube2msb
- relationships:
- - type: cloudify.relationships.connected_to
- target: k8s_master
PARENT_CHART := onap
COMMON_CHARTS_DIR := common
-
+SETUP_CHARTS_DIR := setup
# FIXME OOM-765
ROOT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
OUTPUT_DIR := $(ROOT_DIR)/dist
PACKAGE_DIR := $(OUTPUT_DIR)/packages
SECRET_DIR := $(OUTPUT_DIR)/secrets
-EXCLUDES := $(COMMON_CHARTS_DIR) config oneclick readiness test dist $(PARENT_CHART) dcae
-HELM_CHARTS := $(COMMON_CHARTS_DIR) $(filter-out $(EXCLUDES), $(patsubst %/.,%,$(wildcard */.))) $(PARENT_CHART)
+EXCLUDES := $(SETUP_CHARTS_DIR) $(COMMON_CHARTS_DIR) config oneclick readiness test dist $(PARENT_CHART) dcae
+HELM_CHARTS := $(filter-out $(EXCLUDES), $(patsubst %/.,%,$(wildcard */.))) $(PARENT_CHART)
.PHONY: $(EXCLUDES) $(HELM_CHARTS)
-all: $(HELM_CHARTS)
+all: $(COMMON_CHARTS_DIR) $(SETUP_CHARTS_DIR) $(HELM_CHARTS)
+
+common:
+ @echo "\n[$@]"
+ @make package-$@
+
+setup:
+ @echo "\n[$@]"
+ @make package-$@
$(HELM_CHARTS):
@echo "\n[$@]"
metadata:
name: clamp
namespace: "{{ .Values.nsPrefix }}"
+ annotations:
+ msb.onap.org/service-info: '[
+ {
+ "serviceName": "clamp",
+ "version": "v1",
+ "url": "/restservices/clds/v1",
+ "protocol": "REST",
+ "port": "8080",
+ "visualRange":"1"
+ }
+ ]'
spec:
ports:
- name: clamp
apiVersion: v1
description: Common templates for inclusion in other charts
-name: common-templates
+name: common
version: 2.0.0
+++ /dev/null
-EXCLUDES := test
-HELM_CHARTS := $(filter-out $(EXCLUDES), $(patsubst %/.,%,$(wildcard */.)))
-
-.PHONY: $(EXCLUDES) $(HELM_CHARTS)
-
-all: $(HELM_CHARTS)
-
-$(HELM_CHARTS):
- @echo "\n[$@]"
- @make lint-$@
-
-make-%:
- @if [ -f $*/Makefile ]; then make -C $*; fi
-
-dep-%: make-%
- @if [ -f $*/requirements.yaml ]; then helm dep up $*; fi
-
-lint-%: dep-%
- @if [ -f $*/Chart.yaml ]; then helm lint $*; fi
-
-clean:
- @rm -f */requirements.lock
- @rm -f *tgz */charts/*tgz
-%:
- @:
\ No newline at end of file
using the following value:
- .Values.nsPrefix : override namespace prefix
- - .Values.nsSuffix : override namespace suffix
*/}}
{{- define "common.namespace" -}}
- {{- default .Release.Name .Values.nsPrefix -}}
+ {{- default .Release.Namespace .Values.nsPrefix -}}
{{- end -}}
+++ /dev/null
-# File generated from /opt/config
-#
-GLOBAL_INJECTED_AAI1_IP_ADDR = "aai-service.namespace-placeholder"
-GLOBAL_INJECTED_AAI2_IP_ADDR = "N/A"
-GLOBAL_INJECTED_APPC_IP_ADDR = "appc-sdnhost.namespace-placeholder"
-GLOBAL_INJECTED_ARTIFACTS_VERSION = "1.1.0-SNAPSHOT"
-GLOBAL_INJECTED_CLAMP_IP_ADDR = "clamp.namespace-placeholder"
-GLOBAL_INJECTED_CLOUD_ENV = "openstack"
-GLOBAL_INJECTED_DCAE_IP_ADDR = "dcae-controller.namespace-placeholder"
-GLOBAL_INJECTED_DNS_IP_ADDR = "10.0.100.1"
-GLOBAL_INJECTED_DOCKER_VERSION = "1.1-STAGING-latest"
-#GLOBAL_INJECTED_EXTERNAL_DNS = "N/A"
-GLOBAL_INJECTED_GERRIT_BRANCH = "master"
-GLOBAL_INJECTED_KEYSTONE = "OPENSTACK_KEYSTONE_IP_HERE"
-GLOBAL_INJECTED_MR_IP_ADDR = "dmaap.namespace-placeholder"
-GLOBAL_INJECTED_MSO_IP_ADDR = "mso.namespace-placeholder"
-GLOBAL_INJECTED_NETWORK = "OPENSTACK_NETWORK_ID_WITH_ONAP_ROUTE_HERE"
-GLOBAL_INJECTED_NEXUS_DOCKER_REPO = "nexus3.onap.org:10001"
-GLOBAL_INJECTED_NEXUS_PASSWORD = "docker"
-GLOBAL_INJECTED_NEXUS_REPO = "https://nexus.onap.org/content/sites/raw"
-GLOBAL_INJECTED_NEXUS_USERNAME = "docker"
-GLOBAL_INJECTED_OPENO_IP_ADDR = "msb-iag.namespace-placeholder"
-GLOBAL_INJECTED_OPENSTACK_PASSWORD = "OPENSTACK_PASSWORD_HERE"
-GLOBAL_INJECTED_OPENSTACK_TENANT_ID = "OPENSTACK_TENANT_ID_HERE"
-GLOBAL_INJECTED_OPENSTACK_USERNAME = "OPENSTACK_USERNAME_HERE"
-GLOBAL_INJECTED_POLICY_IP_ADDR = "pypdp.namespace-placeholder"
-GLOBAL_INJECTED_POLICY_HEALTHCHECK_IP_ADDR = "drools.namespace-placeholder"
-GLOBAL_INJECTED_PORTAL_IP_ADDR = "portalapps.namespace-placeholder"
-GLOBAL_INJECTED_REGION = "OPENSTACK_REGION_HERE"
-GLOBAL_INJECTED_REMOTE_REPO = "http://gerrit.onap.org/r/testsuite/properties.git"
-GLOBAL_INJECTED_SDC_IP_ADDR = "sdc-be.namespace-placeholder"
-GLOBAL_INJECTED_SDC_FE_IP_ADDR = "sdc-fe.namespace-placeholder"
-GLOBAL_INJECTED_SDC_BE_IP_ADDR = "sdc-be.namespace-placeholder"
-GLOBAL_INJECTED_SDNC_IP_ADDR = "sdnhost.namespace-placeholder"
-GLOBAL_INJECTED_SDNC_PORTAL_IP_ADDR = "sdnc-portal.namespace-placeholder"
-GLOBAL_INJECTED_SO_IP_ADDR = "mso.namespace-placeholder"
-GLOBAL_INJECTED_VID_IP_ADDR = "vid-server.namespace-placeholder"
-GLOBAL_INJECTED_VM_FLAVOR = "OPENSTACK_FLAVOUR_MEDIUM_HERE"
-GLOBAL_INJECTED_VM_IMAGE_NAME = "UBUNTU_14_IMAGE_NAME_HERE"
-GLOBAL_INJECTED_PUBLIC_NET_ID = "OPENSTACK_PUBLIC_NET_ID_HERE"
-
-GLOBAL_INJECTED_PROPERTIES = {
- "GLOBAL_INJECTED_AAI1_IP_ADDR" : "aai-service.namespace-placeholder",
- "GLOBAL_INJECTED_APPC_IP_ADDR" : "appc-sdnhost.namespace-placeholder",
- "GLOBAL_INJECTED_ARTIFACTS_VERSION" : "1.1.0-SNAPSHOT",
- "GLOBAL_INJECTED_CLAMP_IP_ADDR" : "clamp.namespace-placeholder",
- "GLOBAL_INJECTED_CLOUD_ENV" : "openstack",
- "GLOBAL_INJECTED_DCAE_IP_ADDR" : "dcae-controller.namespace-placeholder",
- "GLOBAL_INJECTED_DNS_IP_ADDR" : "10.0.100.1",
- "GLOBAL_INJECTED_DOCKER_VERSION" : "1.1-STAGING-latest",
- "GLOBAL_INJECTED_GERRIT_BRANCH" : "master",
- "GLOBAL_INJECTED_KEYSTONE" : "OPENSTACK_KEYSTONE_IP_HERE",
- "GLOBAL_INJECTED_MR_IP_ADDR" : "dmaap.namespace-placeholder",
- "GLOBAL_INJECTED_MSO_IP_ADDR" : "mso.namespace-placeholder",
- "GLOBAL_INJECTED_NETWORK" : "OPENSTACK_NETWORK_ID_WITH_ONAP_ROUTE_HERE",
- "GLOBAL_INJECTED_NEXUS_DOCKER_REPO" : "nexus3.onap.org:10001",
- "GLOBAL_INJECTED_NEXUS_PASSWORD" : "docker",
- "GLOBAL_INJECTED_NEXUS_REPO" : "https://nexus.onap.org/content/sites/raw",
- "GLOBAL_INJECTED_NEXUS_USERNAME" : "docker",
- "GLOBAL_INJECTED_OPENO_IP_ADDR" : "msb-iag.namespace-placeholder",
- "GLOBAL_INJECTED_OPENSTACK_PASSWORD" : "OPENSTACK_PASSWORD_HERE",
- "GLOBAL_INJECTED_OPENSTACK_TENANT_ID" : "OPENSTACK_TENANT_ID_HERE",
- "GLOBAL_INJECTED_OPENSTACK_USERNAME" : "OPENSTACK_USERNAME_HERE",
- "GLOBAL_INJECTED_POLICY_IP_ADDR" : "pypdp.namespace-placeholder",
- "GLOBAL_INJECTED_POLICY_HEALTHCHECK_IP_ADDR" : "drools.namespace-placeholder",
- "GLOBAL_INJECTED_PORTAL_IP_ADDR" : "portalapps.namespace-placeholder",
- "GLOBAL_INJECTED_REGION" : "OPENSTACK_REGION_HERE",
- "GLOBAL_INJECTED_REMOTE_REPO" : "http://gerrit.onap.org/r/testsuite/properties.git",
- "GLOBAL_INJECTED_SDC_FE_IP_ADDR" : "sdc-fe.namespace-placeholder",
- "GLOBAL_INJECTED_SDC_BE_IP_ADDR" : "sdc-be.namespace-placeholder",
- "GLOBAL_INJECTED_SDNC_IP_ADDR" : "sdnhost.namespace-placeholder",
- "GLOBAL_INJECTED_SDNC_PORTAL_IP_ADDR" : "sdnc-portal.namespace-placeholder",
- "GLOBAL_INJECTED_SO_IP_ADDR" : "mso.namespace-placeholder",
- "GLOBAL_INJECTED_VID_IP_ADDR" : "vid-server.namespace-placeholder",
- "GLOBAL_INJECTED_VM_FLAVOR" : "OPENSTACK_FLAVOUR_MEDIUM_HERE",
- "GLOBAL_INJECTED_VM_IMAGE_NAME" : "UBUNTU_14_IMAGE_NAME_HERE",
- "GLOBAL_INJECTED_PUBLIC_NET_ID" : "OPENSTACK_PUBLIC_NET_ID_HERE"
-}
"url": "/api/aai-esr-server/v1",
"protocol": "REST",
"port": "{{.Values.esrserver.port}}",
+ "enable_ssl": true,
"visualRange":"1"
}
]'
+++ /dev/null
-apiVersion: v1
-description: A Helm chart for Kubernetes
-name: kube2msb
-version: 0.1.0
+++ /dev/null
-nsPrefix: onap
-pullPolicy: IfNotPresent
-image:
- kube2msb: nexus3.onap.org:10001/onap/oom/kube2msb
-kubeMasterUrl: https://kubernetes.default.svc.cluster.local:443
-discoveryUrl: http://msb-discovery.{{ .Values.nsPrefix }}:10081
name: kube2msb-registrator
spec:
hostname: kube2msb-registrator
+ initContainers:
+ - command:
+ - /root/ready.py
+ args:
+ - --container-name
+ - msb-discovery
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ image: {{ .Values.image.readiness }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ name: kube2msb-registrator-readiness
containers:
- args:
image: {{ .Values.image.kube2msb }}
consul: consul:0.9.3
discovery: nexus3.onap.org:10001/onap/msb/msb_discovery:1.1.0-SNAPSHOT-latest
apigateway: nexus3.onap.org:10001/onap/msb/msb_apigateway:1.1.0-SNAPSHOT-latest
+ kube2msb: nexus3.onap.org:10001/onap/oom/kube2msb
consulPort: 8500
consulNodePort: 30500
eagNodePort: 30082
eagNodePort_https: 30446
eagReplicas: 1
+
+kubeMasterUrl: https://kubernetes.default.svc.cluster.local:443
+discoveryUrl: http://msb-discovery.{{ .Values.nsPrefix }}:10081
--- /dev/null
+filebeat.prospectors:
+#input_type is mandatory; in our case it's log
+- input_type: log
+  #This is the canonical path as mentioned in logback.xml, *.* means it will monitor all files in the directory.
+ paths:
+ - /var/log/onap/*/*/*/*.log
+ - /var/log/onap/*/*/*.log
+ - /var/log/onap/*/*.log
+  #Files older than this should be ignored. In our case it will be 24 hours i.e. 1 day. It is a helping flag for clean_inactive
+ ignore_older: 24h
+  # Remove the registry entry for a file that is more than the specified time. In our case it will be 48 hours, i.e. 2 days. It will help to keep registry records with in limit
+ clean_inactive: 48h
+
+
+# Name of the registry file. If a relative path is used, it is considered relative to the
+# data path. Else full qualified file name.
+#filebeat.registry_file: ${path.data}/registry
+
+
+output.logstash:
+ #List of logstash server ip addresses with port number.
+ #But, in our case, this will be the loadbalancer IP address.
+  #For the below property to work the loadbalancer or logstash should expose port 5044 to listen for the filebeat events, or the port in the property should be changed appropriately.
+ hosts: ["logstash.{{.Values.nsPrefix}}:5044"]
+  #If enabled, will do load balancing among the available Logstash instances automatically.
+ loadbalance: true
+
+ #The list of root certificates for server verifications.
+ #If certificate_authorities is empty or not set, the trusted
+ #certificate authorities of the host system are used.
+ #ssl.certificate_authorities: $ssl.certificate_authorities
+
+ #The path to the certificate for SSL client authentication. If the certificate is not specified,
+ #client authentication is not available.
+ #ssl.certificate: $ssl.certificate
+
+ #The client certificate key used for client authentication.
+ #ssl.key: $ssl.key
+
+ #The passphrase used to decrypt an encrypted key stored in the configured key file
+ #ssl.key_passphrase: $ssl.key_passphrase
--- /dev/null
+version: 1
+disable_existing_loggers: False
+
+loggers:
+ multivimbroker:
+ handlers: [multivimbroker_handler]
+ level: "DEBUG"
+ propagate: False
+
+handlers:
+ multivimbroker_handler:
+ level: "DEBUG"
+ class: "logging.handlers.RotatingFileHandler"
+ filename: "/var/log/onap/multicloud/multivimbroker/multivimbroker.log"
+ formatter: "mdcFormat"
+ maxBytes: 1024*1024*50
+ backupCount: 10
+
+formatters:
+ standard:
+ format: "%(asctime)s:[%(name)s]:[%(filename)s]-[%(lineno)d] [%(levelname)s]:%(message)s"
+ mdcFormat:
+ format: "%(asctime)s:[%(name)s]:[%(filename)s]-[%(lineno)d] [%(levelname)s]:[%(mdc)s]: %(message)s"
+ mdcfmt: "{requestID}"
+ datefmt: "%Y-%m-%d %H:%M:%S"
+ (): onaplogging.mdcformatter.MDCFormatter
\ No newline at end of file
--- /dev/null
+version: 1
+disable_existing_loggers: False
+
+loggers:
+ vio:
+ handlers: [vio_handler]
+ level: "DEBUG"
+ propagate: False
+
+handlers:
+ vio_handler:
+ level: "DEBUG"
+ class: "logging.handlers.RotatingFileHandler"
+ filename: "/var/log/onap/multicloud/vio/vio.log"
+ formatter: "mdcFormat"
+ maxBytes: 1024*1024*50
+ backupCount: 10
+
+formatters:
+ standard:
+ format: "%(asctime)s:[%(name)s]:[%(filename)s]-[%(lineno)d] [%(levelname)s]:%(message)s"
+ mdcFormat:
+ format: "%(asctime)s:[%(name)s]:[%(filename)s]-[%(lineno)d] [%(levelname)s]:[%(mdc)s]: %(message)s"
+ mdcfmt: "{requestID}"
+ datefmt: "%Y-%m-%d %H:%M:%S"
+ (): onaplogging.mdcformatter.MDCFormatter
\ No newline at end of file
app: framework
name: multicloud-framework
spec:
- hostname: framework
containers:
- env:
- name: MSB_ADDR
value: "AAI"
- name: AAI_PASSWORD
value: "AAI"
+ image: {{ .Values.image.framework }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
name: framework
volumeMounts:
- - mountPath: /service/multivimbroker/logs
+ - mountPath: /var/log/onap
name: framework-log
- image: {{ .Values.image.framework }}
- imagePullPolicy: {{ .Values.pullPolicy }}
+ - mountPath: /opt/multivimbroker/multivimbroker/pub/config/log.yml
+ name: framework-logconfig
+ subPath: log.yml
ports:
- containerPort: 9001
livenessProbe:
timeoutSeconds: 10
successThreshold: 1
failureThreshold: 5
- restartPolicy: Always
+ - image: {{ .Values.image.filebeat }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ name: filebeat-onap
+ volumeMounts:
+ - mountPath: /usr/share/filebeat/filebeat.yml
+ name: filebeat-conf
+ subPath: filebeat.yml
+ - mountPath: /var/log/onap
+ name: framework-log
+ - mountPath: /usr/share/filebeat/data
+ name: framework-data-filebeat
volumes:
- name: framework-log
- hostPath:
- path: {{ .Values.dataRootDir }}/{{ .Values.nsPrefix }}/multicloud/framework/logs
+ emptyDir: {}
+ - name: framework-data-filebeat
+ emptyDir: {}
+ - name: filebeat-conf
+ configMap:
+ name: multicloud-filebeat-configmap
+ - name: framework-logconfig
+ configMap:
+ name: multicloud-framework-log-configmap
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
+ restartPolicy: Always
#{{ end }}
--- /dev/null
+#{{ if not .Values.disableVidVidServer }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: multicloud-filebeat-configmap
+ namespace: {{ .Values.nsPrefix }}
+data:
+{{ tpl (.Files.Glob "resources/config/log/filebeat/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: multicloud-framework-log-configmap
+ namespace: {{ .Values.nsPrefix }}
+data:
+{{ tpl (.Files.Glob "resources/config/log/framework/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: multicloud-vio-log-configmap
+ namespace: {{ .Values.nsPrefix }}
+data:
+{{ tpl (.Files.Glob "resources/config/log/vio/*").AsConfig . | indent 2 }}
+#{{ end }}
app: multicloud-vio
name: multicloud-vio
spec:
- hostname: multicloud-vio
containers:
- env:
- name: MSB_ADDR
value: "AAI"
name: multicloud-vio
volumeMounts:
- - mountPath: /service/vio/logs
+ - mountPath: /var/log/onap
name: vio-log
+ - mountPath: /opt/vio/vio/pub/config/log.yml
+ name: vio-logconfig
+ subPath: log.yml
image: {{ .Values.image.vio }}
imagePullPolicy: {{ .Values.pullPolicy }}
ports:
timeoutSeconds: 10
successThreshold: 1
failureThreshold: 5
- restartPolicy: Always
+ - image: {{ .Values.image.filebeat }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ name: filebeat-onap
+ volumeMounts:
+ - mountPath: /usr/share/filebeat/filebeat.yml
+ name: filebeat-conf
+ subPath: filebeat.yml
+ - mountPath: /var/log/onap
+ name: vio-log
+ - mountPath: /usr/share/filebeat/data
+ name: vio-data-filebeat
volumes:
- name: vio-log
- hostPath:
- path: {{ .Values.dataRootDir }}/{{ .Values.nsPrefix }}/multicloud/vio/logs
+ emptyDir: {}
+ - name: vio-data-filebeat
+ emptyDir: {}
+ - name: filebeat-conf
+ configMap:
+ name: multicloud-filebeat-configmap
+ - name: vio-logconfig
+ configMap:
+ name: multicloud-vio-log-configmap
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
+ restartPolicy: Always
#{{ end }}
windRiverReplicas: 1
image:
readiness: oomk8s/readiness-check:1.1.0
- framework: nexus3.onap.org:10001/onap/multicloud/framework:v1.0.0
- vio: nexus3.onap.org:10001/onap/multicloud/vio:v1.0.0
+ framework: nexus3.onap.org:10001/onap/multicloud/framework:latest
+ vio: nexus3.onap.org:10001/onap/multicloud/vio:latest
ocata: nexus3.onap.org:10001/onap/multicloud/openstack-ocata:v1.0.0
windriver: nexus3.onap.org:10001/onap/multicloud/openstack-windriver:v1.0.0
-
+ filebeat: docker.elastic.co/beats/filebeat:5.5.0
# domain name of msb gateway
msbgateway: msb-iag.{{ .Values.nsPrefix }}
msbPort: 80
version: ~0.1.0
repository: '@local'
condition: cli.enabled
+ - name: common
+ version: ~2.0.0
+ repository: '@local'
- name: consul
version: ~1.1.0
repository: '@local'
version: ~1.1.0
repository: '@local'
condition: esr.enabled
- - name: kube2msb
- version: ~0.1.0
- repository: '@local'
- condition: kube2msb.enabled
- name: log
version: ~0.1.0
repository: '@local'
version: ~0.1.0
repository: '@local'
condition: sdnc.enabled
+ - name: setup
+ version: ~2.0.0
+ repository: '@local'
- name: so
version: ~2.0.0
repository: '@local'
- name: vnfsdk
version: ~1.1.0
repository: '@local'
- condition: vnfsdk.enabled
\ No newline at end of file
+ condition: vnfsdk.enabled
# flag to enable debugging - application support required
debugEnabled: false
+
#################################################################
# Enable/disable and configure helm charts (ie. applications)
# to customize the ONAP deployment.
enabled: false
esr:
enabled: false
-kube2msb:
- enabled: false
log:
enabled: false
message-router:
vid:
enabled: false
vnfsdk:
- enabled: false
\ No newline at end of file
+ enabled: false
#!/bin/bash
-# Deploying MSB first and kube2msb last will ensure all the ONAP services can be registered to MSB
-HELM_APPS=('consul' 'msb' 'mso' 'message-router' 'sdnc' 'vid' 'robot' 'portal' 'policy' 'appc' 'aai' 'sdc' 'dcaegen2' 'log' 'cli' 'multicloud' 'clamp' 'vnfsdk' 'uui' 'aaf' 'vfc' 'kube2msb' 'esr')
+# Deploying MSB first so the started ONAP services can be registered to MSB
+HELM_APPS=('consul' 'msb' 'mso' 'message-router' 'sdnc' 'vid' 'robot' 'portal' 'policy' 'appc' 'aai' 'sdc' 'dcaegen2' 'log' 'cli' 'multicloud' 'clamp' 'vnfsdk' 'uui' 'aaf' 'vfc' 'esr')
ONAP_DOCKER_REGISTRY=${ONAP_DOCKER_REGISTRY:-nexus3.onap.org:10001}
ONAP_DOCKER_USER=${ONAP_DOCKER_USER:-docker}
ONAP_DOCKER_PASS=${ONAP_DOCKER_PASS:-docker}
"pub_key" : "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAqqnA9BAiMLtjOPSYBfhzLu4CiBolWoskDg4KVwhTJVTTeB6CqrQNcadlGXxOHhCYuNCKkUmIVF4WTOisVOJ75Z1c4OMoZLL85xVPKSIeH63kgVugwgPYQu55NbbWX+rsbUha3LnElDhNviMM3iTPbD5nnhKixNERAJMTLKXvZZZGqxW94bREknYPQTT2qrk3YRqwldncopp6Nkgv3AnSJz2gc9tjxnWF0poTQnQm/3D6hiJICrzKfAV0EaPN0KdtYjPhKrYoy6Qb/tKOVaaqsvwfKBJGrT9LfcA7D7M/yj292RT1XN63hI84WC383LsaPJ6eWdDTE6zUP1eGTWCoOw== rsa-key-20161026",
"repo_url_blob" : "https://nexus.onap.org/content/repositories/raw",
"repo_url_artifacts" : "https://nexus.onap.org/content/groups/staging",
- "demo_artifacts_version" : "DEMO_ARTIFACTS_VERSION_HERE",
- "onap_private_net_id" : "OPENSTACK_NETWORK_ID_WITH_ONAP_ROUTE_HERE",
- "onap_private_subnet_id" : "OPENSTACK_SUBNET_ID_WITH_ONAP_ROUTE_HERE",
- "onap_private_net_cidr" : "NETWORK_CIDR_WITH_ONAP_ROUTE_HERE",
+ "demo_artifacts_version" : "{{ .Values.demoArtifactsVersion }}",
+ "onap_private_net_id" : "{{ .Values.openStackPrivateNetId }}",
+ "onap_private_subnet_id" : "{{ .Values.openStackPrivateSubnetId }}",
+ "onap_private_net_cidr" : "{{ .Values.openStackPrivateNetCidr }}",
"dcae_collector_ip" : "10.0.4.102",
"dcae_collector_port" : "8080",
- "public_net_id" : "OPENSTACK_PUBLIC_NET_ID_HERE",
+ "public_net_id" : "{{ .Values.openStackPublicNetId }}",
"cloud_env" : "${cloud_env}",
- "install_script_version" : "${install_script_version}",
+ "install_script_version" : "${install_script_version}",
###
# vims_preload same for every instantiation
###
"ralf_image_name" : "${vm_image_name}",
"ellis_image_name" : "${vm_image_name}",
"dns_image_name" : "${vm_image_name}",
- "bono_flavor_name" : "${vm_flavor_name}",
- "sprout_flavor_name" : "${vm_flavor_name}",
- "homer_flavor_name" : "${vm_flavor_name}",
- "homestead_flavor_name" : "${vm_flavor_name}",
- "ralf_flavor_name" : "${vm_flavor_name}",
- "ellis_flavor_name" : "${vm_flavor_name}",
- "dns_flavor_name" : "${vm_flavor_name}",
- "repo_url" : "http://repo.cw-ngv.com/stable",
- "zone" : "me.cw-ngv.com",
- "dn_range_start" : "2425550000",
- "dn_range_length" : "10000",
- "dnssec_key" : "9FPdYTWhk5+LbhrqtTPQKw==",
+ "bono_flavor_name" : "${vm_flavor_name}",
+ "sprout_flavor_name" : "${vm_flavor_name}",
+ "homer_flavor_name" : "${vm_flavor_name}",
+ "homestead_flavor_name" : "${vm_flavor_name}",
+ "ralf_flavor_name" : "${vm_flavor_name}",
+ "ellis_flavor_name" : "${vm_flavor_name}",
+ "dns_flavor_name" : "${vm_flavor_name}",
+ "repo_url" : "http://repo.cw-ngv.com/stable",
+ "zone" : "me.cw-ngv.com",
+ "dn_range_start" : "2425550000",
+ "dn_range_length" : "10000",
+ "dnssec_key" : "9FPdYTWhk5+LbhrqtTPQKw==",
###
# vlb_preload same for every instantiation
###
- "vlb_image_name" : "${vm_image_name}",
- "vlb_flavor_name" : "${vm_flavor_name}",
+ "vlb_image_name" : "${vm_image_name}",
+ "vlb_flavor_name" : "${vm_flavor_name}",
###
# vlb_preload same for every instantiation
###
- "vfw_image_name" : "${vm_image_name}",
- "vfw_flavor_name" : "${vm_flavor_name}",
+ "vfw_image_name" : "${vm_image_name}",
+ "vfw_flavor_name" : "${vm_flavor_name}",
###
},
"protected_private_net_cidr" : "192.168.20.0/24",
"vfw_private_ip_0" : "192.168.10.100",
"vfw_private_ip_1" : "192.168.20.100",
- "vfw_private_ip_2" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.1",
+ "vfw_private_ip_2" : "{{ .Values.openStackOamNetworkCidrPrefix }}.1",
"vpg_private_ip_0" : "192.168.10.200",
- "vpg_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.2",
+ "vpg_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.2",
"vsn_private_ip_0" : "192.168.20.250",
- "vsn_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.3",
+ "vsn_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.3",
'vfw_name_0':'vofwl01fwl${hostid}',
'vpg_name_0':'vofwl01pgn${hostid}',
'vsn_name_0':'vofwl01snk${hostid}'
"vlb_private_net_id" : "volb01_private${hostid}",
"vlb_private_net_cidr" : "192.168.30.0/24",
"vlb_private_ip_0" : "192.168.30.100",
- "vlb_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.4",
+ "vlb_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.4",
"vdns_private_ip_0" : "192.168.30.110",
- "vdns_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.5",
+ "vdns_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.5",
'vlb_name_0':'vovlblb${hostid}',
'vdns_name_0':'vovlbdns${hostid}',
- "vlb_private_net_cidr" : "192.168.10.0/24",
- "pktgen_private_net_cidr" : "192.168.9.0/24"
-
+ "vlb_private_net_cidr" : "192.168.10.0/24",
+ "pktgen_private_net_cidr" : "192.168.9.0/24"
+
},
"dnsscaling_preload.template" : {
"vlb_private_net_id" : "volb01_private${hostid}",
"vlb_private_ip_0" : "192.168.30.100",
- "vlb_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.4",
+ "vlb_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.4",
"vdns_private_ip_0" : "192.168.30.222",
- "vdns_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.6",
+ "vdns_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.6",
'scaling_vdns_name_0':'vovlbscaling${hostid}',
- "vlb_private_net_cidr" : "192.168.10.0/24"
+ "vlb_private_net_cidr" : "192.168.10.0/24"
},
"vvg_preload.template" : {
}
},
# heat template parameter values for heat template instances created during Closed-Loop test cases
"Closed-Loop" : {
- "vfw_preload.template": {
+ "vfw_preload.template": {
"unprotected_private_net_id" : "clfwl01_unprotected${hostid}",
"unprotected_private_net_cidr" : "192.168.110.0/24",
"protected_private_net_id" : "clfwl01_protected${hostid}",
"protected_private_net_cidr" : "192.168.120.0/24",
"vfw_private_ip_0" : "192.168.110.100",
"vfw_private_ip_1" : "192.168.120.100",
- "vfw_private_ip_2" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.11",
+ "vfw_private_ip_2" : "{{ .Values.openStackOamNetworkCidrPrefix }}.11",
"vpg_private_ip_0" : "192.168.110.200",
- "vpg_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.12",
+ "vpg_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.12",
"vsn_private_ip_0" : "192.168.120.250",
- "vsn_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.13",
+ "vsn_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.13",
'vfw_name_0':'clfwl01fwl${hostid}',
'vpg_name_0':'clfwl01pgn${hostid}',
'vsn_name_0':'clfwl01snk${hostid}'
"vlb_private_net_id" : "cllb01_private${hostid}",
"vlb_private_net_cidr" : "192.168.130.0/24",
"vlb_private_ip_0" : "192.168.130.100",
- "vlb_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.14",
+ "vlb_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.14",
"vdns_private_ip_0" : "192.168.130.110",
- "vdns_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.15",
+ "vdns_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.15",
'vlb_name_0':'clvlblb${hostid}',
'vdns_name_0':'clvlbdns${hostid}',
- "vlb_private_net_cidr" : "192.168.10.0/24",
- "pktgen_private_net_cidr" : "192.168.9.0/24"
+ "vlb_private_net_cidr" : "192.168.10.0/24",
+ "pktgen_private_net_cidr" : "192.168.9.0/24"
},
"dnsscaling_preload.template" : {
"vlb_private_net_id" : "cllb01_private${hostid}",
"vlb_private_ip_0" : "192.168.130.100",
- "vlb_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.14",
+ "vlb_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.14",
"vdns_private_ip_0" : "192.168.130.222",
- "vdns_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.16",
+ "vdns_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.16",
'scaling_vdns_name_0':'clvlbscaling${hostid}',
- "vlb_private_net_cidr" : "192.168.10.0/24"
+ "vlb_private_net_cidr" : "192.168.10.0/24"
},
"vvg_preload.template" : {
}
"protected_private_net_cidr" : "192.168.120.0/24",
"vfw_private_ip_0" : "192.168.110.100",
"vfw_private_ip_1" : "192.168.120.100",
- "vfw_private_ip_2" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.11",
+ "vfw_private_ip_2" : "{{ .Values.openStackOamNetworkCidrPrefix }}.11",
"vpg_private_ip_0" : "192.168.110.200",
- "vpg_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.12",
+ "vpg_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.12",
"vsn_private_ip_0" : "192.168.120.250",
- "vsn_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.13",
+ "vsn_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.13",
'vfw_name_0':'demofwl01fwl',
'vpg_name_0':'demofwl01pgn',
'vsn_name_0':'demofwl01snk'
"vlb_private_net_id" : "demolb_private",
"vlb_private_net_cidr" : "192.168.130.0/24",
"vlb_private_ip_0" : "192.168.130.100",
- "vlb_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.14",
+ "vlb_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.14",
"vdns_private_ip_0" : "192.168.130.110",
- "vdns_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.15",
+ "vdns_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.15",
'vlb_name_0':'demovlblb',
'vdns_name_0':'demovlbdns',
- "vlb_private_net_cidr" : "192.168.10.0/24",
- "pktgen_private_net_cidr" : "192.168.9.0/24"
+ "vlb_private_net_cidr" : "192.168.10.0/24",
+ "pktgen_private_net_cidr" : "192.168.9.0/24"
},
"dnsscaling_preload.template" : {
"vlb_private_net_id" : "demolb_private",
"vlb_private_ip_0" : "192.168.130.100",
- "vlb_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.14",
+ "vlb_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.14",
"vdns_private_ip_0" : "192.168.130.222",
- "vdns_private_ip_1" : "OPENSTACK_OAM_NETWORK_CIDR_PREFIX_HERE.16",
+ "vdns_private_ip_1" : "{{ .Values.openStackOamNetworkCidrPrefix }}.16",
'scaling_vdns_name_0':'demovlbscaling',
- "vlb_private_net_cidr" : "192.168.10.0/24"
+ "vlb_private_net_cidr" : "192.168.10.0/24"
},
"vvg_preload.template" : {
}
}
}
-
GLOBAL_PROXY_WARNING_TITLE = ""
GLOBAL_PROXY_WARNING_CONTINUE_XPATH = ""
# settings for vm to attach vvg too
-GLOBAL_VVGSERVER_IMAGE = "UBUNTU_14_IMAGE_NAME_HERE"
-GLOBAL_VVGSERVER_FLAVOR = "OPENSTACK_FLAVOUR_MEDIUM_HERE"
+GLOBAL_VVGSERVER_IMAGE = "{{ .Values.ubuntuImage }}"
+GLOBAL_VVGSERVER_FLAVOR = "{{ .Values.openStackFlavourMedium }}"
# dns info
GLOBAL_DNS_TRAFFIC_DURATION = "600"
GLOBAL_HEAT_TEMPLATES_FOLDER = "/share/heat"
--- /dev/null
+# File generated from /opt/config
+#
+GLOBAL_INJECTED_AAI1_IP_ADDR = "aai-service.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_AAI2_IP_ADDR = "N/A"
+GLOBAL_INJECTED_APPC_IP_ADDR = "appc-sdnhost.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_ARTIFACTS_VERSION = "1.1.0-SNAPSHOT"
+GLOBAL_INJECTED_CLAMP_IP_ADDR = "clamp.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_CLOUD_ENV = "openstack"
+GLOBAL_INJECTED_DCAE_IP_ADDR = "dcae-controller.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_DNS_IP_ADDR = "10.0.100.1"
+GLOBAL_INJECTED_DOCKER_VERSION = "1.1-STAGING-latest"
+#GLOBAL_INJECTED_EXTERNAL_DNS = "N/A"
+GLOBAL_INJECTED_GERRIT_BRANCH = "master"
+GLOBAL_INJECTED_KEYSTONE = "{{ .Values.openStackKeyStoneUrl }}"
+GLOBAL_INJECTED_MR_IP_ADDR = "dmaap.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_MSO_IP_ADDR = "mso.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_NETWORK = "{{ .Values.openStackPrivateNetId }}"
+GLOBAL_INJECTED_NEXUS_DOCKER_REPO = "nexus3.onap.org:10001"
+GLOBAL_INJECTED_NEXUS_PASSWORD = "docker"
+GLOBAL_INJECTED_NEXUS_REPO = "https://nexus.onap.org/content/sites/raw"
+GLOBAL_INJECTED_NEXUS_USERNAME = "docker"
+GLOBAL_INJECTED_OPENO_IP_ADDR = "msb-iag.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_OPENSTACK_PASSWORD = "{{ .Values.openStackEncryptedPassword }}"
+GLOBAL_INJECTED_OPENSTACK_TENANT_ID = "{{ .Values.openStackTenantId }}"
+GLOBAL_INJECTED_OPENSTACK_USERNAME = "{{ .Values.openStackUserName }}"
+GLOBAL_INJECTED_POLICY_IP_ADDR = "pypdp.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_POLICY_HEALTHCHECK_IP_ADDR = "drools.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_PORTAL_IP_ADDR = "portalapps.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_REGION = "{{ .Values.openStackRegion }}"
+GLOBAL_INJECTED_REMOTE_REPO = "http://gerrit.onap.org/r/testsuite/properties.git"
+GLOBAL_INJECTED_SDC_IP_ADDR = "sdc-be.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_SDC_FE_IP_ADDR = "sdc-fe.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_SDC_BE_IP_ADDR = "sdc-be.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_SDNC_IP_ADDR = "sdnhost.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_SDNC_PORTAL_IP_ADDR = "sdnc-portal.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_SO_IP_ADDR = "mso.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_VID_IP_ADDR = "vid-server.{{ .Values.nsPrefix }}"
+GLOBAL_INJECTED_VM_FLAVOR = "{{ .Values.openStackFlavourMedium }}"
+GLOBAL_INJECTED_VM_IMAGE_NAME = "{{ .Values.ubuntuImage }}"
+GLOBAL_INJECTED_PUBLIC_NET_ID = "{{ .Values.openStackPublicNetId }}"
+GLOBAL_INJECTED_PROPERTIES = {
+ "GLOBAL_INJECTED_AAI1_IP_ADDR" : "aai-service.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_APPC_IP_ADDR" : "appc-sdnhost.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_ARTIFACTS_VERSION" : "1.1.0-SNAPSHOT",
+ "GLOBAL_INJECTED_CLAMP_IP_ADDR" : "clamp.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_CLOUD_ENV" : "openstack",
+ "GLOBAL_INJECTED_DCAE_IP_ADDR" : "dcae-controller.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_DNS_IP_ADDR" : "10.0.100.1",
+ "GLOBAL_INJECTED_DOCKER_VERSION" : "1.1-STAGING-latest",
+ "GLOBAL_INJECTED_GERRIT_BRANCH" : "master",
+ "GLOBAL_INJECTED_KEYSTONE" : "{{ .Values.openStackKeyStoneUrl }}",
+ "GLOBAL_INJECTED_MR_IP_ADDR" : "dmaap.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_MSO_IP_ADDR" : "mso.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_NETWORK" : "{{ .Values.openStackPrivateNetId }}",
+ "GLOBAL_INJECTED_NEXUS_DOCKER_REPO" : "nexus3.onap.org:10001",
+ "GLOBAL_INJECTED_NEXUS_PASSWORD" : "docker",
+ "GLOBAL_INJECTED_NEXUS_REPO" : "https://nexus.onap.org/content/sites/raw",
+ "GLOBAL_INJECTED_NEXUS_USERNAME" : "docker",
+ "GLOBAL_INJECTED_OPENO_IP_ADDR" : "msb-iag.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_OPENSTACK_PASSWORD" : "{{ .Values.openStackEncryptedPassword }}",
+ "GLOBAL_INJECTED_OPENSTACK_TENANT_ID" : "{{ .Values.openStackTenantId }}",
+ "GLOBAL_INJECTED_OPENSTACK_USERNAME" : "{{ .Values.openStackUserName }}",
+ "GLOBAL_INJECTED_POLICY_IP_ADDR" : "pypdp.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_POLICY_HEALTHCHECK_IP_ADDR" : "drools.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_PORTAL_IP_ADDR" : "portalapps.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_REGION" : "{{ .Values.openStackRegion }}",
+ "GLOBAL_INJECTED_REMOTE_REPO" : "http://gerrit.onap.org/r/testsuite/properties.git",
+ "GLOBAL_INJECTED_SDC_FE_IP_ADDR" : "sdc-fe.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_SDC_BE_IP_ADDR" : "sdc-be.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_SDNC_IP_ADDR" : "sdnhost.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_SDNC_PORTAL_IP_ADDR" : "sdnc-portal.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_SO_IP_ADDR" : "mso.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_VID_IP_ADDR" : "vid-server.{{ .Values.nsPrefix }}",
+ "GLOBAL_INJECTED_VM_FLAVOR" : "{{ .Values.openStackFlavourMedium }}",
+ "GLOBAL_INJECTED_VM_IMAGE_NAME" : "{{ .Values.ubuntuImage }}",
+ "GLOBAL_INJECTED_PUBLIC_NET_ID" : "{{ .Values.openStackPublicNetId }}"
+}
[Arguments] ${model_zip_path} ${catalog_service_name}=
${catalog_service_id}= Add ASDC Catalog Service ${catalog_service_name}
${catalog_resource_ids}= Create List
+ ${catalog_resources}= Create Dictionary
: FOR ${zip} IN @{model_zip_path}
\ ${loop_catalog_resource_id}= Setup ASDC Catalog Resource ${zip}
\ Append To List ${catalog_resource_ids} ${loop_catalog_resource_id}
\ ${loop_catalog_resource_resp}= Get ASDC Catalog Resource ${loop_catalog_resource_id}
\ Add ASDC Resource Instance ${catalog_service_id} ${loop_catalog_resource_id} ${loop_catalog_resource_resp['name']}
+ \ Set To Dictionary ${catalog_resources} ${loop_catalog_resource_id}=${loop_catalog_resource_resp}
${catalog_service_resp}= Get ASDC Catalog Service ${catalog_service_id}
Checkin ASDC Catalog Service ${catalog_service_id}
Request Certify ASDC Catalog Service ${catalog_service_id}
${catalog_service_resp}= Get ASDC Catalog Service ${catalog_service_id}
${vf_module}= Find Element In Array ${loop_catalog_resource_resp['groups']} type org.openecomp.groups.VfModule
Check Catalog Service Distributed ${catalog_service_resp['uuid']}
- [Return] ${catalog_service_resp['name']} ${loop_catalog_resource_resp['name']} ${vf_module} ${catalog_resource_ids} ${catalog_service_id}
+ [Return] ${catalog_service_resp['name']} ${loop_catalog_resource_resp['name']} ${vf_module} ${catalog_resource_ids} ${catalog_service_id} ${catalog_resources}
Setup ASDC Catalog Resource
[Documentation] Creates all the steps a vf needs for an asdc catalog resource and returns the id
[Arguments] ${catalog_service_uuid}
${dist_resp}= Get Catalog Service Distribution ${catalog_service_uuid}
Should Be Equal As Strings ${dist_resp['distributionStatusOfServiceList'][0]['deployementStatus']} Distributed
+ Sleep 3 minutes
${det_resp}= Get Catalog Service Distribution Details ${dist_resp['distributionStatusOfServiceList'][0]['distributionID']}
@{ITEMS}= Copy List ${det_resp['distributionStatusList']}
:FOR ${ELEMENT} IN @{ITEMS}
Library RequestsLibrary
Library String
Library JSONUtils
-Library Collections
+Library Collections
Resource global_properties.robot
*** Variables ***
Run Policy Health Check
[Documentation] Runs Policy Health check
- ${auth}= Create List ${GLOBAL_POLICY_USERNAME} ${GLOBAL_POLICY_PASSWORD}
+ ${auth}= Create List ${GLOBAL_POLICY_USERNAME} ${GLOBAL_POLICY_PASSWORD}
Log Creating session ${POLICY_ENDPOINT}
${session}= Create Session policy ${POLICY_HEALTHCHECK_ENDPOINT} auth=${auth}
${headers}= Create Dictionary Accept=application/json Content-Type=application/json
:FOR ${ELEMENT} IN @{ITEMS}
\ Should Be Equal As Strings ${ELEMENT['code']} 200
\ Should Be True ${ELEMENT['healthy']}
-
Run Policy Put Request
[Documentation] Runs Policy Put request
[Arguments] ${data_path} ${data}
${resp}= Put Request policy ${data_path} data=${data} headers=${headers}
Log Received response from policy ${resp.text}
[Return] ${resp}
-
Run Policy Delete Request
[Documentation] Runs Policy Delete request
[Arguments] ${data_path} ${data}
${resp}= Delete Request policy ${data_path} data=${data} headers=${headers}
Log Received response from policy ${resp.text}
[Return] ${resp}
-
Run Policy Get Configs Request
[Documentation] Runs Policy Get Configs request
[Arguments] ${data_path} ${data}
${headers}= Create Dictionary Accept=application/json Content-Type=application/json Authorization=Basic ${GLOBAL_POLICY_AUTH} ClientAuth=${GLOBAL_POLICY_CLIENTAUTH}
${resp}= Post Request policy ${data_path} data=${data} headers=${headers}
Log Received response from policy ${resp.text}
- [Return] ${resp}
\ No newline at end of file
+ [Return] ${resp}
*** Settings ***
Documentation The main interface for interacting with SDN-GC. It handles low level stuff like managing the http request library and SDN-GC required fields
Library RequestsLibrary
-Library UUID
+Library UUID
Library OperatingSystem
Library ExtendedSelenium2Library
Library Collections
Set To Dictionary ${valuemap} network=${GLOBAL_INJECTED_NETWORK}
Set To Dictionary ${valuemap} public_net_id=${GLOBAL_INJECTED_PUBLIC_NET_ID}
Set To Dictionary ${valuemap} cloud_env=${GLOBAL_INJECTED_CLOUD_ENV}
- Set To Dictionary ${valuemap} install_script_version=${GLOBAL_INJECTED_INSTALL_SCRIPT_VERSION}
+ Set To Dictionary ${valuemap} install_script_version=${GLOBAL_INJECTED_SCRIPT_VERSION}
Set To Dictionary ${valuemap} vm_image_name=${GLOBAL_INJECTED_VM_IMAGE_NAME}
- Set To Dictionary ${valuemap} vm_flavor_name=${GLOBAL_INJECTED_VM_FLAVOR_NAME}
+ Set To Dictionary ${valuemap} vm_flavor_name=${GLOBAL_INJECTED_VM_FLAVOR}
# update the value map with unique values.
Set To Dictionary ${valuemap} uuid=${uuid} hostid=${hostid} ecompnet=${ecompnet}
${parameters}= Create Dictionary
## Setup Browser is now being managed by the test case
## Setup Browser
Go To ${SDNGC_ADMIN_SIGNUP_URL}
- Maximize Browser Window
+ ##Maximize Browser Window
Set Selenium Speed ${GLOBAL_SELENIUM_DELAY}
Set Browser Implicit Wait ${GLOBAL_SELENIUM_BROWSER_IMPLICIT_WAIT}
Log Logging in to ${SDNGC_ADMIN_LOGIN_URL}
Input Password xpath=//input[@id='password'] ${shortened_uuid}
Click Button xpath=//button[@type='submit']
Title Should Be SDN-C AdminPortal
- Log Logged in to ${SDNGC_ADMIN_LOGIN_URL}
\ No newline at end of file
+ Log Logged in to ${SDNGC_ADMIN_LOGIN_URL}
--- /dev/null
+#{{ if not .Values.disableRobot }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: robot-eteshare-configmap
+ namespace: {{ .Values.nsPrefix }}
+data:
+{{ tpl (.Files.Glob "resources/config/eteshare/config/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: robot-resources-configmap
+ namespace: {{ .Values.nsPrefix }}
+data:
+{{ tpl (.Files.Glob "resources/config/robot/resources/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: lighttpd-authorization-configmap
+ namespace: {{ .Values.nsPrefix }}
+data:
+{{ tpl (.Files.Glob "resources/config/authorization").AsConfig . | indent 2 }}
+#{{ end }}
mountPath: /etc/localtime
readOnly: true
- name: robot-eteshare
- mountPath: /share
- - name: robot-resources-asdc-interface
+ mountPath: /share/config
+ - name: robot-resources
mountPath: /var/opt/OpenECOMP_ETE/robot/resources/asdc_interface.robot
- - name: robot-resources-policy-interface
+ subPath: asdc_interface.robot
+ - name: robot-resources
mountPath: /var/opt/OpenECOMP_ETE/robot/resources/policy_interface.robot
- - name: robot-resources-sdngc-interface
+ subPath: policy_interface.robot
+ - name: robot-resources
mountPath: /var/opt/OpenECOMP_ETE/robot/resources/sdngc_interface.robot
+ subPath: sdngc_interface.robot
- name: lighttpd-authorization
mountPath: /etc/lighttpd/authorization
+ subPath: authorization
ports:
- containerPort: 88
readinessProbe:
hostPath:
path: /etc/localtime
- name: robot-eteshare
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/robot/eteshare
- - name: robot-resources-asdc-interface
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/robot/robot/resources/asdc_interface.robot
- - name: robot-resources-policy-interface
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/robot/robot/resources/policy_interface.robot
- - name: robot-resources-sdngc-interface
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/robot/robot/resources/sdngc_interface.robot
+ configMap:
+ name: robot-eteshare-configmap
+ defaultMode: 0755
+ - name: robot-resources
+ configMap:
+ name: robot-resources-configmap
- name: lighttpd-authorization
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/robot/authorization
+ configMap:
+ name: lighttpd-authorization-configmap
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
#{{ end }}
pullPolicy: Always
nodePortPrefix: 302
robotReplicas: 1
+demoArtifactsVersion: "1.1.0-SNAPSHOT"
+openStackPrivateNetCidr: "192.168.30.0" # NOTE(review): name says "Cidr" but value has no prefix length (e.g. /24) — confirm expected format with the consuming template
+openStackFlavourMedium: "m1.medium"
+openStackKeyStoneUrl: "http://1.2.3.4:5000"
+openStackPublicNetId: "e8f51958045716781ffc" # NOTE(review): not a well-formed UUID, unlike openStackPrivateNetId below — confirm this placeholder is intentional
+openStackPrivateNetId: "e8f51956-00dd-4425-af36-045716781ffc"
+openStackOamNetworkCidrPrefix: "192.168.120"
+openStackEncryptedPassword: "c124921a3a0efbe579782cde8227681e"
+openStackRegion: "RegionOne"
+openStackPrivateSubnetId: "e8f51956-00dd-4425-af36-045716781ffc"
+openStackTenantId: "47899782ed714295b1151681fdfd51f5"
+openStackUserName: "vnf_user"
+ubuntuImage: "Ubuntu_14.04.5_LTS"
image:
testsuite: nexus3.onap.org:10001/openecomp/testsuite:1.1-STAGING-latest
-
--- /dev/null
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
--- /dev/null
+apiVersion: v1
+description: An ONAP environment setup chart
+name: setup
+version: 2.0.0
--- /dev/null
+dependencies:
+ - name: common
+ version: ~2.0.0
+ # local reference to common chart, as it is
+ # a part of this chart's package and will not
+ # be published independently to a repo (at this point)
+ repository: '@local'
\ No newline at end of file
-
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
- name: {{ include "common.name" . }}
+ name: {{ include "common.namespace" . }}-binding
namespace: {{ include "common.namespace" . }}
labels:
app: {{ include "common.name" . }}
apiVersion: v1
kind: Secret
metadata:
- name: {{ include "common.name" . }}-docker-registry-key
+ name: {{ include "common.namespace" . }}-docker-registry-key
namespace: {{ include "common.namespace" . }}
labels:
app: {{ include "common.name" . }}
--- /dev/null
+global:
+ # image repositories
+ repository: nexus3.onap.org:10001
+ repositorySecret: eyJuZXh1czMub25hcC5vcmc6MTAwMDEiOnsidXNlcm5hbWUiOiJkb2NrZXIiLCJwYXNzd29yZCI6ImRvY2tlciIsImVtYWlsIjoiQCIsImF1dGgiOiJaRzlqYTJWeU9tUnZZMnRsY2c9PSJ9fQ==
apiVersion: v1
description: MariaDB Service
-name: so-mariadb
+name: mariadb
version: 2.0.0
http://{{ . }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
- export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }})
- export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get svc -w {{ include "common.name" . }}'
- export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
{{- else if contains "ClusterIP" .Values.service.type }}
- export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "common.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ include "common.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
{{- end }}
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
- name: {{ include "common.name" . }}
+ name: {{ include "common.fullname" . }}
namespace: {{ include "common.namespace" . }}
labels:
app: {{ include "common.name" . }}
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
- name: {{ template "common.name" . }}
+ name: {{ template "common.fullname" . }}
key: db-root-password
volumeMounts:
- mountPath: /var/lib/mysql
{{- if .Values.persistence.enabled }}
- name: mariadb-data
persistentVolumeClaim:
- claimName: {{ include "common.name" . }}
+ claimName: {{ include "common.fullname" . }}
{{- else }}
emptyDir: {}
{{- end }}
hostPath:
path: /etc/localtime
imagePullSecrets:
- - name: "{{ include "common.name" . }}-docker-registry-key"
+ - name: "{{ include "common.namespace" . }}-docker-registry-key"
kind: PersistentVolume
apiVersion: v1
metadata:
- name: {{ include "common.name" . }}
+ name: {{ include "common.fullname" . }}
namespace: {{ include "common.namespace" . }}
labels:
app: {{ include "common.name" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
+ name: {{ include "common.fullname" . }}
spec:
capacity:
storage: {{ .Values.persistence.size}}
persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
hostPath:
path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}
-{{- end -}}
\ No newline at end of file
+{{- end -}}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
- name: {{ include "common.name" . }}
+ name: {{ include "common.fullname" . }}
namespace: {{ include "common.namespace" . }}
labels:
app: {{ include "common.name" . }}
{{ toYaml .Values.persistence.annotations | indent 4 }}
{{- end }}
spec:
+ selector:
+ matchLabels:
+ name: {{ include "common.fullname" . }}
accessModes:
- {{ .Values.persistence.accessMode }}
resources:
apiVersion: v1
kind: Secret
metadata:
- name: {{ include "common.name" . }}
+ name: {{ include "common.fullname" . }}
namespace: {{ include "common.namespace" . }}
labels:
app: {{ include "common.name" . }}
type: Opaque
data:
db-root-password: {{ .Values.config.mariadbRootPassword | b64enc | quote }}
----
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ include "common.name" . }}-docker-registry-key
- namespace: {{ include "common.namespace" . }}
- labels:
- app: {{ include "common.name" . }}
- chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
-data:
- .dockercfg: {{ .Values.global.repositorySecret | default .Values.repositorySecret }}
-type: kubernetes.io/dockercfg
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
- name: {{ include "common.name" . }}
+ name: {{ include "common.fullname" . }}
namespace: {{ include "common.namespace" . }}
labels:
app: {{ include "common.name" . }}
dependencies:
- - name: common-templates
+ - name: common
version: ~2.0.0
# local reference to common chart, as it is
# a part of this chart's package and will not
# be published independently to a repo (at this point)
- repository: file://../common/common-templates
\ No newline at end of file
+ repository: '@local'
\ No newline at end of file
http://{{ . }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
- export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }})
- export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ export NODE_PORT=$(kubectl get --namespace {{ include "common.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.name" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ include "common.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get svc -w {{ include "common.name" . }}'
- export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.namespace" . }} {{ include "common.name" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.externalPort }}
{{- else if contains "ClusterIP" .Values.service.type }}
- export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "so.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ export POD_NAME=$(kubectl get pods --namespace {{ include "common.namespace" . }} -l "app={{ template "so.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:{{ .Values.service.internalPort }}
{{- end }}
+++ /dev/null
-
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
- name: {{ include "common.name" . }}
- namespace: {{ include "common.namespace" . }}
- labels:
- app: {{ include "common.name" . }}
- chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: cluster-admin
-subjects:
- - kind: ServiceAccount
- name: default
- namespace: {{ include "common.namespace" . }}
\ No newline at end of file
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
- name: {{ include "common.name" . }}
+ name: {{ include "common.fullname" . }}
namespace: {{ include "common.namespace" . }}
labels:
app: {{ include "common.name" . }}
- /root/ready.py
args:
- --container-name
- - so-mariadb
+ - mariadb
env:
- name: NAMESPACE
valueFrom:
path: start-jboss-server.sh
mode: 0755
imagePullSecrets:
- - name: "{{ include "common.name" . }}-docker-registry-key"
+ - name: "{{ include "common.namespace" . }}-docker-registry-key"
+++ /dev/null
-apiVersion: v1
-kind: Namespace
-metadata:
- name: {{ include "common.namespace" . }}
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
- name: {{ include "common.name" . }}
+ name: {{ include "common.fullname" . }}
namespace: {{ include "common.namespace" . }}
labels:
app: {{ include "common.name" . }}
loggingRepository: docker.elastic.co
loggingImage: beats/filebeat:5.5.0
+subChartsOnly:
+ enabled: true
# application image
repository: nexus3.onap.org:10001