Merge "Update CMSO OOM to use HTTPS and AAF"
author: Mike Elliott <mike.elliott@amdocs.com>
Fri, 10 May 2019 17:30:59 +0000 (17:30 +0000)
committer: Gerrit Code Review <gerrit@onap.org>
Fri, 10 May 2019 17:30:59 +0000 (17:30 +0000)
229 files changed:
.gitmodules
docs/cluster.yml [new file with mode: 0644]
docs/example-integration-override.yaml
docs/helm-search.txt
docs/images/cp_vms/control_plane_1.png [new file with mode: 0644]
docs/images/cp_vms/control_plane_2.png [new file with mode: 0644]
docs/images/cp_vms/control_plane_3.png [new file with mode: 0644]
docs/images/cp_vms/control_plane_4.png [new file with mode: 0644]
docs/images/cp_vms/control_plane_5.png [new file with mode: 0644]
docs/images/cp_vms/control_plane_6.png [new file with mode: 0644]
docs/images/cp_vms/control_plane_7.png [new file with mode: 0644]
docs/images/cp_vms/control_plane_8.png [new file with mode: 0644]
docs/images/floating_ips/floating_1.png [new file with mode: 0644]
docs/images/floating_ips/floating_2.png [new file with mode: 0644]
docs/images/keys/key_pair_1.png [new file with mode: 0644]
docs/images/keys/key_pair_2.png [new file with mode: 0644]
docs/images/keys/key_pair_3.png [new file with mode: 0644]
docs/images/keys/key_pair_4.png [new file with mode: 0644]
docs/images/nfs_server/nfs_server_1.png [new file with mode: 0644]
docs/images/nfs_server/nfs_server_10.png [new file with mode: 0644]
docs/images/nfs_server/nfs_server_2.png [new file with mode: 0644]
docs/images/nfs_server/nfs_server_3.png [new file with mode: 0644]
docs/images/nfs_server/nfs_server_4.png [new file with mode: 0644]
docs/images/nfs_server/nfs_server_5.png [new file with mode: 0644]
docs/images/nfs_server/nfs_server_6.png [new file with mode: 0644]
docs/images/nfs_server/nfs_server_7.png [new file with mode: 0644]
docs/images/nfs_server/nfs_server_8.png [new file with mode: 0644]
docs/images/nfs_server/nfs_server_9.png [new file with mode: 0644]
docs/images/rke/rke_1.png [new file with mode: 0644]
docs/images/wk_vms/worker_1.png [new file with mode: 0644]
docs/images/wk_vms/worker_2.png [new file with mode: 0644]
docs/images/wk_vms/worker_3.png [new file with mode: 0644]
docs/images/wk_vms/worker_4.png [new file with mode: 0644]
docs/images/wk_vms/worker_5.png [new file with mode: 0644]
docs/images/wk_vms/worker_6.png [new file with mode: 0644]
docs/images/wk_vms/worker_7.png [new file with mode: 0644]
docs/oom_cloud_setup_guide.rst
docs/oom_quickstart_guide.rst
docs/oom_setup_kubernetes_rancher.rst
docs/openstack-k8s-controlnode.sh [new file with mode: 0644]
docs/openstack-k8s-node.sh [deleted file]
docs/openstack-k8s-workernode.sh [new file with mode: 0644]
docs/openstack-nfs-server.sh [new file with mode: 0644]
docs/openstack-rancher.sh [deleted file]
kubernetes/aaf/.gitignore [new file with mode: 0644]
kubernetes/aaf/charts/aaf-cass/.helmignore [moved from kubernetes/aaf/charts/aaf-cs/.helmignore with 100% similarity]
kubernetes/aaf/charts/aaf-cass/Chart.yaml [moved from kubernetes/aaf/charts/aaf-cs/Chart.yaml with 97% similarity]
kubernetes/aaf/charts/aaf-cass/templates/NOTES.txt [moved from kubernetes/aaf/charts/aaf-cs/templates/NOTES.txt with 100% similarity]
kubernetes/aaf/charts/aaf-cass/templates/deployment.yaml [moved from kubernetes/aaf/charts/aaf-cs/templates/deployment.yaml with 59% similarity]
kubernetes/aaf/charts/aaf-cass/templates/pv.yaml [moved from kubernetes/aaf/charts/aaf-cs/templates/pv.yaml with 96% similarity]
kubernetes/aaf/charts/aaf-cass/templates/pvc.yaml [moved from kubernetes/aaf/charts/aaf-cs/templates/pvc.yaml with 96% similarity]
kubernetes/aaf/charts/aaf-cass/templates/service.yaml [new file with mode: 0644]
kubernetes/aaf/charts/aaf-cass/values.yaml [moved from kubernetes/aaf/charts/aaf-cs/values.yaml with 79% similarity]
kubernetes/aaf/charts/aaf-cm/templates/deployment.yaml
kubernetes/aaf/charts/aaf-cm/templates/service.yaml
kubernetes/aaf/charts/aaf-cm/values.yaml
kubernetes/aaf/charts/aaf-cs/templates/service.yaml [deleted file]
kubernetes/aaf/charts/aaf-fs/templates/deployment.yaml
kubernetes/aaf/charts/aaf-fs/templates/service.yaml
kubernetes/aaf/charts/aaf-fs/values.yaml
kubernetes/aaf/charts/aaf-gui/templates/deployment.yaml
kubernetes/aaf/charts/aaf-gui/templates/service.yaml
kubernetes/aaf/charts/aaf-gui/values.yaml
kubernetes/aaf/charts/aaf-hello/templates/deployment.yaml
kubernetes/aaf/charts/aaf-hello/templates/service.yaml
kubernetes/aaf/charts/aaf-hello/values.yaml
kubernetes/aaf/charts/aaf-locate/templates/deployment.yaml
kubernetes/aaf/charts/aaf-locate/templates/service.yaml
kubernetes/aaf/charts/aaf-locate/values.yaml
kubernetes/aaf/charts/aaf-oauth/templates/deployment.yaml
kubernetes/aaf/charts/aaf-oauth/templates/service.yaml
kubernetes/aaf/charts/aaf-oauth/values.yaml
kubernetes/aaf/charts/aaf-service/templates/deployment.yaml
kubernetes/aaf/charts/aaf-service/templates/service.yaml
kubernetes/aaf/charts/aaf-service/values.yaml
kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-quorumclient/values.yaml
kubernetes/aaf/charts/aaf-sms/charts/aaf-sms-vault/values.yaml
kubernetes/aaf/charts/aaf-sms/values.yaml
kubernetes/aaf/charts/aaf-sshsm/charts/aaf-sshsm-abrmd/values.yaml
kubernetes/aaf/charts/aaf-sshsm/charts/aaf-sshsm-distcenter/values.yaml
kubernetes/aaf/charts/aaf-sshsm/charts/aaf-sshsm-testca/values.yaml
kubernetes/aaf/charts/aaf-sshsm/values.yaml
kubernetes/aaf/templates/aaf-config-pv.yaml [new file with mode: 0644]
kubernetes/aaf/templates/aaf-config-pvc.yaml [new file with mode: 0644]
kubernetes/aaf/templates/aaf-status-pv.yaml [new file with mode: 0644]
kubernetes/aaf/templates/aaf-status-pvc.yaml [new file with mode: 0644]
kubernetes/aaf/values.yaml
kubernetes/aai
kubernetes/appc/charts/appc-ansible-server/templates/statefulset.yaml
kubernetes/appc/charts/appc-ansible-server/values.yaml
kubernetes/cds/charts/cds-blueprints-processor/resources/config/logback.xml
kubernetes/cds/charts/cds-blueprints-processor/templates/service.yaml
kubernetes/cds/charts/cds-blueprints-processor/values.yaml
kubernetes/cds/charts/cds-command-executor/values.yaml
kubernetes/cds/charts/cds-controller-blueprints/resources/config/logback.xml
kubernetes/cds/charts/cds-controller-blueprints/values.yaml
kubernetes/cds/charts/cds-sdc-listener/Chart.yaml [moved from kubernetes/vfc/charts/vfc-nokia-vnfm-driver/Chart.yaml with 81% similarity, mode: 0755]
kubernetes/cds/charts/cds-sdc-listener/requirements.yaml [new file with mode: 0755]
kubernetes/cds/charts/cds-sdc-listener/resources/config/application.yaml [new file with mode: 0644]
kubernetes/cds/charts/cds-sdc-listener/resources/config/logback.xml [new file with mode: 0644]
kubernetes/cds/charts/cds-sdc-listener/templates/configmap.yaml [moved from kubernetes/vfc/charts/vfc-nokia-vnfm-driver/templates/configmap.yaml with 78% similarity]
kubernetes/cds/charts/cds-sdc-listener/templates/deployment.yaml [new file with mode: 0644]
kubernetes/cds/charts/cds-sdc-listener/templates/service.yaml [moved from kubernetes/vfc/charts/vfc-nokia-vnfm-driver/templates/service.yaml with 61% similarity]
kubernetes/cds/charts/cds-sdc-listener/values.yaml [moved from kubernetes/vfc/charts/vfc-nokia-vnfm-driver/values.yaml with 54% similarity]
kubernetes/cds/charts/cds-ui/templates/deployment.yaml
kubernetes/cds/charts/cds-ui/values.yaml
kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/create-tables.sql
kubernetes/clamp/values.yaml
kubernetes/common/cassandra/resources/config/docker-entrypoint.sh [new file with mode: 0644]
kubernetes/common/cassandra/templates/configmap.yaml
kubernetes/common/cassandra/templates/statefulset.yaml
kubernetes/common/cassandra/values.yaml
kubernetes/common/dgbuilder/values.yaml
kubernetes/common/network-name-gen/values.yaml
kubernetes/contrib/tools/rke/rke_setup.sh
kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml
kubernetes/dcaegen2/charts/dcae-cloudify-manager/values.yaml
kubernetes/dcaegen2/charts/dcae-config-binding-service/values.yaml
kubernetes/dcaegen2/charts/dcae-deployment-handler/values.yaml
kubernetes/dcaegen2/charts/dcae-healthcheck/values.yaml
kubernetes/dcaegen2/charts/dcae-policy-handler/resources/config/config.json
kubernetes/dcaegen2/charts/dcae-policy-handler/values.yaml
kubernetes/dcaegen2/values.yaml
kubernetes/dmaap/components/dmaap-bc/resources/config/dmaapbc.properties
kubernetes/dmaap/components/dmaap-bc/values.yaml
kubernetes/dmaap/components/dmaap-dr-node/resources/config/drNodeCadi.properties
kubernetes/dmaap/components/dmaap-dr-node/resources/config/node.properties
kubernetes/dmaap/components/dmaap-dr-prov/resources/config/drProvCadi.properties
kubernetes/dmaap/components/dmaap-dr-prov/resources/config/provserver.properties
kubernetes/dmaap/components/message-router/charts/message-router-kafka/values.yaml
kubernetes/dmaap/components/message-router/charts/message-router-mirrormaker/values.yaml
kubernetes/dmaap/components/message-router/charts/message-router-zookeeper/values.yaml
kubernetes/dmaap/components/message-router/resources/config/dmaap/cadi.properties
kubernetes/dmaap/components/message-router/resources/mr_clusters/san-francisco.json
kubernetes/dmaap/components/message-router/values.yaml
kubernetes/dmaap/values.yaml
kubernetes/esr/charts/esr-gui/values.yaml
kubernetes/esr/charts/esr-server/values.yaml
kubernetes/multicloud/charts/multicloud-ocata/values.yaml
kubernetes/multicloud/charts/multicloud-pike/values.yaml
kubernetes/multicloud/charts/multicloud-starlingx/values.yaml
kubernetes/multicloud/charts/multicloud-vio/values.yaml
kubernetes/multicloud/charts/multicloud-windriver/values.yaml
kubernetes/multicloud/values.yaml
kubernetes/onap/resources/overrides/onap-all.yaml
kubernetes/onap/resources/overrides/onap-vfw.yaml [new file with mode: 0644]
kubernetes/onap/resources/overrides/openstack.yaml [new file with mode: 0644]
kubernetes/onap/values.yaml
kubernetes/policy/charts/drools/resources/config/opt/policy/config/drools/base.conf
kubernetes/policy/charts/policy-apex-pdp/resources/config/OnapPfConfig.json
kubernetes/policy/charts/policy-api/resources/config/config.json
kubernetes/policy/charts/policy-pap/resources/config/config.json
kubernetes/policy/charts/policy-xacml-pdp/resources/config/xacml.properties [new file with mode: 0644]
kubernetes/policy/resources/config/pe/console.conf
kubernetes/policy/resources/config/pe/push-policies.sh
kubernetes/robot/demo-k8s.sh
kubernetes/robot/ete-k8s.sh
kubernetes/robot/eteHelm-k8s.sh
kubernetes/robot/resources/config/eteshare/config/integration_robot_properties.py
kubernetes/robot/resources/config/eteshare/config/vm_properties.py
kubernetes/robot/values.yaml
kubernetes/sdc/charts/sdc-be/values.yaml
kubernetes/sdc/charts/sdc-cs/values.yaml
kubernetes/sdc/charts/sdc-dcae-be/values.yaml
kubernetes/sdc/charts/sdc-dcae-dt/values.yaml
kubernetes/sdc/charts/sdc-dcae-fe/values.yaml
kubernetes/sdc/charts/sdc-dcae-tosca-lab/values.yaml
kubernetes/sdc/charts/sdc-es/templates/pv.yaml
kubernetes/sdc/charts/sdc-es/templates/pvc.yaml
kubernetes/sdc/charts/sdc-es/values.yaml
kubernetes/sdc/charts/sdc-fe/values.yaml
kubernetes/sdc/charts/sdc-kb/values.yaml
kubernetes/sdc/charts/sdc-onboarding-be/templates/deployment.yaml
kubernetes/sdc/charts/sdc-onboarding-be/templates/pv.yaml [new file with mode: 0644]
kubernetes/sdc/charts/sdc-onboarding-be/templates/pvc.yaml [new file with mode: 0644]
kubernetes/sdc/charts/sdc-onboarding-be/values.yaml
kubernetes/sdc/charts/sdc-wfd-be/values.yaml
kubernetes/sdc/charts/sdc-wfd-fe/templates/deployment.yaml
kubernetes/sdc/charts/sdc-wfd-fe/templates/service.yaml
kubernetes/sdc/charts/sdc-wfd-fe/values.yaml
kubernetes/sdnc/charts/dmaap-listener/values.yaml
kubernetes/sdnc/charts/sdnc-ansible-server/values.yaml
kubernetes/sdnc/charts/sdnc-portal/values.yaml
kubernetes/sdnc/charts/ueb-listener/values.yaml
kubernetes/sdnc/templates/statefulset.yaml
kubernetes/sdnc/values.yaml
kubernetes/so/charts/so-bpmn-infra/resources/config/overrides/override.yaml
kubernetes/so/charts/so-bpmn-infra/values.yaml
kubernetes/so/charts/so-catalog-db-adapter/values.yaml
kubernetes/so/charts/so-monitoring/values.yaml
kubernetes/so/charts/so-openstack-adapter/values.yaml
kubernetes/so/charts/so-request-db-adapter/values.yaml
kubernetes/so/charts/so-sdc-controller/values.yaml
kubernetes/so/charts/so-sdnc-adapter/values.yaml
kubernetes/so/charts/so-vfc-adapter/values.yaml
kubernetes/so/charts/so-vnfm-adapter/values.yaml
kubernetes/so/values.yaml
kubernetes/uui/charts/uui-server/templates/service.yaml
kubernetes/vfc/charts/vfc-catalog/templates/deployment.yaml
kubernetes/vfc/charts/vfc-catalog/values.yaml
kubernetes/vfc/charts/vfc-ems-driver/values.yaml
kubernetes/vfc/charts/vfc-generic-vnfm-driver/templates/deployment.yaml
kubernetes/vfc/charts/vfc-generic-vnfm-driver/values.yaml
kubernetes/vfc/charts/vfc-huawei-vnfm-driver/values.yaml
kubernetes/vfc/charts/vfc-juju-vnfm-driver/values.yaml
kubernetes/vfc/charts/vfc-multivim-proxy/values.yaml
kubernetes/vfc/charts/vfc-nokia-v2vnfm-driver/values.yaml
kubernetes/vfc/charts/vfc-nokia-vnfm-driver/.helmignore [deleted file]
kubernetes/vfc/charts/vfc-nokia-vnfm-driver/resources/config/logging/logback.xml [deleted file]
kubernetes/vfc/charts/vfc-nokia-vnfm-driver/templates/deployment.yaml [deleted file]
kubernetes/vfc/charts/vfc-nslcm/templates/deployment.yaml
kubernetes/vfc/charts/vfc-nslcm/values.yaml
kubernetes/vfc/charts/vfc-redis/.helmignore [moved from kubernetes/vfc/charts/vfc-db/.helmignore with 100% similarity]
kubernetes/vfc/charts/vfc-redis/Chart.yaml [moved from kubernetes/vfc/charts/vfc-db/Chart.yaml with 97% similarity]
kubernetes/vfc/charts/vfc-redis/templates/deployment.yaml [moved from kubernetes/vfc/charts/vfc-db/templates/deployment.yaml with 100% similarity]
kubernetes/vfc/charts/vfc-redis/templates/service.yaml [moved from kubernetes/vfc/charts/vfc-db/templates/service.yaml with 100% similarity]
kubernetes/vfc/charts/vfc-redis/values.yaml [moved from kubernetes/vfc/charts/vfc-db/values.yaml with 100% similarity]
kubernetes/vfc/charts/vfc-resmgr/values.yaml
kubernetes/vfc/charts/vfc-vnflcm/templates/deployment.yaml
kubernetes/vfc/charts/vfc-vnflcm/values.yaml
kubernetes/vfc/charts/vfc-vnfmgr/templates/deployment.yaml
kubernetes/vfc/charts/vfc-vnfmgr/values.yaml
kubernetes/vfc/charts/vfc-vnfres/templates/deployment.yaml
kubernetes/vfc/charts/vfc-vnfres/values.yaml
kubernetes/vfc/charts/vfc-workflow-engine/values.yaml
kubernetes/vfc/charts/vfc-workflow/values.yaml
kubernetes/vfc/charts/vfc-zte-sdnc-driver/values.yaml
kubernetes/vfc/charts/vfc-zte-vnfm-driver/values.yaml
kubernetes/vid/values.yaml

index b54f5d9..31bff54 100644 (file)
@@ -1,3 +1,5 @@
 [submodule "kubernetes/aai"]
        path = kubernetes/aai
-       url = https://gerrit.onap.org/r/aai/oom
+       url = ../aai/oom
+        branch = .
+        ignore = dirty
diff --git a/docs/cluster.yml b/docs/cluster.yml
new file mode 100644 (file)
index 0000000..d4962d3
--- /dev/null
@@ -0,0 +1,156 @@
+# An example of an HA Kubernetes cluster for ONAP
+nodes:
+- address: 10.12.6.85
+  port: "22"
+  internal_address: 10.0.0.8
+  role:
+  - controlplane
+  - etcd
+  hostname_override: "onap-control-1"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.90
+  port: "22"
+  internal_address: 10.0.0.11
+  role:
+  - controlplane
+  - etcd
+  hostname_override: "onap-control-2"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.89
+  port: "22"
+  internal_address: 10.0.0.12
+  role:
+  - controlplane
+  - etcd
+  hostname_override: "onap-control-3"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.165
+  port: "22"
+  internal_address: 10.0.0.14
+  role:
+  - worker
+  hostname_override: "onap-k8s-1"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.238
+  port: "22"
+  internal_address: 10.0.0.26
+  role:
+  - worker
+  hostname_override: "onap-k8s-2"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.126
+  port: "22"
+  internal_address: 10.0.0.5
+  role:
+  - worker
+  hostname_override: "onap-k8s-3"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.11
+  port: "22"
+  internal_address: 10.0.0.6
+  role:
+  - worker
+  hostname_override: "onap-k8s-4"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.244
+  port: "22"
+  internal_address: 10.0.0.9
+  role:
+  - worker
+  hostname_override: "onap-k8s-5"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.249
+  port: "22"
+  internal_address: 10.0.0.17
+  role:
+  - worker
+  hostname_override: "onap-k8s-6"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.191
+  port: "22"
+  internal_address: 10.0.0.20
+  role:
+  - worker
+  hostname_override: "onap-k8s-7"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.111
+  port: "22"
+  internal_address: 10.0.0.10
+  role:
+  - worker
+  hostname_override: "onap-k8s-8"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.195
+  port: "22"
+  internal_address: 10.0.0.4
+  role:
+  - worker
+  hostname_override: "onap-k8s-9"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.5.160
+  port: "22"
+  internal_address: 10.0.0.16
+  role:
+  - worker
+  hostname_override: "onap-k8s-10"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.74
+  port: "22"
+  internal_address: 10.0.0.18
+  role:
+  - worker
+  hostname_override: "onap-k8s-11"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+- address: 10.12.6.82
+  port: "22"
+  internal_address: 10.0.0.7
+  role:
+  - worker
+  hostname_override: "onap-k8s-12"
+  user: ubuntu
+  ssh_key_path: "~/.ssh/onap-key"
+services:
+  kube-api:
+    service_cluster_ip_range: 10.43.0.0/16
+    pod_security_policy: false
+    always_pull_images: false
+  kube-controller:
+    cluster_cidr: 10.42.0.0/16
+    service_cluster_ip_range: 10.43.0.0/16
+  kubelet:
+    cluster_domain: cluster.local
+    cluster_dns_server: 10.43.0.10
+    fail_swap_on: false
+network:
+  plugin: canal
+authentication:
+  strategy: x509
+ssh_key_path: "~/.ssh/onap-key"
+ssh_agent_auth: false
+authorization:
+  mode: rbac
+ignore_docker_version: false
+kubernetes_version: "v1.13.5-rancher1-2"
+private_registries:
+- url: nexus3.onap.org:10001
+  user: docker
+  password: docker
+  is_default: true
+cluster_name: "onap"
+restore:
+  restore: false
+  snapshot_name: ""
index 9c336d6..56699d9 100644 (file)
@@ -1,36 +1,46 @@
 global:
   repository: 10.12.5.2:5000
   pullPolicy: IfNotPresent
+#################################################################
+# This override file configures openstack parameters for ONAP
+#################################################################
+appc:
+  config:
+    enableClustering: false
+    openStackType: "OpenStackProvider"
+    openStackName: "OpenStack"
+    openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
+    openStackServiceTenantName: "OPENSTACK_TENANTNAME_HERE"
+    openStackDomain: "Default"
+    openStackUserName: "OPENSTACK_USERNAME_HERE"
+    openStackEncryptedPassword: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
 robot:
-  enabled: true
-  flavor: large
   appcUsername: "appc@appc.onap.org"
-  appcPassword: "APPC_PASSWORD_HERE"
+  appcPassword: "demo123456!"
   openStackKeyStoneUrl: "http://10.12.25.2:5000"
   openStackPublicNetId: "971040b2-7059-49dc-b220-4fab50cb2ad4"
   openStackTenantId: "09d8566ea45e43aa974cf447ed591d77"
   openStackUserName: "OPENSTACK_USERNAME_HERE"
   ubuntu14Image: "ubuntu-14-04-cloud-amd64"
   ubuntu16Image: "ubuntu-16-04-cloud-amd64"
-  openStackPrivateNetId: "d4ab89ff-c735-4ce4-93f6-cff445157b98"
-  openStackPrivateSubnetId: "46c2391c-ed98-4fb0-8ab7-88678bc55b9f"
+  openStackPrivateNetId: "c7824f00-bef7-4864-81b9-f6c3afabd313"
+  openStackPrivateSubnetId: "2a0e8888-f93e-4615-8d28-fc3d4d087fc3"
   openStackPrivateNetCidr: "10.0.0.0/16"
-  openStackSecurityGroup: "3914301b-2996-414f-ba0a-da4b2275a753"
+  openStackSecurityGroup: "3a7a1e7e-6d15-4264-835d-fab1ae81e8b0"
   openStackOamNetworkCidrPrefix: "10.0"
-  dcaeCollectorIp: "10.12.5.46"
+  dcaeCollectorIp: "10.12.6.88"
   vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
-  demoArtifactsVersion: "1.3.0"
+  demoArtifactsVersion: "1.4.0-SNAPSHOT"
   demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases"
-  scriptVersion: "1.3.0"
-  rancherIpAddress: "10.12.6.38"
+  scriptVersion: "1.4.0-SNAPSHOT"
+  rancherIpAddress: "10.12.5.127"
   config:
-    openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HEREXXXXXXXXXXXXXXXX"
+    # openStackEncryptedPasswordHere should match the encrypted string used in SO and APPC and overridden per environment
+    openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
 so:
-  enabled: true
+  # so server configuration
   so-catalog-db-adapter:
     config:
       openStackUserName: "OPENSTACK_USERNAME_HERE"
       openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
-      openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HEREXXXXXXXXXXXXXXXX"
-
-
+      openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
\ No newline at end of file
index db95e4f..036ad03 100644 (file)
@@ -1,31 +1,42 @@
 NAME                   CHART VERSION   APP VERSION     DESCRIPTION                                 
-local/onap             4.0.0           Dublin          Open Network Automation Platform (ONAP)     
-local/aaf              4.0.0                           ONAP Application Authorization Framework    
-local/aai              4.0.0                           ONAP Active and Available Inventory         
-local/cassandra        4.0.0                           ONAP cassandra                              
-local/cds              4.0.0                           ONAP Common Design Studio                   
-local/clamp            4.0.0                           ONAP Clamp                                  
-local/cli              4.0.0                           ONAP Command Line Interface                 
-local/consul           4.0.0                           ONAP Consul Agent                           
-local/contrib          4.0.0                           ONAP optional tools                         
-local/dcaegen2         4.0.0                           ONAP DCAE Gen2                              
-local/dmaap            4.0.1                           ONAP DMaaP components                       
-local/esr              4.0.0                           ONAP External System Register               
-local/log              4.0.0                           ONAP Logging ElasticStack                   
-local/msb              4.0.0                           ONAP MicroServices Bus                      
-local/multicloud       4.0.0                           ONAP multicloud broker                      
-local/nbi              4.0.0                           ONAP Northbound Interface                   
-local/oof              4.0.0                           ONAP Optimization Framework                 
-local/pnda             4.0.0                           ONAP DCAE PNDA                              
-local/policy           4.0.0                           ONAP Policy Administration Point            
-local/pomba            4.0.0                           ONAP Post Orchestration Model Based Audit   
-local/portal           4.0.0                           ONAP Web Portal                             
-local/postgres         4.0.0                           ONAP Postgres Server                        
-local/robot            4.0.0                           A helm Chart for kubernetes-ONAP Robot      
-local/sdnc-prom        4.0.0                           ONAP SDNC Policy Driven Ownership Management
-local/sniro-emulator   4.0.0                           ONAP Mock Sniro Emulator                    
-local/so               4.0.0                           ONAP Service Orchestrator                   
-local/uui              4.0.0                           ONAP uui                                    
-local/vfc              4.0.0                           ONAP Virtual Function Controller (VF-C)     
-local/vid              4.0.0                           ONAP Virtual Infrastructure Deployment      
-local/vnfsdk           4.0.0                           ONAP VNF SDK 
+local/onap                     4.0.0           Dublin  Open Network Automation Platform (ONAP)
+local/aaf                      4.0.0                   ONAP Application Authorization Framework
+local/aai                      4.0.0                   ONAP Active and Available Inventory
+local/appc                     4.0.0                   Application Controller
+local/cassandra                4.0.0                   ONAP cassandra
+local/cds                      4.0.0                   ONAP Controller Design Studio (CDS)
+local/clamp                    4.0.0                   ONAP Clamp
+local/cli                      4.0.0                   ONAP Command Line Interface
+local/common                   4.0.0                   Common templates for inclusion in other charts
+local/consul                   4.0.0                   ONAP Consul Agent
+local/contrib                  4.0.0                   ONAP optional tools
+local/dcaegen2                 4.0.0                   ONAP DCAE Gen2
+local/dgbuilder                4.0.0                   D.G. Builder application
+local/dmaap                    4.0.1                   ONAP DMaaP components
+local/esr                      4.0.0                   ONAP External System Register
+local/log                      4.0.0                   ONAP Logging ElasticStack
+local/mariadb-galera           4.0.0                   Chart for MariaDB Galera cluster
+local/mongo                    4.0.0                   MongoDB Server
+local/msb                      4.0.0                   ONAP MicroServices Bus
+local/multicloud               4.0.0                   ONAP multicloud broker
+local/music                    4.0.0                   MUSIC - Multi-site State Coordination Service
+local/mysql                    4.0.0                   MySQL Server
+local/nbi                      4.0.0                   ONAP Northbound Interface
+local/network-name-gen         4.0.0                   Name Generation Micro Service
+local/nfs-provisioner          4.0.0                   NFS provisioner
+local/oof                      4.0.0                   ONAP Optimization Framework
+local/pnda                     4.0.0                   ONAP DCAE PNDA
+local/policy                   4.0.0                   ONAP Policy Administration Point
+local/pomba                    4.0.0                   ONAP Post Orchestration Model Based Audit
+local/portal                   4.0.0                   ONAP Web Portal
+local/postgres                 4.0.0                   ONAP Postgres Server
+local/robot                    4.0.0                   A helm Chart for kubernetes-ONAP Robot
+local/sdc                      4.0.0                   Service Design and Creation Umbrella Helm charts
+local/sdnc                     4.0.0                   SDN Controller
+local/sdnc-prom                4.0.0                   ONAP SDNC Policy Driven Ownership Management
+local/sniro-emulator           4.0.0                   ONAP Mock Sniro Emulator
+local/so                       4.0.0                   ONAP Service Orchestrator
+local/uui                      4.0.0                   ONAP uui
+local/vfc                      4.0.0                   ONAP Virtual Function Controller (VF-C)
+local/vid                      4.0.0                   ONAP Virtual Infrastructure Deployment
+local/vnfsdk                   4.0.0                   ONAP VNF SDK
\ No newline at end of file
diff --git a/docs/images/cp_vms/control_plane_1.png b/docs/images/cp_vms/control_plane_1.png
new file mode 100644 (file)
index 0000000..d59b986
Binary files /dev/null and b/docs/images/cp_vms/control_plane_1.png differ
diff --git a/docs/images/cp_vms/control_plane_2.png b/docs/images/cp_vms/control_plane_2.png
new file mode 100644 (file)
index 0000000..9a7d72f
Binary files /dev/null and b/docs/images/cp_vms/control_plane_2.png differ
diff --git a/docs/images/cp_vms/control_plane_3.png b/docs/images/cp_vms/control_plane_3.png
new file mode 100644 (file)
index 0000000..da329f2
Binary files /dev/null and b/docs/images/cp_vms/control_plane_3.png differ
diff --git a/docs/images/cp_vms/control_plane_4.png b/docs/images/cp_vms/control_plane_4.png
new file mode 100644 (file)
index 0000000..817355a
Binary files /dev/null and b/docs/images/cp_vms/control_plane_4.png differ
diff --git a/docs/images/cp_vms/control_plane_5.png b/docs/images/cp_vms/control_plane_5.png
new file mode 100644 (file)
index 0000000..33805c5
Binary files /dev/null and b/docs/images/cp_vms/control_plane_5.png differ
diff --git a/docs/images/cp_vms/control_plane_6.png b/docs/images/cp_vms/control_plane_6.png
new file mode 100644 (file)
index 0000000..9e8ab63
Binary files /dev/null and b/docs/images/cp_vms/control_plane_6.png differ
diff --git a/docs/images/cp_vms/control_plane_7.png b/docs/images/cp_vms/control_plane_7.png
new file mode 100644 (file)
index 0000000..f0db6d3
Binary files /dev/null and b/docs/images/cp_vms/control_plane_7.png differ
diff --git a/docs/images/cp_vms/control_plane_8.png b/docs/images/cp_vms/control_plane_8.png
new file mode 100644 (file)
index 0000000..e20f631
Binary files /dev/null and b/docs/images/cp_vms/control_plane_8.png differ
diff --git a/docs/images/floating_ips/floating_1.png b/docs/images/floating_ips/floating_1.png
new file mode 100644 (file)
index 0000000..9f41316
Binary files /dev/null and b/docs/images/floating_ips/floating_1.png differ
diff --git a/docs/images/floating_ips/floating_2.png b/docs/images/floating_ips/floating_2.png
new file mode 100644 (file)
index 0000000..0001ef0
Binary files /dev/null and b/docs/images/floating_ips/floating_2.png differ
diff --git a/docs/images/keys/key_pair_1.png b/docs/images/keys/key_pair_1.png
new file mode 100644 (file)
index 0000000..1135c93
Binary files /dev/null and b/docs/images/keys/key_pair_1.png differ
diff --git a/docs/images/keys/key_pair_2.png b/docs/images/keys/key_pair_2.png
new file mode 100644 (file)
index 0000000..ac3bfc5
Binary files /dev/null and b/docs/images/keys/key_pair_2.png differ
diff --git a/docs/images/keys/key_pair_3.png b/docs/images/keys/key_pair_3.png
new file mode 100644 (file)
index 0000000..1e0c020
Binary files /dev/null and b/docs/images/keys/key_pair_3.png differ
diff --git a/docs/images/keys/key_pair_4.png b/docs/images/keys/key_pair_4.png
new file mode 100644 (file)
index 0000000..031a9ba
Binary files /dev/null and b/docs/images/keys/key_pair_4.png differ
diff --git a/docs/images/nfs_server/nfs_server_1.png b/docs/images/nfs_server/nfs_server_1.png
new file mode 100644 (file)
index 0000000..912a10f
Binary files /dev/null and b/docs/images/nfs_server/nfs_server_1.png differ
diff --git a/docs/images/nfs_server/nfs_server_10.png b/docs/images/nfs_server/nfs_server_10.png
new file mode 100644 (file)
index 0000000..7d87d1c
Binary files /dev/null and b/docs/images/nfs_server/nfs_server_10.png differ
diff --git a/docs/images/nfs_server/nfs_server_2.png b/docs/images/nfs_server/nfs_server_2.png
new file mode 100644 (file)
index 0000000..d59b986
Binary files /dev/null and b/docs/images/nfs_server/nfs_server_2.png differ
diff --git a/docs/images/nfs_server/nfs_server_3.png b/docs/images/nfs_server/nfs_server_3.png
new file mode 100644 (file)
index 0000000..9a7d72f
Binary files /dev/null and b/docs/images/nfs_server/nfs_server_3.png differ
diff --git a/docs/images/nfs_server/nfs_server_4.png b/docs/images/nfs_server/nfs_server_4.png
new file mode 100644 (file)
index 0000000..da329f2
Binary files /dev/null and b/docs/images/nfs_server/nfs_server_4.png differ
diff --git a/docs/images/nfs_server/nfs_server_5.png b/docs/images/nfs_server/nfs_server_5.png
new file mode 100644 (file)
index 0000000..817355a
Binary files /dev/null and b/docs/images/nfs_server/nfs_server_5.png differ
diff --git a/docs/images/nfs_server/nfs_server_6.png b/docs/images/nfs_server/nfs_server_6.png
new file mode 100644 (file)
index 0000000..33805c5
Binary files /dev/null and b/docs/images/nfs_server/nfs_server_6.png differ
diff --git a/docs/images/nfs_server/nfs_server_7.png b/docs/images/nfs_server/nfs_server_7.png
new file mode 100644 (file)
index 0000000..9e8ab63
Binary files /dev/null and b/docs/images/nfs_server/nfs_server_7.png differ
diff --git a/docs/images/nfs_server/nfs_server_8.png b/docs/images/nfs_server/nfs_server_8.png
new file mode 100644 (file)
index 0000000..14103fb
Binary files /dev/null and b/docs/images/nfs_server/nfs_server_8.png differ
diff --git a/docs/images/nfs_server/nfs_server_9.png b/docs/images/nfs_server/nfs_server_9.png
new file mode 100644 (file)
index 0000000..aa8bc14
Binary files /dev/null and b/docs/images/nfs_server/nfs_server_9.png differ
diff --git a/docs/images/rke/rke_1.png b/docs/images/rke/rke_1.png
new file mode 100644 (file)
index 0000000..b27fc51
Binary files /dev/null and b/docs/images/rke/rke_1.png differ
diff --git a/docs/images/wk_vms/worker_1.png b/docs/images/wk_vms/worker_1.png
new file mode 100644 (file)
index 0000000..01314d1
Binary files /dev/null and b/docs/images/wk_vms/worker_1.png differ
diff --git a/docs/images/wk_vms/worker_2.png b/docs/images/wk_vms/worker_2.png
new file mode 100644 (file)
index 0000000..9a7d72f
Binary files /dev/null and b/docs/images/wk_vms/worker_2.png differ
diff --git a/docs/images/wk_vms/worker_3.png b/docs/images/wk_vms/worker_3.png
new file mode 100644 (file)
index 0000000..93d5e28
Binary files /dev/null and b/docs/images/wk_vms/worker_3.png differ
diff --git a/docs/images/wk_vms/worker_4.png b/docs/images/wk_vms/worker_4.png
new file mode 100644 (file)
index 0000000..817355a
Binary files /dev/null and b/docs/images/wk_vms/worker_4.png differ
diff --git a/docs/images/wk_vms/worker_5.png b/docs/images/wk_vms/worker_5.png
new file mode 100644 (file)
index 0000000..33805c5
Binary files /dev/null and b/docs/images/wk_vms/worker_5.png differ
diff --git a/docs/images/wk_vms/worker_6.png b/docs/images/wk_vms/worker_6.png
new file mode 100644 (file)
index 0000000..c71c122
Binary files /dev/null and b/docs/images/wk_vms/worker_6.png differ
diff --git a/docs/images/wk_vms/worker_7.png b/docs/images/wk_vms/worker_7.png
new file mode 100644 (file)
index 0000000..ecb13c1
Binary files /dev/null and b/docs/images/wk_vms/worker_7.png differ
index 7a5074f..1ce260e 100644 (file)
@@ -45,14 +45,14 @@ The versions of Kubernetes that are supported by OOM are as follows:
 
 .. table:: OOM Software Requirements
 
-  ==============     ===========  =====  ========  ========
-  Release            Kubernetes   Helm   kubectl   Docker
-  ==============     ===========  =====  ========  ========
-  amsterdam          1.7.x        2.3.x  1.7.x     1.12.x
-  beijing            1.8.10       2.8.2  1.8.10    17.03.x
-  casablanca         1.11.5       2.9.1  1.11.5    17.03.x
-  dublin             1.13.5       2.12.3 1.13.5    18.09.5
-  ==============     ===========  =====  ========  ========
+  ==============     ===========  ======  ========  ========
+  Release            Kubernetes   Helm    kubectl   Docker
+  ==============     ===========  ======  ========  ========
+  amsterdam          1.7.x        2.3.x   1.7.x     1.12.x
+  beijing            1.8.10       2.8.2   1.8.10    17.03.x
+  casablanca         1.11.5       2.9.1   1.11.5    17.03.x
+  dublin             1.13.5       2.12.3  1.13.5    18.09.5
+  ==============     ===========  ======  ========  ========
 
 Minimum Hardware Configuration
 ==============================
index 0e1d359..501deda 100644 (file)
@@ -1,7 +1,7 @@
 .. This work is licensed under a
 .. Creative Commons Attribution 4.0 International License.
 .. http://creativecommons.org/licenses/by/4.0
-.. Copyright 2018 Amdocs, Bell Canada
+.. Copyright 2019 Amdocs, Bell Canada
 
 .. _quick-start-label:
 
@@ -17,29 +17,33 @@ available), follow the following instructions to deploy ONAP.
 
 **Step 1.** Clone the OOM repository from ONAP gerrit::
 
-  > git clone -b 4.0.0-ONAP http://gerrit.onap.org/r/oom
+  > git clone -b 4.0.0-ONAP http://gerrit.onap.org/r/oom --recurse-submodules
   > cd oom/kubernetes
 
-**Step 2.** Install Helm Plugins required to deploy the ONAP Casablanca release::
+**Step 2.** Install Helm Plugins required to deploy ONAP::
 
   > sudo cp -R ~/oom/kubernetes/helm/plugins/ ~/.helm
 
 
-**Step 3.** Customize the helm charts like onap.values.yaml or an override.yaml
-like integration-override.yaml file to suit your deployment with items like the
+**Step 3.** Customize the helm charts like oom/kubernetes/onap/values.yaml or an override
+file like onap-all.yaml, onap-vfw.yaml or openstack.yaml file to suit your deployment with items like the
 OpenStack tenant information.
 
+.. note::
+  Standard and example override files (e.g. onap-all.yaml, openstack.yaml) can be found in 
+  the oom/kubernetes/onap/resources/overrides/ directory.
+
 
  a. You may want to selectively enable or disable ONAP components by changing
     the `enabled: true/false` flags.
 
 
 b. Encrypt the OpenStack password using the shell tool for robot and put it in
-    the robot helm charts or robot section of integration-override.yaml
+    the robot helm charts or robot section of openstack.yaml
 
 
  c. Encrypt the OpenStack password using the java based script for SO helm charts
-    or SO section of integration-override.yaml.
+    or SO section of openstack.yaml.
 
 
  d. Update the OpenStack parameters that will be used by robot, SO and APPC helm
@@ -49,8 +53,8 @@ OpenStack tenant information.
 
 
 a. Enabling/Disabling Components:
-Here is an example of the nominal entries that need to be provided. We have different
-values file available for different contexts.
+Here is an example of the nominal entries that need to be provided.
+We have different values file available for different contexts.
 
 .. literalinclude:: onap-values.yaml
    :language: yaml
@@ -63,9 +67,9 @@ openssl algorithm that works with the python based Robot Framework.
 .. note::
   To generate ROBOT openStackEncryptedPasswordHere :
 
-  ``root@olc-rancher:~# cd so/resources/config/mso/``
+  ``cd so/resources/config/mso/``
 
-  ``root@olc-rancher:~/oom/kubernetes/so/resources/config/mso# echo -n "<openstack tenant password>" | openssl aes-128-ecb -e -K `cat encryption.key` -nosalt | xxd -c 256 -p``
+  ``/oom/kubernetes/so/resources/config/mso# echo -n "<openstack tenant password>" | openssl aes-128-ecb -e -K `cat encryption.key` -nosalt | xxd -c 256 -p``
 
 c. Generating SO Encrypted Password:
 The SO Encrypted Password uses a java based encryption utility since the
@@ -120,20 +124,24 @@ follows::
 **Step 8.** Once the repo is setup, installation of ONAP can be done with a
 single command
 
- a. If you updated the values directly use this command::
+.. note::
+  The --timeout 900 is currently required in Dublin to address long running initialization tasks
+  for DMaaP and SO. Without this timeout value both applications may fail to deploy.
 
-    > helm deploy dev local/onap --namespace onap
+ a. To deploy all ONAP applications use this command::
 
+    > cd oom/kubernetes
+    > helm deploy dev local/onap --namespace onap -f onap/resources/overrides/onap-all.yaml -f onap/resources/overrides/openstack.yaml --timeout 900
 
- b. If you are using an integration-override.yaml file use this command::
+ b. If you are using a custom override (e.g. integration-override.yaml) use this command::
 
-    > helm deploy dev local/onap -f /root/integration-override.yaml --namespace onap
+    > helm deploy dev local/onap -f /root/integration-override.yaml --namespace onap --timeout 900
 
 
  c. If you have a slower cloud environment you may want to use the public-cloud.yaml
     which has longer delay intervals on database updates.::
 
-    > helm deploy dev local/onap -f /root/oom/kubernetes/onap/resources/environments/public-cloud.yaml -f /root/integration-override.yaml --namespace onap
+    > helm deploy dev local/onap -f /root/oom/kubernetes/onap/resources/environments/public-cloud.yaml -f /root/integration-override.yaml --namespace onap --timeout 900
 
 
 **Step 9.** Commands to interact with the OOM installation
@@ -141,7 +149,7 @@ single command
 Use the following to monitor your deployment and determine when ONAP is
 ready for use::
 
-  > kubectl get pods --all-namespaces -o=wide
+  > kubectl get pods -n onap -o=wide
 
 Undeploying onap can be done using the following command::
 
index 5159377..3ccde8d 100644 (file)
 
 .. _onap-on-kubernetes-with-rancher:
 
-ONAP on Kubernetes with Rancher
-###############################
+ONAP on HA Kubernetes Cluster
+#############################
 
-The following instructions will step you through the installation of Kubernetes
-on an OpenStack environment with Rancher.  The development lab used for this
-installation is the ONAP Windriver lab.
+This guide provides instructions on how to setup a Highly-Available Kubernetes Cluster.
+For this, we are hosting our cluster on OpenStack VMs and using the Rancher Kubernetes Engine (RKE)
+to deploy and manage our Kubernetes Cluster.
 
-This guide does not cover all of the steps required to setup your OpenStack
-environment: e.g. OAM networks and security groups but there is a wealth of
-OpenStack information on the web.
+.. contents::
+   :depth: 1
+   :local:
+..
 
-Rancher Installation
-====================
+The result at the end of this tutorial will be:
 
-The following instructions describe how to create an Openstack VM running
-Rancher. This node will not be used to host ONAP itself, it will be used
-exclusively by Rancher.
+*1.* Creation of a Key Pair to use with Open Stack and RKE
 
-Launch new VM instance to host the Rancher Server
--------------------------------------------------
+*2.* Creation of OpenStack VMs to host Kubernetes Control Plane
 
-.. image:: Rancher-Launch_new_VM_instance_to_host_the_Rancher_Server.jpeg
+*3.* Creation of OpenStack VMs to host Kubernetes Workers
 
-Select Ubuntu 16.04 as base image
+*4.* Installation and configuration of RKE to setup an HA Kubernetes
+
+*5.* Installation and configuration of kubectl
+
+*6.* Installation and configuration of helm
+
+*7.* Creation of an NFS Server to be used by ONAP as shared persistence
+
+There are many ways one can execute the above steps. Including automation through the use of HEAT to setup the OpenStack VMs.
+To better illustrate the steps involved, we have captured the manual creation of such an environment using the ONAP Wind River Open Lab.
+
+Create Key Pair
+===============
+A Key Pair is required to access the created OpenStack VMs and will be used by
+RKE to configure the VMs for Kubernetes.
+
+Use an existing key pair, import one or create a new one to assign.
+
+.. image:: images/keys/key_pair_1.png
+
+.. Note::
+  If you're creating a new Key Pair, ensure to create a local copy of the Private Key through the use of "Copy Private Key to Clipboard".
+
+For the purpose of this guide, we will assume a new local key called "onap-key"
+has been downloaded and is copied into **~/.ssh/**, from which it can be referenced.
+
+Example:
+  > mv onap-key ~/.ssh
+
+  > chmod 600 ~/.ssh/onap-key
+
+
+Create Kubernetes Control Plane VMs
+===================================
+
+The following instructions describe how to create 3 OpenStack VMs to host the
+Highly-Available Kubernetes Control Plane.
+ONAP workloads will not be scheduled on these Control Plane nodes.
+
+Launch new VM instances
+-----------------------
+
+.. image:: images/cp_vms/control_plane_1.png
+
+Select Ubuntu 18.04 as base image
 ---------------------------------
-Select "No" on "Create New Volume"
+Select "No" for "Create New Volume"
 
-.. image:: Rancher-Select_Ubuntu_16.04_as_base_image.jpeg
+.. image:: images/cp_vms/control_plane_2.png
 
 Select Flavor
 -------------
-Known issues exist if flavor is too small for Rancher. Please select a flavor
-with at least 4 vCPU and 8GB ram. A size of 8 vCPU and 16GB ram is recommended.
+The recommended flavor is at least 4 vCPU and 8GB ram.
 
-.. image:: Rancher-Select_Flavor.jpeg
+.. image:: images/cp_vms/control_plane_3.png
 
 Networking
 ----------
 
-.. image:: Rancher-Networking.jpeg
+.. image:: images/cp_vms/control_plane_4.png
 
 Security Groups
 ---------------
 
-.. image:: Rancher-Security_Groups.jpeg
+.. image:: images/cp_vms/control_plane_5.png
 
 Key Pair
 --------
-Use an existing key pair (e.g. onap_key), import an existing one or create a
-new one to assign.
+Assign the key pair that was created/selected previously (e.g. onap_key).
 
-.. image:: Rancher-Key_Pair.jpeg
+.. image:: images/cp_vms/control_plane_6.png
 
-Apply customization script for the Rancher VM
----------------------------------------------
+Apply customization script for Control Plane VMs
+------------------------------------------------
 
-Click :download:`openstack-rancher.sh <openstack-rancher.sh>` to download the
-script.
+Click :download:`openstack-k8s-controlnode.sh <openstack-k8s-controlnode.sh>` 
+to download the script.
 
-.. literalinclude:: openstack-rancher.sh
+.. literalinclude:: openstack-k8s-controlnode.sh
    :language: bash
 
 This customization script will:
 
-* setup root access to the VM (comment out if you wish to disable this
-  capability and restrict access to ssh access only)
+* update ubuntu
 * install docker
-* install rancher
-* install kubectl
-* install helm
-* install nfs server
-
-.. note::
-  The Casablanca release of OOM only supports Helm 2.9.1 not the 2.7.2 shown in
-  the screen capture below. The supported versions of all the software components
-  are listed in the :ref:`cloud-setup-guide-label`.
 
-.. image:: Apply_customization_script_for_the_Rancher_VM.jpeg
+.. image:: images/cp_vms/control_plane_7.png
 
 Launch Instance
 ---------------
 
-.. image:: Rancher-Launch_Instance.jpeg
-
-Assign Floating IP for external access
---------------------------------------
-
-.. image:: Rancher-Allocate_Floating_IP.jpeg
-
-.. image:: Rancher-Manage_Floating_IP_Associations.jpeg
+.. image:: images/cp_vms/control_plane_8.png
 
-.. image:: Rancher-Launch_Instance.jpeg
 
-Kubernetes Installation
-=======================
 
-Launch new VM instance(s) to create a Kubernetes single host or cluster
------------------------------------------------------------------------
+Create Kubernetes Worker VMs
+============================
+The following instructions describe how to create OpenStack VMs to host the
+Highly-Available Kubernetes Workers. ONAP workloads will only be scheduled on these nodes.
 
-To create a cluster:
+Launch new VM instances
+-----------------------
 
-.. note::
-  #. do not append a '-1' suffix (e.g. sb4-k8s)
-  #. increase count to the # of of kubernetes worker nodes you want (eg. 3)
+The number and size of Worker VMs is dependent on the size of the ONAP deployment.
+By default, all ONAP applications are deployed. It's possible to customize the deployment 
+and enable a subset of the ONAP applications. For the purpose of this guide, however,
+we will deploy 12 Kubernetes Workers that have been sized to handle the entire ONAP 
+application workload.
 
-.. image:: K8s-Launch_new_VM_instance_to_create_a_Kubernetes_single_host_or_cluster.jpeg
+.. image:: images/wk_vms/worker_1.png
 
-Select Ubuntu 16.04 as base image
+Select Ubuntu 18.04 as base image
 ---------------------------------
 Select "No" on "Create New Volume"
 
-.. image:: K8s-Select_Ubuntu_16.04_as_base_image.jpeg
+.. image:: images/wk_vms/worker_2.png
 
 Select Flavor
 -------------
-The size of a Kubernetes host depends on the size of the ONAP deployment that
-will be installed.
-
-As of the Casablanca release a minimum 224GB will be needed to run a
-full ONAP deployment (all components). It is recommended that more hosts are
-used with fewer resources instead of only a few large hosts. For example 14 x
-16GB hosts.
+The size of Kubernetes hosts depend on the size of the ONAP deployment
+being installed.
 
-If a small subset of ONAP components are being deployed for testing purposes,
-then a single 16GB or 32GB host should suffice.
+If a small subset of ONAP applications are being deployed
+(i.e. for testing purposes), then 16GB or 32GB may be sufficient.
 
-.. image:: K8s-Select_Flavor.jpeg
+.. image:: images/wk_vms/worker_3.png
 
 Networking
 -----------
 
-.. image:: K8s-Networking.jpeg
+.. image:: images/wk_vms/worker_4.png
 
 Security Group
 ---------------
 
-.. image:: K8s-Security_Group.jpeg
+.. image:: images/wk_vms/worker_5.png
 
 Key Pair
 --------
-Use an existing key pair (e.g. onap_key), import an existing one or create a
-new one to assign.
+Assign the key pair that was created/selected previously (e.g. onap_key).
 
-.. image:: K8s-Key_Pair.jpeg
+.. image:: images/wk_vms/worker_6.png
 
 Apply customization script for Kubernetes VM(s)
 -----------------------------------------------
 
-Click :download:`openstack-k8s-node.sh <openstack-k8s-node.sh>` to
-download the script.
+Click :download:`openstack-k8s-workernode.sh <openstack-k8s-workernode.sh>` to download the
+script.
 
-.. literalinclude:: openstack-k8s-node.sh
+.. literalinclude:: openstack-k8s-workernode.sh
    :language: bash
 
 This customization script will:
 
-* setup root access to the VM (comment out if you wish to disable this
-  capability and restrict access to ssh access only)
+* update ubuntu
 * install docker
-* install kubectl
-* install helm
-* install nfs common (see configuration step here)
+* install nfs common
 
-.. note::
-  Ensure you are using the correct versions as described in the
-  :ref:`cloud-setup-guide-label`
 
 Launch Instance
 ---------------
 
-.. image:: K8s-Launch_Instance.jpeg
+.. image:: images/wk_vms/worker_7.png
 
-Assign Floating IP for external access
---------------------------------------
 
-.. image:: K8s-Manage_Floating_IP_Associations.jpeg
 
-.. image:: K8s-Launch_Instance.jpeg
 
-Setting up an NFS share for Multinode Kubernetes Clusters
-=========================================================
-The figure below illustrates a possible topology of a multinode Kubernetes
-cluster.
+Assign Floating IP addresses
+----------------------------
+Assign Floating IPs to all Control Plane and Worker VMs.
+These addresses provide external access to the VMs and will be used by RKE
+to configure kubernetes onto the VMs.
 
-.. image:: k8s-topology.jpg
+Repeat the following for each VM previously created:
 
-One node, the Master Node, runs Rancher and Helm clients and connects to all
-the Kubernetes nodes in the cluster. Kubernetes nodes, in turn, run Rancher,
-Kubernetes and Tiller (Helm) agents, which receive, execute, and respond to
-commands issued by the Master Node (e.g. kubectl or helm operations). Note that
-the Master Node can be either a remote machine that the user can log in to or a
-local machine (e.g. laptop, desktop) that has access to the Kubernetes cluster.
+.. image:: images/floating_ips/floating_1.png
 
-Deploying applications to a Kubernetes cluster requires Kubernetes nodes to
-share a common, distributed filesystem. One node in the cluster plays the role
-of NFS Master (not to confuse with the Master Node that runs Rancher and Helm
-clients, which is located outside the cluster), while all the other cluster
-nodes play the role of NFS slaves. In the figure above, the left-most cluster
-node plays the role of NFS Master (indicated by the crown symbol). To properly
-set up an NFS share on Master and Slave nodes, the user can run the scripts
-below.
+Resulting floating IP assignments in this example.
 
-Click :download:`master_nfs_node.sh <master_nfs_node.sh>` to download the
-script.
+.. image:: images/floating_ips/floating_2.png
 
-.. literalinclude:: master_nfs_node.sh
-   :language: bash
 
-Click :download:`slave_nfs_node.sh <slave_nfs_node.sh>` to download the script.
 
-.. literalinclude:: slave_nfs_node.sh
-   :language: bash
 
-The master_nfs_node.sh script runs in the NFS Master node and needs the list of
-NFS Slave nodes as input, e.g.::
+Configure Rancher Kubernetes Engine (RKE)
+=========================================
 
-    > sudo ./master_nfs_node.sh node1_ip node2_ip ... nodeN_ip
+Install RKE
+-----------
+Download and install RKE on a VM, desktop or laptop.
+Binaries can be found here for Linux and Mac: https://github.com/rancher/rke/releases/tag/v0.2.1
 
-The slave_nfs_node.sh script runs in each NFS Slave node and needs the IP of
-the NFS Master node as input, e.g.::
+RKE requires a *cluster.yml* as input. An example file is shown below that
+describes a Kubernetes cluster that will be mapped onto the OpenStack VMs
+created earlier in this guide.
 
-    > sudo ./slave_nfs_node.sh master_node_ip
+Example: **cluster.yml**
+
+.. image:: images/rke/rke_1.png
 
-Configuration (Rancher and Kubernetes)
-======================================
+Click :download:`cluster.yml <cluster.yml>` to download the
+configuration file.
 
-Access Rancher server via web browser
--------------------------------------
-(e.g.  http://10.12.6.16:8080/env/1a5/apps/stacks)
+.. literalinclude:: cluster.yml
+   :language: yaml
 
-.. image:: Access_Rancher_server_via_web_browser.jpeg
+Prepare cluster.yml
+-------------------
+Before this configuration file can be used the external **address**
+and the **internal_address** must be mapped for each control and worker node
+in this file.
+
+Run RKE
+-------
+From within the same directory as the cluster.yml file, simply execute:
+
+  > rke up
+
+The output will look something like:
+
+.. code-block::
+
+  INFO[0000] Initiating Kubernetes cluster
+  INFO[0000] [certificates] Generating admin certificates and kubeconfig
+  INFO[0000] Successfully Deployed state file at [./cluster.rkestate]
+  INFO[0000] Building Kubernetes cluster
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.82]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.249]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.74]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.85]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.238]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.89]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.5.11]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.90]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.244]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.5.165]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.126]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.111]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.5.160]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.5.191]
+  INFO[0000] [dialer] Setup tunnel for host [10.12.6.195]
+  INFO[0002] [network] Deploying port listener containers
+  INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.85]
+  INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.89]
+  INFO[0002] [network] Pulling image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.90]
+  INFO[0011] [network] Successfully pulled image [nexus3.onap.org:10001/rancher/rke-tools:v0.1.27] on host [10.12.6.89]
+  . . . .
+  INFO[0309] [addons] Setting up Metrics Server
+  INFO[0309] [addons] Saving ConfigMap for addon rke-metrics-addon to Kubernetes
+  INFO[0309] [addons] Successfully saved ConfigMap for addon rke-metrics-addon to Kubernetes
+  INFO[0309] [addons] Executing deploy job rke-metrics-addon
+  INFO[0315] [addons] Metrics Server deployed successfully
+  INFO[0315] [ingress] Setting up nginx ingress controller
+  INFO[0315] [addons] Saving ConfigMap for addon rke-ingress-controller to Kubernetes
+  INFO[0316] [addons] Successfully saved ConfigMap for addon rke-ingress-controller to Kubernetes
+  INFO[0316] [addons] Executing deploy job rke-ingress-controller
+  INFO[0322] [ingress] ingress controller nginx deployed successfully
+  INFO[0322] [addons] Setting up user addons
+  INFO[0322] [addons] no user addons defined
+  INFO[0322] Finished building Kubernetes cluster successfully
+
+Install Kubectl
+===============
+
+Download and install kubectl. Binaries can be found here for Linux and Mac:
+
+https://storage.googleapis.com/kubernetes-release/release/v1.13.5/bin/linux/amd64/kubectl
+https://storage.googleapis.com/kubernetes-release/release/v1.13.5/bin/darwin/amd64/kubectl
+
+Validate deployment
+-------------------
+  > cp kube_config_cluster.yml ~/.kube/config.onap
 
-Add Kubernetes Environment to Rancher
--------------------------------------
+  > export KUBECONFIG=~/.kube/config.onap
 
-1. Select “Manage Environments”
+  > kubectl config use-context onap
 
-.. image:: Add_Kubernetes_Environment_to_Rancher.png
+  > kubectl get nodes -o=wide
 
-2. Select “Add Environment”
+.. code-block::
 
-.. image:: Select_Add_Environment.png
+  NAME             STATUS   ROLES               AGE     VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE           KERNEL-VERSION      CONTAINER-RUNTIME
+  onap-control-1   Ready    controlplane,etcd   3h53m   v1.13.5   10.0.0.8      <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
+  onap-control-2   Ready    controlplane,etcd   3h53m   v1.13.5   10.0.0.11     <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
+  onap-control-3   Ready    controlplane,etcd   3h53m   v1.13.5   10.0.0.12     <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
+  onap-k8s-1       Ready    worker              3h53m   v1.13.5   10.0.0.14     <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
+  onap-k8s-10      Ready    worker              3h53m   v1.13.5   10.0.0.16     <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
+  onap-k8s-11      Ready    worker              3h53m   v1.13.5   10.0.0.18     <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
+  onap-k8s-12      Ready    worker              3h53m   v1.13.5   10.0.0.7      <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
+  onap-k8s-2       Ready    worker              3h53m   v1.13.5   10.0.0.26     <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
+  onap-k8s-3       Ready    worker              3h53m   v1.13.5   10.0.0.5      <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
+  onap-k8s-4       Ready    worker              3h53m   v1.13.5   10.0.0.6      <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
+  onap-k8s-5       Ready    worker              3h53m   v1.13.5   10.0.0.9      <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
+  onap-k8s-6       Ready    worker              3h53m   v1.13.5   10.0.0.17     <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
+  onap-k8s-7       Ready    worker              3h53m   v1.13.5   10.0.0.20     <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
+  onap-k8s-8       Ready    worker              3h53m   v1.13.5   10.0.0.10     <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
+  onap-k8s-9       Ready    worker              3h53m   v1.13.5   10.0.0.4      <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
 
-3. Add unique name for your new Rancher environment
 
-4. Select the Kubernetes template
+Install Helm
+============
 
-5. Click "create"
+Example Helm client install on Linux:
+  > wget http://storage.googleapis.com/kubernetes-helm/helm-v2.12.3-linux-amd64.tar.gz
 
-.. image:: Click_create.jpeg
+  > tar -zxvf helm-v2.12.3-linux-amd64.tar.gz
 
-6. Select the new named environment (ie. SB4) from the dropdown list (top
-   left).
+  > sudo mv linux-amd64/helm /usr/local/bin/helm
 
-Rancher is now waiting for a Kubernetes Host to be added.
+Initialize Kubernetes Cluster for use by Helm
+---------------------------------------------
+  > kubectl -n kube-system create serviceaccount tiller
 
-.. image:: K8s-Assign_Floating_IP_for_external_access.jpeg
+  > kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
 
-Add Kubernetes Host
--------------------
+  > helm init --service-account tiller
 
-1.  If this is the first (or only) host being added - click on the "Add a host"
-    link
+  > kubectl -n kube-system  rollout status deploy/tiller-deploy
 
-.. image:: K8s-Assign_Floating_IP_for_external_access.jpeg
 
-and click on "Save" (accept defaults).
 
-.. image:: and_click_on_Save_accept_defaults.jpeg
+Setting up an NFS share for Multinode Kubernetes Clusters
+=========================================================
+Deploying applications to a Kubernetes cluster requires Kubernetes nodes to
+share a common, distributed filesystem. In this tutorial, we will setup an
+NFS Master, and configure all Worker nodes of a Kubernetes cluster to play
+the role of NFS slaves.
+
+It is recommended that a separate VM, outside of the kubernetes
+cluster, be used. This is to ensure that the NFS Master does not compete for
+resources with Kubernetes Control Plane or Worker Nodes.
+
+
+Launch new NFS Server VM instance
+---------------------------------
+.. image:: images/nfs_server/nfs_server_1.png
+
+Select Ubuntu 18.04 as base image
+---------------------------------
+Select "No" on "Create New Volume"
+
+.. image:: images/nfs_server/nfs_server_2.png
+
+Select Flavor
+-------------
+
+.. image:: images/nfs_server/nfs_server_3.png
+
+Networking
+-----------
+
+.. image:: images/nfs_server/nfs_server_4.png
+
+Security Group
+---------------
+
+.. image:: images/nfs_server/nfs_server_5.png
+
+Key Pair
+--------
+Assign the key pair that was created/selected previously (e.g. onap_key).
 
-otherwise select INFRASTRUCTURE→ Hosts and click on "Add Host"
+.. image:: images/nfs_server/nfs_server_6.png
 
-.. image:: otherwise_select_INFRASTRUCTURE_Hosts_and_click_on_Add_Host.jpg
+Apply customization script for NFS Server VM
+--------------------------------------------
 
-2. Enter the management IP for the k8s VM (e.g. 10.0.0.4) that was just
-   created.
+Click :download:`openstack-nfs-server.sh <openstack-nfs-server.sh>` to download the
+script.
 
-3. Click on “Copy to Clipboard” button
+.. literalinclude:: openstack-nfs-server.sh
+   :language: bash
 
-4. Click on “Close” button
+This customization script will:
 
-.. image:: Click_on_Close_button.jpeg
+* update ubuntu
+* install nfs server
 
-Without the 10.0.0.4 IP - the CATTLE_AGENT will be derived on the host - but it
-may not be a routable IP.
 
-Configure Kubernetes Host
--------------------------
+Launch Instance
+---------------
 
-1. Login to the new Kubernetes Host::
+.. image:: images/nfs_server/nfs_server_7.png
 
-    > ssh -i ~/oom-key.pem ubuntu@10.12.5.1
-    The authenticity of host '10.12.5.172 (10.12.5.172)' can't be established.
-    ECDSA key fingerprint is SHA256:tqxayN58nCJKOJcWrEZzImkc0qKQHDDfUTHqk4WMcEI.
-    Are you sure you want to continue connecting (yes/no)? yes
-    Warning: Permanently added '10.12.5.172' (ECDSA) to the list of known hosts.
-    Welcome to Ubuntu 16.04.2 LTS (GNU/Linux 4.4.0-64-generic x86_64)
 
-     * Documentation: https://help.ubuntu.com
-     * Management: https://landscape.canonical.com
-     * Support: https://ubuntu.com/advantage
 
-     Get cloud support with Ubuntu Advantage Cloud Guest:
-       http://www.ubuntu.com/business/services/cloud
+Assign Floating IP addresses
+----------------------------
 
-    180 packages can be updated.
-    100 updates are security updates.
+.. image:: images/nfs_server/nfs_server_8.png
 
-    The programs included with the Ubuntu system are free software;
-    the exact distribution terms for each program are described in the
-    individual files in /usr/share/doc/*/copyright.
+Resulting floating IP assignments in this example.
 
-    Ubuntu comes with ABSOLUTELY NO WARRANTY, to the extent permitted by
-    applicable law.
+.. image:: images/nfs_server/nfs_server_9.png
 
-    To run a command as administrator (user "root"), use "sudo <command>".
-    See "man sudo_root" for details.
 
-    ubuntu@sb4-k8s-1:~$
+To properly set up an NFS share on Master and Slave nodes, the user can run the
+scripts below.
 
+Click :download:`master_nfs_node.sh <master_nfs_node.sh>` to download the
+script.
+
+.. literalinclude:: master_nfs_node.sh
+   :language: bash
 
-2. Paste Clipboard content and hit enter to install Rancher Agent::
+Click :download:`slave_nfs_node.sh <slave_nfs_node.sh>` to download the script.
 
-    ubuntu@sb4-k8s-1:~$ sudo docker run -e CATTLE_AGENT_IP="10.0.0.4“ --rm --privileged -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/rancher:/var/lib/rancher rancher/agent:v1.2.9 http://10.12.6.16:8080/v1/scripts/5D757C68BD0A2125602A:1514678400000:yKW9xHGJDLvq6drz2eDzR2mjato
-    Unable to find image 'rancher/agent:v1.2.9' locally
-    v1.2.9: Pulling From rancher/agent
-    b3e1c725a85f: Pull complete
-    6071086409fc: Pull complete
-    d0ac3b234321: Pull complete
-    87f567b5cf58: Pull complete
-    a63e24b217c4: Pull complete
-    d0a3f58caef0: Pull complete
-    16914729cfd3: Pull complete
-    dc5c21984c5b: Pull complete
-    d7e8f9784b20: Pull complete
-    Digest: sha256:c21255ac4d94ffbc7b523F870F20ea5189b68Fa3d642800adb4774aab4748e66
-    Status: Downloaded newer image for rancher/agent:v1.2.9
+.. literalinclude:: slave_nfs_node.sh
+   :language: bash
+
+The master_nfs_node.sh script runs in the NFS Master node and needs the list of
+NFS Slave nodes as input, e.g.::
+
+    > sudo ./master_nfs_node.sh node1_ip node2_ip ... nodeN_ip
+
+The slave_nfs_node.sh script runs in each NFS Slave node and needs the IP of
+the NFS Master node as input, e.g.::
+
+    > sudo ./slave_nfs_node.sh master_node_ip
 
-    INFO: Running Agent Registration Process, CATTLE_URL=http://10.12.6.16:8080/v1
-    INFO: Attempting to connect to: http://10.12.6.16:8080/v1
-    INFO: http://10.12.6.16:8080/v1 is accessible
-    INFO: Inspecting host capabilities
-    INFO: Boot2Docker: false
-    INFO: Host writable: true
-    INFO: Token: xxxxxxxx
-    INFO: Running registration
-    INFO: Printing Environment
-    INFO: ENV: CATTLE_ACCESS_KEY=98B35AC484FBF820E0AD
-    INFO: ENV: CATTLE_AGENT_IP=10.0.9.4
-    INFO: ENV: CATTLE_HOME=/var/lib/cattle
-    INFO: ENV: CATTLE_REGISTRATION_ACCESS_KEY=registrationToken
-    INFO: ENV: CATTLE_REGISTRATION_SECRET_KEY=xxxxxxx
-    INFO: ENV: CATTLE_SECRET_KEY=xxxxxxx
-    INFO: ENV: CATTLE_URL=http://10.12.6.16:8080/v1
-    INFO: ENV: DETECTED_CATTLE_AGENT_IP=10.12.5.172
-    INFO: ENV: RANCHER_AGENT_IMAGE=rancher/agent:v1.2.9
-    INFO: Launched Rancher Agent: c27ee0f3dc4c783b0db647ea1f73c35b3843a4b8d60b96375b1a05aa77d83136
-    ubuntu@sb4-k8s-1:~$
-
-3. Return to Rancher environment (e.g. SB4) and wait for services to complete
-   (~ 10-15 mins)
-
-.. image:: Return_to_Rancher_environment_eg_SB4_and_wait_for_services_to_complete_10-15_mins.jpeg
-
-Configure kubectl and helm
-==========================
-In this example we are configuring kubectl and helm that have been installed
-(as a convenience) onto the rancher and kubernetes hosts.  Typically you would
-install them both on your PC and remotely connect to the cluster. The following
-procedure would remain the same.
-
-1. Click on CLI and then click on “Generate Config”
-
-.. image:: Click_on_CLI_and_then_click_on_Generate_Config.jpeg
-
-2. Click on “Copy to Clipboard” - wait until you see a "token" - do not copy
-   user+password - the server is not ready at that point
-
-.. image:: Click_on_Copy_to_Clipboard-wait_until_you_see_a_token-do_not_copy_user+password-the_server_is_not_ready_at_that_point.jpeg
-
-3. Create a .kube directory in user directory (if one does not exist)::
-
-    ubuntu@sb4-kSs-1:~$ mkdir .kube
-    ubuntu@sb4-kSs-1:~$ vi .kube/config
-
-4. Paste contents of Clipboard into a file called “config” and save the file::
-
-    apiVersion: v1
-    kind : Config
-    clusters:
-    - cluster:
-        api-version: v1
-        insecure-skip-tls-verify: true
-        server: "https://10.12.6.16:8080/r/projects/1a7/kubernetes:6443"
-      name: "SB4"
-    contexts:
-    - context:
-        cluster: "SB4"
-        user: "SB4"
-      name: "SB4"
-    current-context: "SB4"
-    users:
-    - name: "SB4"
-      user:
-        token: "QmFzaWMgTlRBd01qZzBOemc)TkRrMk1UWkNOMFpDTlVFNlExcHdSa1JhVZreE5XSm1TRGhWU2t0Vk1sQjVhalZaY0dWaFVtZGFVMHQzWW1WWVJtVmpSQT09"
-
-5. Validate that kubectl is able to connect to the kubernetes cluster::
-
-    ubuntu@sb4-k8s-1:~$ kubectl config get-contexts
-    CURRENT   NAME   CLUSTER   AUTHINFO   NAMESPACE
-    *         SB4    SB4       SB4
-    ubuntu@sb4-kSs-1:~$
-
-and show running pods::
-
-    ubuntu@sb4-k8s-1:~$ kubectl get pods --all-namespaces -o=wide
-    NAMESPACE    NAME                                  READY   STATUS    RESTARTS   AGE   IP             NODE
-    kube-system  heapster—7Gb8cd7b5 -q7p42             1/1     Running   0          13m   10.42.213.49   sb4-k8s-1
-    kube-system  kube-dns-5d7bM87c9-c6f67              3/3     Running   0          13m   10.42.181.110  sb4-k8s-1
-    kube-system  kubernetes-dashboard-f9577fffd-kswjg  1/1     Running   0          13m   10.42.105.113  sb4-k8s-1
-    kube-system  monitoring-grafana-997796fcf-vg9h9    1/1     Running   0          13m   10.42,141.58   sb4-k8s-1
-    kube-system  monitoring-influxdb-56chd96b-hk66b    1/1     Running   0          13m   10.4Z.246.90   sb4-k8s-1
-    kube-system  tiller-deploy-cc96d4f6b-v29k9         1/1     Running   0          13m   10.42.147.248  sb4-k8s-1
-    ubuntu@sb4-k8s-1:~$
-
-6. Validate helm is running at the right version. If not, an error like this
-   will be displayed::
-
-    ubuntu@sb4-k8s-1:~$ helm list
-    Error: incompatible versions c1ient[v2.9.1] server[v2.6.1]
-    ubuntu@sb4-k8s-1:~$
-
-7. Upgrade the server-side component of helm (tiller) via `helm init --upgrade`::
-
-    ubuntu@sb4-k8s-1:~$ helm init --upgrade
-    Creating /home/ubuntu/.helm
-    Creating /home/ubuntu/.helm/repository
-    Creating /home/ubuntu/.helm/repository/cache
-    Creating /home/ubuntu/.helm/repository/local
-    Creating /home/ubuntu/.helm/plugins
-    Creating /home/ubuntu/.helm/starters
-    Creating /home/ubuntu/.helm/cache/archive
-    Creating /home/ubuntu/.helm/repository/repositories.yaml
-    Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com
-    Adding local repo with URL: http://127.0.0.1:8879/charts
-    $HELM_HOME has been configured at /home/ubuntu/.helm.
-
-    Tiller (the Helm server-side component) has been upgraded to the current version.
-    Happy Helming!
-    ubuntu@sb4-k8s-1:~$
 
 ONAP Deployment via OOM
 =======================
diff --git a/docs/openstack-k8s-controlnode.sh b/docs/openstack-k8s-controlnode.sh
new file mode 100644 (file)
index 0000000..1d230c2
--- /dev/null
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+DOCKER_VERSION=18.09.5
+
+apt-get update
+
+curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+mkdir -p /etc/systemd/system/docker.service.d/
+cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
+[Service]
+ExecStart=
+ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001
+EOF
+
+sudo usermod -aG docker ubuntu
+
+systemctl daemon-reload
+systemctl restart docker
+apt-mark hold docker-ce
+
+IP_ADDR=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'`
+HOSTNAME=`hostname`
+
+echo "$IP_ADDR $HOSTNAME" >> /etc/hosts
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+sudo apt-get install make -y
+
+
+exit 0
diff --git a/docs/openstack-k8s-node.sh b/docs/openstack-k8s-node.sh
deleted file mode 100644 (file)
index 308f220..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-DOCKER_VERSION=17.03
-KUBECTL_VERSION=1.11.5
-HELM_VERSION=2.9.1
-
-# setup root access - default login: oom/oom - comment out to restrict access too ssh key only
-sed -i 's/PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
-sed -i 's/PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config
-service sshd restart
-echo -e "oom\noom" | passwd root
-
-apt-get update
-curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
-mkdir -p /etc/systemd/system/docker.service.d/
-cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
-[Service]
-ExecStart=
-ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001
-EOF
-systemctl daemon-reload
-systemctl restart docker
-apt-mark hold docker-ce
-
-IP_ADDY=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'`
-HOSTNAME=`hostname`
-
-echo "$IP_ADDY $HOSTNAME" >> /etc/hosts
-
-docker login -u docker -p docker nexus3.onap.org:10001
-
-sudo apt-get install make -y
-
-sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
-sudo chmod +x ./kubectl
-sudo mv ./kubectl /usr/local/bin/kubectl
-sudo mkdir ~/.kube
-wget http://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz
-sudo tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
-sudo mv linux-amd64/helm /usr/local/bin/helm
-
-# install nfs
-sudo apt-get install nfs-common -y
-
-
-exit 0
diff --git a/docs/openstack-k8s-workernode.sh b/docs/openstack-k8s-workernode.sh
new file mode 100644 (file)
index 0000000..3f32d05
--- /dev/null
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+DOCKER_VERSION=18.09.5
+
+apt-get update
+
+curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+mkdir -p /etc/systemd/system/docker.service.d/
+cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
+[Service]
+ExecStart=
+ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001
+EOF
+
+sudo usermod -aG docker ubuntu
+
+systemctl daemon-reload
+systemctl restart docker
+apt-mark hold docker-ce
+
+IP_ADDR=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'`
+HOSTNAME=`hostname`
+
+echo "$IP_ADDR $HOSTNAME" >> /etc/hosts
+
+docker login -u docker -p docker nexus3.onap.org:10001
+
+sudo apt-get install make -y
+
+# install nfs
+sudo apt-get install nfs-common -y
+
+
+exit 0
diff --git a/docs/openstack-nfs-server.sh b/docs/openstack-nfs-server.sh
new file mode 100644 (file)
index 0000000..1db04ea
--- /dev/null
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+apt-get update
+
+IP_ADDR=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'`
+HOSTNAME=`hostname`
+
+echo "$IP_ADDR $HOSTNAME" >> /etc/hosts
+
+sudo apt-get install make -y
+
+# nfs server
+sudo apt-get install nfs-kernel-server -y
+
+sudo mkdir -p /nfs_share
+sudo chown nobody:nogroup /nfs_share/
+
+exit 0
diff --git a/docs/openstack-rancher.sh b/docs/openstack-rancher.sh
deleted file mode 100644 (file)
index ac91ff5..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-
-DOCKER_VERSION=17.03
-RANCHER_VERSION=1.6.22
-KUBECTL_VERSION=1.11.5
-HELM_VERSION=2.9.1
-
-# setup root access - default login: oom/oom - comment out to restrict access too ssh key only
-sed -i 's/PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
-sed -i 's/PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config
-service sshd restart
-echo -e "oom\noom" | passwd root
-
-apt-get update
-curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
-mkdir -p /etc/systemd/system/docker.service.d/
-cat > /etc/systemd/system/docker.service.d/docker.conf << EOF
-[Service]
-ExecStart=
-ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry=nexus3.onap.org:10001
-EOF
-systemctl daemon-reload
-systemctl restart docker
-apt-mark hold docker-ce
-
-IP_ADDY=`ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}'`
-HOSTNAME=`hostname`
-
-echo "$IP_ADDY $HOSTNAME" >> /etc/hosts
-
-docker login -u docker -p docker nexus3.onap.org:10001
-
-sudo apt-get install make -y
-
-sudo docker run -d --restart=unless-stopped -p 8080:8080 --name rancher_server rancher/server:v$RANCHER_VERSION
-sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
-sudo chmod +x ./kubectl
-sudo mv ./kubectl /usr/local/bin/kubectl
-sudo mkdir ~/.kube
-wget http://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz
-sudo tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
-sudo mv linux-amd64/helm /usr/local/bin/helm
-
-# nfs server
-sudo apt-get install nfs-kernel-server -y
-
-sudo mkdir -p /nfs_share
-sudo chown nobody:nogroup /nfs_share/
-
-
-exit 0
diff --git a/kubernetes/aaf/.gitignore b/kubernetes/aaf/.gitignore
new file mode 100644 (file)
index 0000000..3a4f8ba
--- /dev/null
@@ -0,0 +1 @@
+/sms/
similarity index 97%
rename from kubernetes/aaf/charts/aaf-cs/Chart.yaml
rename to kubernetes/aaf/charts/aaf-cass/Chart.yaml
index ff083ac..f06d149 100644 (file)
@@ -14,5 +14,5 @@
 
 apiVersion: v1
 description: ONAP AAF cassandra
-name: aaf-cs
+name: aaf-cass
 version: 4.0.0
@@ -23,7 +23,7 @@ metadata:
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
 spec:
-  replicas: {{ .Values.replicaCount }}
+  replicas: {{ .Values.global.aaf.cass.replicas }}
   template:
     metadata:
       labels:
@@ -32,39 +32,62 @@ spec:
     spec:
       containers:
       - name: {{ include "common.name" . }}
-        image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_cass:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+        # installing with cmd "onap" will not only initialize the DB, but add ONAP bootstrap data as well
         command: ["/bin/bash","/opt/app/aaf/cass_init/cmd.sh","onap"]
+        lifecycle:
+          preStop:
+            exec:
+              command: ["/bin/sh","-c","rm /opt/app/aaf/status/aaf-cass"]
         ports:
-        - containerPort: {{ .Values.service.externalPort }}
-        - containerPort: {{ .Values.service.externalPort2 }}
-        - containerPort: {{ .Values.service.externalPort3 }}
-        - containerPort: {{ .Values.service.externalPort4 }}
+        - name: storage
+          containerPort: {{.Values.global.aaf.cass.storage_port}}
+        - name: ssl-storage
+          containerPort: {{.Values.global.aaf.cass.ssl_storage_port}}
+        - name: native-trans
+          containerPort: {{.Values.global.aaf.cass.native_trans_port}}
+        - name: rpc
+          containerPort: {{.Values.global.aaf.cass.rpc_port}}
         env:
         - name: CASSANDRA_CLUSTER_NAME
-          value: "osaaf"
+          value: {{.Values.global.aaf.cass.cluster_name}}
         - name: CASSANDRA_DC
-          value: "dc1"
+          value: {{.Values.global.aaf.cass.dc}}
         - name: HEAP_NEWSIZE
-          value: "512M"
+          value: {{.Values.global.aaf.cass.heap_new_size}}
         - name: MAX_HEAP_SIZE
-          value: "1024M"
+          value: {{.Values.global.aaf.cass.max_heap_size}}
+        - name: MY_POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: MY_POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: MY_POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
         volumeMounts:
         - mountPath: /var/lib/cassandra
-          name: cassandra-storage
+          name: aaf-cass-vol
+        - mountPath: /opt/app/aaf/status
+          name: aaf-status-vol
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
         {{- if eq .Values.liveness.enabled true }}
         livenessProbe:
           tcpSocket:
-            port: {{ .Values.service.internalPort3 }}
+            port: {{.Values.global.aaf.cass.native_trans_port}}
           initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
           periodSeconds: {{ .Values.liveness.periodSeconds }}
         {{ end -}}
         readinessProbe:
           tcpSocket:
-            port: {{ .Values.service.internalPort3 }}
+            port: {{.Values.global.aaf.cass.native_trans_port}}
           initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
           periodSeconds: {{ .Values.readiness.periodSeconds }}
         resources:
@@ -81,10 +104,13 @@ spec:
       - name: localtime
         hostPath:
           path: /etc/localtime
-      - name: cassandra-storage
+      - name: aaf-status-vol
+        persistentVolumeClaim:
+          claimName: {{ .Release.Name }}-aaf-status-pvc
+      - name: aaf-cass-vol
       {{- if .Values.persistence.enabled }}
         persistentVolumeClaim:
-          claimName: {{ include "common.fullname" . }}
+          claimName: {{ include "common.fullname" . }}-pvc
       {{- else }}
         emptyDir: {}
       {{- end }}
@@ -18,7 +18,7 @@
 kind: PersistentVolume
 apiVersion: v1
 metadata:
-  name: {{ include "common.fullname" . }}
+  name: {{ include "common.fullname" . }}-pv
   namespace: {{ include "common.namespace" . }}
   labels:
     app: {{ include "common.name" . }}
@@ -41,4 +41,4 @@ spec:
   storageClassName: "{{ .Values.persistence.storageClass }}"
 {{- end }}
 {{- end }}
-{{- end -}}
\ No newline at end of file
+{{- end -}}
@@ -18,7 +18,7 @@
 kind: PersistentVolumeClaim
 apiVersion: v1
 metadata:
-  name: {{ include "common.fullname" . }}
+  name: {{ include "common.fullname" .}}-pvc
   namespace: {{ include "common.namespace" . }}
   labels:
     app: {{ include "common.name" . }}
@@ -45,4 +45,4 @@ spec:
   storageClassName: "{{ .Values.persistence.storageClass }}"
 {{- end }}
 {{- end }}
-{{- end -}}
\ No newline at end of file
+{{- end -}}
diff --git a/kubernetes/aaf/charts/aaf-cass/templates/service.yaml b/kubernetes/aaf/charts/aaf-cass/templates/service.yaml
new file mode 100644 (file)
index 0000000..71882b1
--- /dev/null
@@ -0,0 +1,49 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "common.servicename" . }}
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+#  annotations:
+#    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - name: storage
+      protocol: TCP
+      port: {{.Values.global.aaf.cass.storage_port}}
+      containerPort: {{.Values.global.aaf.cass.storage_port}}
+    - name: ssl-storage
+      protocol: TCP
+      port: {{.Values.global.aaf.cass.ssl_storage_port}}
+      containerPort: {{.Values.global.aaf.cass.ssl_storage_port}}
+    - name: native-trans
+      protocol: TCP
+      port: {{.Values.global.aaf.cass.native_trans_port}}
+      containerPort: {{.Values.global.aaf.cass.native_trans_port}}
+    - name: rpc
+      protocol: TCP
+      port: {{.Values.global.aaf.cass.rpc_port}}
+      containerPort: {{.Values.global.aaf.cass.rpc_port}}
+  selector:
+    app: {{ include "common.name" . }}
+    release: {{ .Release.Name }}
+  clusterIP: None
similarity index 79%
rename from kubernetes/aaf/charts/aaf-cs/values.yaml
rename to kubernetes/aaf/charts/aaf-cass/values.yaml
index 7783745..ee05a19 100644 (file)
@@ -22,37 +22,29 @@ flavor: small
 #################################################################
 # Application configuration defaults.
 #################################################################
-# application image
-repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_cass:2.1.9-SNAPSHOT-latest
-pullPolicy: Always
-
 # application configuration
 config: {}
 
-# default number of instances
-replicaCount: 1
-
 nodeSelector: {}
 
 affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 300
+  initialDelaySeconds: 120
   periodSeconds: 10
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
-  initialDelaySeconds: 120
+  initialDelaySeconds: 5
   periodSeconds: 10
 
 service:
   name: aaf-cass
   type: ClusterIP
-  portName: aaf-cs
+  portName: aaf-cass
   #targetPort
   internalPort: 7000
   #port
@@ -69,22 +61,23 @@ ingress:
   enabled: false
 
 # Configure resource requests and limits
-resources:
-  small:
-    limits:
-      cpu: 200m
-      memory: 10000Mi
-    requests:
-      cpu: 10m
-      memory: 8500Mi
-  large:
-    limits:
-      cpu: 400m
-      memory: 12000Mi
-    requests:
-      cpu: 40m
-      memory: 9000Mi
-  unlimited: {}
+resources: {}
+  # Following AAI's advice, and not trying to guess on Cass usage.  Guessing just makes it slow
+#  small:
+#    limits:
+#      cpu: "1000m"
+#      memory: 4096Mi
+#    requests:
+#      cpu: 20m
+#      memory: 2048Mi
+#  large:
+#    limits:
+#      cpu: 400m
+#      memory: 12000Mi
+#    requests:
+#      cpu: 40m
+#      memory: 9000Mi
+#  unlimited: {}
 
 persistence:
   enabled: true
@@ -93,5 +86,5 @@ persistence:
   mountSubPath: "cass"
   volumeReclaimPolicy: Retain
   accessMode: ReadWriteOnce
-  size: 10Gi
+  size: 20Gi
   storageClass: "manual"
index f78dfdc..87bcaa4 100644 (file)
@@ -23,7 +23,7 @@ metadata:
   name: {{ include "common.fullname" . }}
   namespace: {{ include "common.namespace" . }}
 spec:
-  replicas: {{ .Values.replicaCount }}
+  replicas: {{ .Values.global.aaf.cm.replicas }}
   template:
     metadata:
       labels:
@@ -32,61 +32,70 @@ spec:
     spec:
       initContainers:
       - name: {{ include "common.name" . }}-config-container
-        image: "{{ include "common.repository" . }}/{{ .Values.global.configImage }}"
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_config:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+        command: ["bash","-c","cd /opt/app/aaf_config && bin/pod_wait.sh config aaf-service remove && bin/agent.sh"]
         volumeMounts:
           - mountPath: "/opt/app/osaaf"
-            name: {{ include "common.name" . }}-config-vol
+            name: aaf-config-vol
+          - mountPath: "/opt/app/aaf/status"
+            name: aaf-status-vol
         env:
-          - name: HOSTNAME
-            value: "{{ .Values.global.cadi.hostname }}"
-          - name: AAF_ENV
-            value: "{{ .Values.global.cadi.aaf_env }}"
-          - name: AAF_REGISTER_AS
-            value: "{{ .Values.aaf_register_as }}"
-          - name: LATITUDE
-            value: "{{ .Values.global.cadi.cadi_latitude }}"
-          - name: LONGITUDE
-            value: "{{ .Values.global.cadi.cadi_longitude }}"
-          - name: CASS_HOST
-            value: "{{ .Values.global.cadi.cass_host }}"
-          - name: AAF_LOCATOR_AS
-            value: "{{ .Values.global.cadi.cadi_locator_as }}"
-      - name: {{ include "common.name" . }}-readiness
-        command:
-        - /root/ready.py
-        args:
-        - --container-name
-        - aaf-locate
-        env:
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              apiVersion: v1
-              fieldPath: metadata.namespace
-        image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
-        imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+          - name: aaf_env
+            value: "{{ .Values.global.aaf.aaf_env }}"
+          - name: cadi_latitude
+            value: "{{ .Values.global.aaf.cadi_latitude }}"
+          - name: cadi_longitude
+            value: "{{ .Values.global.aaf.cadi_longitude }}"
+          - name: cadi_x509_issuers
+            value: "{{ .Values.global.aaf.cadi_x509_issuers }}"
+          - name: aaf_locate_url
+            value: "https://aaf-locate.{{ .Release.Namespace}}:{{.Values.global.aaf.locate.internal_port}}"
+          - name: aaf_locator_container
+            value: "oom"
+          - name: aaf_release
+            value: "{{ .Values.global.aaf.aaf_release }}"
+          - name: aaf_locator_container_ns
+            value: "{{ .Release.Namespace }}"
+          - name: aaf_locator_container
+            value: "oom"
+          - name: aaf_locator_public_fqdn
+            value: "{{.Values.global.aaf.public_fqdn}}"
+          - name: aaf_locator_name
+            value: "{{.Values.global.aaf.aaf_locator_name}}"
+          - name: aaf_locator_name_oom
+            value: "{{.Values.global.aaf.aaf_locator_name_oom}}"
+          - name: CASSANDRA_CLUSTER
+            value: "{{.Values.global.aaf.cass.fqdn}}.{{ .Release.Namespace }}"
+#          - name: CASSANDRA_USER
+#            value: ""
+#          - name: CASSANDRA_PASSWORD
+#            value: ""
+#         - name: CASSANDRA_PORT
+#            value: ""
       containers:
       - name: {{ include "common.name" . }}
-        command: ["/bin/bash","/opt/app/aaf/pod/pod_wait.sh","aaf_cm","sleep","0","cd /opt/app/aaf;bin/cm"]
-        image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+        command: ["/bin/bash","-c","cd /opt/app/aaf && /bin/bash bin/pod_wait.sh aaf-cm aaf-locate && exec bin/cm"]
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_cm:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
         volumeMounts:
         - mountPath: "/opt/app/osaaf"
-          name: {{ include "common.name" . }}-config-vol
+          name: aaf-config-vol
+        - mountPath: "/opt/app/aaf/status"
+          name: aaf-status-vol
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
         {{- if eq .Values.liveness.enabled true }}
         livenessProbe:
           tcpSocket:
-            port: {{ .Values.service.internalPort }}
+            port: {{ .Values.global.aaf.cm.internal_port }}
           initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
           periodSeconds: {{ .Values.liveness.periodSeconds }}
         {{ end -}}
         readinessProbe:
           tcpSocket:
-            port: {{ .Values.service.internalPort }}
+            port: {{ .Values.global.aaf.cm.internal_port }}
           initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
           periodSeconds: {{ .Values.readiness.periodSeconds }}
         resources:
@@ -103,7 +112,11 @@ spec:
       - name: localtime
         hostPath:
           path: /etc/localtime
-      - name: {{ include "common.name" . }}-config-vol
-        emptyDir: {}
+      - name: aaf-status-vol
+        persistentVolumeClaim:
+          claimName: {{ .Release.Name }}-aaf-status-pvc
+      - name: aaf-config-vol
+        persistentVolumeClaim:
+          claimName: {{ .Release.Name }}-aaf-config-pvc
       imagePullSecrets:
       - name: "{{ include "common.namespace" . }}-docker-registry-key"
index 281aa1c..f54c4d8 100644 (file)
@@ -24,18 +24,10 @@ metadata:
     heritage: {{ .Release.Service }}
 spec:
   ports:
-    {{if eq .Values.service.type "NodePort" -}}
-    - port: {{ .Values.service.externalPort }}
-      #Example internal target port if required
-      #targetPort: {{ .Values.service.internalPort }}
-      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
-      name: {{ .Values.service.portName }}
-    {{- else -}}
-    - port: {{ .Values.service.externalPort }}
-      targetPort: {{ .Values.service.internalPort }}
-      name: {{ .Values.service.portName }}
-    {{- end}}
+    - port: {{ .Values.global.aaf.cm.internal_port }}
+      nodePort: {{ .Values.global.aaf.cm.public_port }}
+      name: aaf-cm
   selector:
     app: {{ include "common.name" . }}
     release: {{ .Release.Name }}
-  type: {{ .Values.service.type }}
+  type: "NodePort"
index ec7de46..6ace7d1 100644 (file)
@@ -24,14 +24,6 @@ flavor: small
 # Application configuration defaults.
 #################################################################
 # application image
-repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_cm:2.1.9-SNAPSHOT-latest
-aaf_register_as: "aaf-cm.onap"
-pullPolicy: Always
-
-
-# default number of instances
-replicaCount: 1
 
 nodeSelector: {}
 
@@ -39,14 +31,14 @@ affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 300
+  initialDelaySeconds: 120
   periodSeconds: 10
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
-  initialDelaySeconds: 30
+  initialDelaySeconds: 5
   periodSeconds: 10
 
 service:
@@ -62,19 +54,20 @@ ingress:
   enabled: false
 
 # Configure resource requests and limits
-resources:
-  small:
-    limits:
-      cpu: 200m
-      memory: 800Mi
-    requests:
-      cpu: 20m
-      memory: 500Mi
-  large:
-    limits:
-      cpu: 400m
-      memory: 1Gi
-    requests:
-      cpu: 40m
-      memory: 600Mi
-  unlimited: {}
+resources: {}
+# Allow END users to do this, if they want.  Detrimental to Test services
+#  small:
+#    limits:
+#      cpu: 200m
+#      memory: 800Mi
+#    requests:
+#      cpu: 20m
+#      memory: 500Mi
+#  large:
+#    limits:
+#      cpu: 400m
+#      memory: 1Gi
+#    requests:
+#      cpu: 40m
+#      memory: 600Mi
+#  unlimited: {}
diff --git a/kubernetes/aaf/charts/aaf-cs/templates/service.yaml b/kubernetes/aaf/charts/aaf-cs/templates/service.yaml
deleted file mode 100644 (file)
index b1716e4..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright © 2017 Amdocs, Bell Canada
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ include "common.servicename" . }}
-  namespace: {{ include "common.namespace" . }}
-  labels:
-    app: {{ include "common.name" . }}
-    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
-    release: {{ .Release.Name }}
-    heritage: {{ .Release.Service }}
-#  annotations:
-#    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
-spec:
-  type: {{ .Values.service.type }}
-  ports:
-    {{if eq .Values.service.type "NodePort" -}}
-    - port: {{ .Values.service.externalPort }}
-      #Example internal target port if required
-      #targetPort: {{ .Values.service.internalPort }}
-      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
-      name: {{ .Values.service.portName }}
-    - port: {{ .Values.service.externalPort2 }}
-      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort2 }}
-      name: {{ .Values.service.portName }}2
-    - port: {{ .Values.service.externalPort3 }}
-      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort3 }}
-      name: {{ .Values.service.portName }}3
-    - port: {{ .Values.service.externalPort4 }}
-      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort4 }}
-      name: {{ .Values.service.portName }}4
-    {{- else -}}
-    - port: {{ .Values.service.externalPort }}
-      targetPort: {{ .Values.service.internalPort }}
-      name: {{ .Values.service.portName }}
-    - port: {{ .Values.service.externalPort2 }}
-      targetPort: {{ .Values.service.internalPort2 }}
-      name: {{ .Values.service.portName }}2
-    - port: {{ .Values.service.externalPort3 }}
-      targetPort: {{ .Values.service.internalPort3 }}
-      name: {{ .Values.service.portName }}3
-    - port: {{ .Values.service.externalPort4 }}
-      targetPort: {{ .Values.service.internalPort4 }}
-      name: {{ .Values.service.portName }}4
-    {{- end}}
-  selector:
-    app: {{ include "common.name" . }}
-    release: {{ .Release.Name }}
-  clusterIP: None
index b938448..9f97da9 100644 (file)
@@ -23,7 +23,7 @@ metadata:
   name: {{ include "common.fullname" . }}
   namespace: {{ include "common.namespace" . }}
 spec:
-  replicas: {{ .Values.replicaCount }}
+  replicas: {{ .Values.global.aaf.fs.replicas }}
   template:
     metadata:
       labels:
@@ -32,48 +32,56 @@ spec:
     spec:
       initContainers:
       - name: {{ include "common.name" . }}-config-container
-        image: "{{ include "common.repository" . }}/{{ .Values.global.configImage }}"
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_config:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+        command: ["bash","-c","cd /opt/app/aaf_config && bin/pod_wait.sh config aaf-service remove && bin/agent.sh"]
         volumeMounts:
           - mountPath: "/opt/app/osaaf"
-            name: {{ include "common.name" . }}-config-vol
+            name: aaf-config-vol
+          - mountPath: "/opt/app/aaf/status"
+            name: aaf-status-vol
         env:
-          - name: HOSTNAME
-            value: "{{ .Values.global.cadi.hostname }}"
-          - name: AAF_ENV
-            value: "{{ .Values.global.cadi.aaf_env }}"
-          - name: AAF_REGISTER_AS
-            value: "{{ .Values.aaf_register_as }}"
-          - name: LATITUDE
-            value: "{{ .Values.global.cadi.cadi_latitude }}"
-          - name: LONGITUDE
-            value: "{{ .Values.global.cadi.cadi_longitude }}"
-          - name: CASS_HOST
-            value: "{{ .Values.global.cadi.cass_host }}"
-          - name: AAF_LOCATOR_AS
-            value: "{{ .Values.global.cadi.cadi_locator_as }}"
-      - name: {{ include "common.name" . }}-readiness
-        command:
-        - /root/ready.py
-        args:
-        - --container-name
-        - aaf-locate
-        env:
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              apiVersion: v1
-              fieldPath: metadata.namespace
-        image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
-        imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+          - name: aaf_env
+            value: "{{ .Values.global.aaf.aaf_env }}"
+          - name: cadi_latitude
+            value: "{{ .Values.global.aaf.cadi_latitude }}"
+          - name: cadi_longitude
+            value: "{{ .Values.global.aaf.cadi_longitude }}"
+          - name: cadi_x509_issuers
+            value: "{{ .Values.global.aaf.cadi_x509_issuers }}"
+          - name: aaf_locate_url
+            value: "https://aaf-locate.{{ .Release.Namespace}}:{{.Values.global.aaf.locate.internal_port}}"
+          - name: aaf_locator_container
+            value: "oom"
+          - name: aaf_release
+            value: "{{ .Values.global.aaf.aaf_release }}"
+          - name: aaf_locator_container_ns
+            value: "{{ .Release.Namespace }}"
+          - name: aaf_locator_container
+            value: "oom"
+          - name: aaf_locator_public_fqdn
+            value: "{{.Values.global.aaf.public_fqdn}}"
+          - name: aaf_locator_name
+            value: "{{.Values.global.aaf.aaf_locator_name}}"
+          - name: aaf_locator_name_oom
+            value: "{{.Values.global.aaf.aaf_locator_name_oom}}"
+          - name: CASSANDRA_CLUSTER
+            value: "{{.Values.global.aaf.cass.fqdn}}.{{ .Release.Namespace }}"
+#          - name: CASSANDRA_USER
+#            value: ""
+#          - name: CASSANDRA_PASSWORD
+#            value: ""
+#         - name: CASSANDRA_PORT
       containers:
       - name: {{ include "common.name" . }}
-        command: ["/bin/bash","/opt/app/aaf/pod/pod_wait.sh","aaf_fs","sleep","0","cd /opt/app/aaf;bin/fs"]
-        image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+        command: ["/bin/bash","-c","cd /opt/app/aaf && /bin/bash bin/pod_wait.sh aaf-fs aaf-locate && exec bin/fs"]
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_fs:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
         volumeMounts:
         - mountPath: "/opt/app/osaaf"
-          name: {{ include "common.name" . }}-config-vol
+          name: aaf-config-vol
+        - mountPath: "/opt/app/aaf/status"
+          name: aaf-status-vol
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
@@ -103,7 +111,11 @@ spec:
       - name: localtime
         hostPath:
           path: /etc/localtime
-      - name: {{ include "common.name" . }}-config-vol
-        emptyDir: {}
+      - name: aaf-status-vol
+        persistentVolumeClaim:
+          claimName: {{ .Release.Name }}-aaf-status-pvc
+      - name: aaf-config-vol
+        persistentVolumeClaim:
+          claimName: {{ .Release.Name }}-aaf-config-pvc
       imagePullSecrets:
       - name: "{{ include "common.namespace" . }}-docker-registry-key"
index 281aa1c..1042afd 100644 (file)
@@ -24,18 +24,10 @@ metadata:
     heritage: {{ .Release.Service }}
 spec:
   ports:
-    {{if eq .Values.service.type "NodePort" -}}
-    - port: {{ .Values.service.externalPort }}
-      #Example internal target port if required
-      #targetPort: {{ .Values.service.internalPort }}
-      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
-      name: {{ .Values.service.portName }}
-    {{- else -}}
-    - port: {{ .Values.service.externalPort }}
-      targetPort: {{ .Values.service.internalPort }}
-      name: {{ .Values.service.portName }}
-    {{- end}}
+    - port: {{ .Values.global.aaf.fs.internal_port }}
+      nodePort: {{ .Values.global.aaf.fs.public_port }}
+      name: aaf-hello
   selector:
     app: {{ include "common.name" . }}
     release: {{ .Release.Name }}
-  type: {{ .Values.service.type }}
+  type: "NodePort"
index 526a9fd..48ad7c1 100644 (file)
@@ -24,29 +24,23 @@ flavor: small
 # Application configuration defaults.
 #################################################################
 # application image
-repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_fs:2.1.9-SNAPSHOT-latest
-aaf_register_as: "aaf-fs.onap"
 pullPolicy: Always
 
 
-# default number of instances
-replicaCount: 1
-
 nodeSelector: {}
 
 affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 300
+  initialDelaySeconds: 120
   periodSeconds: 10
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
-  initialDelaySeconds: 30
+  initialDelaySeconds: 5
   periodSeconds: 10
 
 service:
@@ -62,19 +56,20 @@ ingress:
   enabled: false
 
 # Configure resource requests and limits
-resources:
-  small:
-    limits:
-      cpu: 200m
-      memory: 600Mi
-    requests:
-      cpu: 10m
-      memory: 300Mi
-  large:
-    limits:
-      cpu: 500m
-      memory: 700Mi
-    requests:
-      cpu: 100m
-      memory: 400Mi
-  unlimited: {}
+resources: {}
+# Allow END users to do this, if they want.  Detrimental to Test services
+#  small:
+#    limits:
+#      cpu: 200m
+#      memory: 800Mi
+#    requests:
+#      cpu: 10m
+#      memory: 300Mi
+#  large:
+#    limits:
+#      cpu: 500m
+#      memory: 700Mi
+#    requests:
+#      cpu: 100m
+#      memory: 400Mi
+#  unlimited: {}
index 9dd8794..d067b5e 100644 (file)
@@ -23,7 +23,7 @@ metadata:
   name: {{ include "common.fullname" . }}
   namespace: {{ include "common.namespace" . }}
 spec:
-  replicas: {{ .Values.replicaCount }}
+  replicas: {{ .Values.global.aaf.gui.replicas }}
   template:
     metadata:
       labels:
@@ -32,61 +32,69 @@ spec:
     spec:
       initContainers:
       - name: {{ include "common.name" . }}-config-container
-        image: "{{ include "common.repository" . }}/{{ .Values.global.configImage }}"
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_config:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+        command: ["bash","-c","cd /opt/app/aaf_config && bin/pod_wait.sh config aaf-service remove && bin/agent.sh"]
         volumeMounts:
           - mountPath: "/opt/app/osaaf"
-            name: {{ include "common.name" . }}-config-vol
+            name: aaf-config-vol
+          - mountPath: "/opt/app/aaf/status"
+            name: aaf-status-vol
         env:
-          - name: HOSTNAME
-            value: "{{ .Values.global.cadi.hostname }}"
-          - name: AAF_ENV
-            value: "{{ .Values.global.cadi.aaf_env }}"
-          - name: AAF_REGISTER_AS
-            value: "{{ .Values.aaf_register_as }}"
-          - name: LATITUDE
-            value: "{{ .Values.global.cadi.cadi_latitude }}"
-          - name: LONGITUDE
-            value: "{{ .Values.global.cadi.cadi_longitude }}"
-          - name: CASS_HOST
-            value: "{{ .Values.global.cadi.cass_host }}"
-          - name: AAF_LOCATOR_AS
-            value: "{{ .Values.global.cadi.cadi_locator_as }}"
-      - name: {{ include "common.name" . }}-readiness
-        command:
-        - /root/ready.py
-        args:
-        - --container-name
-        - aaf-cm
-        env:
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              apiVersion: v1
-              fieldPath: metadata.namespace
-        image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
-        imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+          - name: aaf_env
+            value: "{{ .Values.global.aaf.aaf_env }}"
+          - name: cadi_latitude
+            value: "{{ .Values.global.aaf.cadi_latitude }}"
+          - name: cadi_longitude
+            value: "{{ .Values.global.aaf.cadi_longitude }}"
+          - name: cadi_x509_issuers
+            value: "{{ .Values.global.aaf.cadi_x509_issuers }}"
+          - name: aaf_locate_url
+            value: "https://aaf-locate.{{ .Release.Namespace}}:{{.Values.global.aaf.locate.internal_port}}"
+          - name: aaf_locator_container
+            value: "oom"
+          - name: aaf_release
+            value: "{{ .Values.global.aaf.aaf_release }}"
+          - name: aaf_locator_container_ns
+            value: "{{ .Release.Namespace }}"
+          - name: aaf_locator_container
+            value: "oom"
+          - name: aaf_locator_public_fqdn
+            value: "{{.Values.global.aaf.public_fqdn}}"
+          - name: aaf_locator_name
+            value: "{{.Values.global.aaf.aaf_locator_name}}"
+          - name: aaf_locator_name_oom
+            value: "{{.Values.global.aaf.aaf_locator_name_oom}}"
+          - name: CASSANDRA_CLUSTER
+            value: "{{.Values.global.aaf.cass.fqdn}}.{{ .Release.Namespace }}"
+#          - name: CASSANDRA_USER
+#            value: ""
+#          - name: CASSANDRA_PASSWORD
+#            value: ""
+#         - name: CASSANDRA_PORT
       containers:
       - name: {{ include "common.name" . }}
-        command: ["/bin/bash","/opt/app/aaf/pod/pod_wait.sh","aaf_gui","sleep","0","cd /opt/app/aaf;bin/gui"]
-        image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+        command: ["/bin/bash","-c","cd /opt/app/aaf && /bin/bash bin/pod_wait.sh aaf-gui aaf-locate && exec bin/gui"]  
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_gui:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
         volumeMounts:
         - mountPath: "/opt/app/osaaf"
-          name: {{ include "common.name" . }}-config-vol
+          name: aaf-config-vol
+        - mountPath: "/opt/app/aaf/status"
+          name: aaf-status-vol
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
         {{- if eq .Values.liveness.enabled true }}
         livenessProbe:
           tcpSocket:
-            port: {{ .Values.service.internalPort }}
+            port: {{ .Values.global.aaf.gui.internal_port }}
           initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
           periodSeconds: {{ .Values.liveness.periodSeconds }}
         {{ end -}}
         readinessProbe:
           tcpSocket:
-            port: {{ .Values.service.internalPort }}
+            port: {{ .Values.global.aaf.gui.internal_port }}
           initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
           periodSeconds: {{ .Values.readiness.periodSeconds }}
         resources:
@@ -103,7 +111,11 @@ spec:
       - name: localtime
         hostPath:
           path: /etc/localtime
-      - name: {{ include "common.name" . }}-config-vol
-        emptyDir: {}
+      - name: aaf-status-vol
+        persistentVolumeClaim:
+          claimName: {{ .Release.Name }}-aaf-status-pvc
+      - name: aaf-config-vol
+        persistentVolumeClaim:
+          claimName: {{ .Release.Name }}-aaf-config-pvc
       imagePullSecrets:
       - name: "{{ include "common.namespace" . }}-docker-registry-key"
index 281aa1c..ea06925 100644 (file)
@@ -24,18 +24,10 @@ metadata:
     heritage: {{ .Release.Service }}
 spec:
   ports:
-    {{if eq .Values.service.type "NodePort" -}}
-    - port: {{ .Values.service.externalPort }}
-      #Example internal target port if required
-      #targetPort: {{ .Values.service.internalPort }}
-      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
-      name: {{ .Values.service.portName }}
-    {{- else -}}
-    - port: {{ .Values.service.externalPort }}
-      targetPort: {{ .Values.service.internalPort }}
-      name: {{ .Values.service.portName }}
-    {{- end}}
+    - port: {{ .Values.global.aaf.gui.internal_port }}
+      nodePort: {{ .Values.global.aaf.gui.public_port }}
+      name: aaf-gui
   selector:
     app: {{ include "common.name" . }}
     release: {{ .Release.Name }}
-  type: {{ .Values.service.type }}
+  type: "NodePort"
index 145dec1..3068f14 100644 (file)
@@ -25,29 +25,23 @@ flavor: small
 # Application configuration defaults.
 #################################################################
 # application image
-repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_gui:2.1.9-SNAPSHOT-latest
-aaf_register_as: "aaf-gui.onap"
 pullPolicy: Always
 
 
-# default number of instances
-replicaCount: 1
-
 nodeSelector: {}
 
 affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 300
+  initialDelaySeconds: 120
   periodSeconds: 10
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
-  initialDelaySeconds: 30
+  initialDelaySeconds: 5
   periodSeconds: 10
 
 service:
@@ -64,19 +58,21 @@ ingress:
   enabled: false
 
 # Configure resource requests and limits
-resources:
-  small:
-    limits:
-      cpu: 100m
-      memory: 500Mi
-    requests:
-      cpu: 10m
-      memory: 200Mi
-  large:
-    limits:
-      cpu: 200m
-      memory: 1Gi
-    requests:
-      cpu: 100m
-      memory: 500Mi
-  unlimited: {}
+resources: {}
+# Allow END users to do this, if they want.  Detrimental to Test services
+#resources:
+#  small:
+#    limits:
+#      cpu: 100m
+#      memory: 500Mi
+#    requests:
+#      cpu: 10m
+#      memory: 200Mi
+#  large:
+#    limits:
+#      cpu: 200m
+#      memory: 1Gi
+#    requests:
+#      cpu: 100m
+#      memory: 500Mi
+#  unlimited: {}
index c57e57f..9faa580 100644 (file)
@@ -23,7 +23,7 @@ metadata:
   name: {{ include "common.fullname" . }}
   namespace: {{ include "common.namespace" . }}
 spec:
-  replicas: {{ .Values.replicaCount }}
+  replicas: {{ .Values.global.aaf.hello.replicas }}
   template:
     metadata:
       labels:
@@ -32,48 +32,60 @@ spec:
     spec:
       initContainers:
       - name: {{ include "common.name" . }}-config-container
-        image: "{{ include "common.repository" . }}/{{ .Values.global.configImage }}"
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_agent:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
         volumeMounts:
-          - mountPath: "/opt/app/osaaf"
-            name: {{ include "common.name" . }}-config-vol
+        - mountPath: "/opt/app/osaaf/local"
+          name: aaf-hello-vol
+        command: ["bash","-c","/opt/app/aaf_config/bin/pod_wait.sh config nc aaf-cm.{{.Release.Namespace}} 8150 remove && cd /opt/app/osaaf/local && /opt/app/aaf_config/bin/agent.sh"]
         env:
-          - name: HOSTNAME
-            value: "{{ .Values.global.cadi.hostname }}"
-          - name: AAF_ENV
-            value: "{{ .Values.global.cadi.aaf_env }}"
-          - name: AAF_REGISTER_AS
-            value: "{{ .Values.aaf_register_as }}"
-          - name: LATITUDE
-            value: "{{ .Values.global.cadi.cadi_latitude }}"
-          - name: LONGITUDE
-            value: "{{ .Values.global.cadi.cadi_longitude }}"
-          - name: CASS_HOST
-            value: "{{ .Values.global.cadi.cass_host }}"
-          - name: AAF_LOCATOR_AS
-            value: "{{ .Values.global.cadi.cadi_locator_as }}"
-      - name: {{ include "common.name" . }}-readiness
-        command:
-        - /root/ready.py
-        args:
-        - --container-name
-        - aaf-locate
-        env:
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              apiVersion: v1
-              fieldPath: metadata.namespace
-        image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
-        imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+          - name: aaf_env
+            value: "{{ .Values.global.aaf.aaf_env }}"
+          - name: cadi_latitude
+            value: "{{ .Values.global.aaf.cadi_latitude }}"
+          - name: cadi_longitude
+            value: "{{ .Values.global.aaf.cadi_longitude }}"
+          - name: aaf_locate_url
+            value: "https://aaf-locate.{{ .Release.Namespace}}:{{.Values.global.aaf.locate.internal_port}}"
+          - name: aaf_locator_container
+            value: "oom"
+          - name: aaf_release
+            value: "{{ .Values.global.aaf.aaf_release }}"
+          - name: aaf_locator_container_ns
+            value: "{{ .Release.Namespace }}"
+          - name: aaf_locator_container
+            value: "oom"
+          - name: aaf_locator_container_ns
+            value: "{{ .Release.Namespace }}"
+          - name: aaf_locator_app_ns
+            value: "org.osaaf.aaf"
+          - name: "APP_FQDN"
+            value: "{{ .Values.global.aaf.hello.fqdn }}"
+          - name: "APP_FQI"
+            value: "aaf@aaf.osaaf.org"
+          - name: "DEPLOY_FQI"
+            value: "deployer@people.osaaf.org"
+          - name: "DEPLOY_PASSWORD"
+            value: "demo123456!"
+# Hello specific.  Clients don't necessarily need this
+          - name: aaf_locator_public_fqdn
+            value: "{{.Values.global.aaf.public_fqdn}}"
+          - name: aaf_locator_name
+            value: "{{.Values.global.aaf.aaf_locator_name}}"
+          - name: aaf_locator_name_oom
+            value: "{{.Values.global.aaf.aaf_locator_name_oom}}"
+          - name: aaf_locator_fqdn_oom
+            value: "%N.%CNS"
       containers:
       - name: {{ include "common.name" . }}
-        command: ["/bin/bash","/opt/app/aaf/pod/pod_wait.sh","aaf_hello","sleep","0","cd /opt/app/aaf;bin/hello"]
-        image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+        command: ["/bin/bash","-c","cd /opt/app/aaf && exec bin/hello"]
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_hello:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
         volumeMounts:
-        - mountPath: "/opt/app/osaaf"
-          name: {{ include "common.name" . }}-config-vol
+        - mountPath: "/opt/app/osaaf/local"
+          name: aaf-hello-vol
+        - mountPath: "/opt/app/aaf/status"
+          name: aaf-status-vol
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
@@ -103,7 +115,10 @@ spec:
       - name: localtime
         hostPath:
           path: /etc/localtime
-      - name: {{ include "common.name" . }}-config-vol
+      - name: aaf-status-vol
+        persistentVolumeClaim:
+          claimName: {{ .Release.Name }}-aaf-status-pvc
+      - name: aaf-hello-vol
         emptyDir: {}
       imagePullSecrets:
       - name: "{{ include "common.namespace" . }}-docker-registry-key"
index 281aa1c..102e85d 100644 (file)
@@ -24,18 +24,10 @@ metadata:
     heritage: {{ .Release.Service }}
 spec:
   ports:
-    {{if eq .Values.service.type "NodePort" -}}
-    - port: {{ .Values.service.externalPort }}
-      #Example internal target port if required
-      #targetPort: {{ .Values.service.internalPort }}
-      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
-      name: {{ .Values.service.portName }}
-    {{- else -}}
-    - port: {{ .Values.service.externalPort }}
-      targetPort: {{ .Values.service.internalPort }}
-      name: {{ .Values.service.portName }}
-    {{- end}}
+    - port: {{ .Values.global.aaf.hello.internal_port }}
+      nodePort: {{ .Values.global.aaf.hello.public_port }}
+      name: aaf-hello
   selector:
     app: {{ include "common.name" . }}
     release: {{ .Release.Name }}
-  type: {{ .Values.service.type }}
+  type: "NodePort"
index 8c46b80..c3ed9e1 100644 (file)
 global:
   nodePortPrefix: 302
   readinessRepository: oomk8s
-  readinessImage: readiness-check:2.0.0
+  readinessImage: readiness-check:2.0.2
 flavor: small
 #################################################################
 # Application configuration defaults.
 #################################################################
 # application image
-repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_hello:2.1.9-SNAPSHOT-latest
-aaf_register_as: "aaf-hello.onap"
-pullPolicy: Always
 
 
-# default number of instances
-replicaCount: 1
-
 nodeSelector: {}
 
 affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 300
+  initialDelaySeconds: 120
   periodSeconds: 10
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
-  initialDelaySeconds: 30
+  initialDelaySeconds: 5
   periodSeconds: 10
 
 service:
@@ -62,19 +55,21 @@ ingress:
   enabled: false
 
 # Configure resource requests and limits
-resources:
-  small:
-    limits:
-      cpu: 200m
-      memory: 500Mi
-    requests:
-      cpu: 10m
-      memory: 200Mi
-  large:
-    limits:
-      cpu: 400m
-      memory: 1Gi
-    requests:
-      cpu: 20m
-      memory: 500Mi
-  unlimited: {}
+resources: {}
+# Allow END users to do this, if they want.  Detrimental to Test services
+#resources:
+#  small:
+#    limits:
+#      cpu: 200m
+#      memory: 500Mi
+#    requests:
+#      cpu: 10m
+#      memory: 200Mi
+#  large:
+#    limits:
+#      cpu: 400m
+#      memory: 1Gi
+#    requests:
+#      cpu: 20m
+#      memory: 500Mi
+#  unlimited: {}
index a2f9f55..f504063 100644 (file)
@@ -23,7 +23,7 @@ metadata:
   name: {{ include "common.fullname" . }}
   namespace: {{ include "common.namespace" . }}
 spec:
-  replicas: {{ .Values.replicaCount }}
+  replicas: {{ .Values.global.aaf.locate.replicas }}
   template:
     metadata:
       labels:
@@ -32,61 +32,69 @@ spec:
     spec:
       initContainers:
       - name: {{ include "common.name" . }}-config-container
-        image: "{{ include "common.repository" . }}/{{ .Values.global.configImage }}"
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_config:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+        command: ["bash","-c","cd /opt/app/aaf_config && bin/pod_wait.sh config aaf-service remove && bin/agent.sh"]
         volumeMounts:
           - mountPath: "/opt/app/osaaf"
-            name: {{ include "common.name" . }}-config-vol
+            name: aaf-config-vol
+          - mountPath: "/opt/app/aaf/status"
+            name: aaf-status-vol
         env:
-          - name: HOSTNAME
-            value: "{{ .Values.global.cadi.hostname }}"
-          - name: AAF_ENV
-            value: "{{ .Values.global.cadi.aaf_env }}"
-          - name: AAF_REGISTER_AS
-            value: "{{ .Values.aaf_register_as }}"
-          - name: LATITUDE
-            value: "{{ .Values.global.cadi.cadi_latitude }}"
-          - name: LONGITUDE
-            value: "{{ .Values.global.cadi.cadi_longitude }}"
-          - name: CASS_HOST
-            value: "{{ .Values.global.cadi.cass_host }}"
-          - name: AAF_LOCATOR_AS
-            value: "{{ .Values.global.cadi.cadi_locator_as }}"
-      - name: {{ include "common.name" . }}-readiness
-        command:
-        - /root/ready.py
-        args:
-        - --container-name
-        - aaf-service
-        env:
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              apiVersion: v1
-              fieldPath: metadata.namespace
-        image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
-        imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+          - name: aaf_env
+            value: "{{ .Values.global.aaf.aaf_env }}"
+          - name: cadi_latitude
+            value: "{{ .Values.global.aaf.cadi_latitude }}"
+          - name: cadi_longitude
+            value: "{{ .Values.global.aaf.cadi_longitude }}"
+          - name: cadi_x509_issuers
+            value: "{{ .Values.global.aaf.cadi_x509_issuers }}"
+          - name: aaf_locate_url
+            value: "https://aaf-locate.{{ .Release.Namespace}}:{{.Values.global.aaf.locate.internal_port}}"
+          - name: aaf_locator_container
+            value: "oom"
+          - name: aaf_release
+            value: "{{ .Values.global.aaf.aaf_release }}"
+          - name: aaf_locator_container_ns
+            value: "{{ .Release.Namespace }}"
+          - name: aaf_locator_container
+            value: "oom"
+          - name: aaf_locator_public_fqdn
+            value: "{{.Values.global.aaf.public_fqdn}}"
+          - name: aaf_locator_name
+            value: "{{.Values.global.aaf.aaf_locator_name}}"
+          - name: aaf_locator_name_oom
+            value: "{{.Values.global.aaf.aaf_locator_name_oom}}"
+          - name: CASSANDRA_CLUSTER
+            value: "{{.Values.global.aaf.cass.fqdn}}.{{ .Release.Namespace }}"
+#          - name: CASSANDRA_USER
+#            value: ""
+#          - name: CASSANDRA_PASSWORD
+#            value: ""
+#         - name: CASSANDRA_PORT
       containers:
       - name: {{ include "common.name" . }}
-        command: ["/bin/bash","/opt/app/aaf/pod/pod_wait.sh","aaf_locate","sleep","0","cd /opt/app/aaf;bin/locate"]
-        image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+        command: ["/bin/bash","-c","cd /opt/app/aaf && /bin/bash bin/pod_wait.sh aaf-locate aaf-service && exec bin/locate"]
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_locate:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
         volumeMounts:
         - mountPath: "/opt/app/osaaf"
-          name: {{ include "common.name" . }}-config-vol
+          name: aaf-config-vol
+        - mountPath: "/opt/app/aaf/status"
+          name: aaf-status-vol
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
         {{- if eq .Values.liveness.enabled true }}
         livenessProbe:
           tcpSocket:
-            port: {{ .Values.service.internalPort }}
+            port: {{ .Values.global.aaf.locate.internal_port }}
           initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
           periodSeconds: {{ .Values.liveness.periodSeconds }}
         {{ end -}}
         readinessProbe:
           tcpSocket:
-            port: {{ .Values.service.internalPort }}
+            port: {{ .Values.global.aaf.locate.internal_port }}
           initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
           periodSeconds: {{ .Values.readiness.periodSeconds }}
         resources:
@@ -103,7 +111,11 @@ spec:
       - name: localtime
         hostPath:
           path: /etc/localtime
-      - name: {{ include "common.name" . }}-config-vol
-        emptyDir: {}
+      - name: aaf-status-vol
+        persistentVolumeClaim:
+          claimName: {{ .Release.Name }}-aaf-status-pvc
+      - name: aaf-config-vol
+        persistentVolumeClaim:
+          claimName: {{ .Release.Name }}-aaf-config-pvc
       imagePullSecrets:
       - name: "{{ include "common.namespace" . }}-docker-registry-key"
index 281aa1c..606d947 100644 (file)
@@ -24,18 +24,10 @@ metadata:
     heritage: {{ .Release.Service }}
 spec:
   ports:
-    {{if eq .Values.service.type "NodePort" -}}
-    - port: {{ .Values.service.externalPort }}
-      #Example internal target port if required
-      #targetPort: {{ .Values.service.internalPort }}
-      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
-      name: {{ .Values.service.portName }}
-    {{- else -}}
-    - port: {{ .Values.service.externalPort }}
-      targetPort: {{ .Values.service.internalPort }}
-      name: {{ .Values.service.portName }}
-    {{- end}}
+    - port: {{ .Values.global.aaf.locate.internal_port }}
+      nodePort: {{ .Values.global.aaf.locate.public_port }}
+      name: aaf-locate
   selector:
     app: {{ include "common.name" . }}
     release: {{ .Release.Name }}
-  type: {{ .Values.service.type }}
+  type: "NodePort"
index a323837..c13f834 100644 (file)
@@ -24,14 +24,6 @@ flavor: small
 # Application configuration defaults.
 #################################################################
 # application image
-repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_locate:2.1.9-SNAPSHOT-latest
-aaf_register_as: "aaf-locate.onap"
-pullPolicy: Always
-
-
-# default number of instances
-replicaCount: 1
 
 nodeSelector: {}
 
@@ -39,14 +31,14 @@ affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 300
+  initialDelaySeconds: 120
   periodSeconds: 10
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
-  initialDelaySeconds: 30
+  initialDelaySeconds: 5
   periodSeconds: 10
 
 service:
@@ -56,25 +48,27 @@ service:
   #targetPort
   internalPort: 8095
   #port
-  externalPort: 8095
+  externalPort: 31111
 
 ingress:
   enabled: false
 
 # Configure resource requests and limits
-resources:
-  small:
-    limits:
-      cpu: 200m
-      memory: 600Mi
-    requests:
-      cpu: 20m
-      memory: 300Mi
-  large:
-    limits:
-      cpu: 400m
-      memory: 1Gi
-    requests:
-      cpu: 40m
-      memory: 500Mi
-  unlimited: {}
+resources: {}
+# Allow END users to do this, if they want.  Detrimental to Test services
+#resources:
+#  small:
+#    limits:
+#      cpu: 200m
+#      memory: 600Mi
+#    requests:
+#      cpu: 20m
+#      memory: 300Mi
+#  large:
+#    limits:
+#      cpu: 400m
+#      memory: 1Gi
+#    requests:
+#      cpu: 40m
+#      memory: 500Mi
+#  unlimited: {}
index f7a34f4..cc328d5 100644 (file)
@@ -23,7 +23,7 @@ metadata:
   name: {{ include "common.fullname" . }}
   namespace: {{ include "common.namespace" . }}
 spec:
-  replicas: {{ .Values.replicaCount }}
+  replicas: {{ .Values.global.aaf.oauth.replicas }}
   template:
     metadata:
       labels:
@@ -32,61 +32,69 @@ spec:
     spec:
       initContainers:
       - name: {{ include "common.name" . }}-config-container
-        image: "{{ include "common.repository" . }}/{{ .Values.global.configImage }}"
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_config:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+        command: ["bash","-c","cd /opt/app/aaf_config && bin/pod_wait.sh config aaf-service remove && bin/agent.sh"]
         volumeMounts:
           - mountPath: "/opt/app/osaaf"
-            name: {{ include "common.name" . }}-config-vol
+            name: aaf-config-vol
+          - mountPath: "/opt/app/aaf/status"
+            name: aaf-status-vol
         env:
-          - name: HOSTNAME
-            value: "{{ .Values.global.cadi.hostname }}"
-          - name: AAF_ENV
-            value: "{{ .Values.global.cadi.aaf_env }}"
-          - name: AAF_REGISTER_AS
-            value: "{{ .Values.aaf_register_as }}"
-          - name: LATITUDE
-            value: "{{ .Values.global.cadi.cadi_latitude }}"
-          - name: LONGITUDE
-            value: "{{ .Values.global.cadi.cadi_longitude }}"
-          - name: CASS_HOST
-            value: "{{ .Values.global.cadi.cass_host }}"
-          - name: AAF_LOCATOR_AS
-            value: "{{ .Values.global.cadi.cadi_locator_as }}"
-      - name: {{ include "common.name" . }}-readiness
-        command:
-        - /root/ready.py
-        args:
-        - --container-name
-        - aaf-locate
-        env:
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              apiVersion: v1
-              fieldPath: metadata.namespace
-        image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
-        imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+          - name: aaf_env
+            value: "{{ .Values.global.aaf.aaf_env }}"
+          - name: cadi_latitude
+            value: "{{ .Values.global.aaf.cadi_latitude }}"
+          - name: cadi_longitude
+            value: "{{ .Values.global.aaf.cadi_longitude }}"
+          - name: cadi_x509_issuers
+            value: "{{ .Values.global.aaf.cadi_x509_issuers }}"
+          - name: aaf_locate_url
+            value: "https://aaf-locate.{{ .Release.Namespace}}:{{.Values.global.aaf.locate.internal_port}}"
+          - name: aaf_locator_container
+            value: "oom"
+          - name: aaf_release
+            value: "{{ .Values.global.aaf.aaf_release }}"
+          - name: aaf_locator_container_ns
+            value: "{{ .Release.Namespace }}"
+          - name: aaf_locator_container
+            value: "oom"
+          - name: aaf_locator_public_fqdn
+            value: "{{.Values.global.aaf.public_fqdn}}"
+          - name: aaf_locator_name
+            value: "{{.Values.global.aaf.aaf_locator_name}}"
+          - name: aaf_locator_name_oom
+            value: "{{.Values.global.aaf.aaf_locator_name_oom}}"
+          - name: CASSANDRA_CLUSTER
+            value: "{{.Values.global.aaf.cass.fqdn}}.{{ .Release.Namespace }}"
+#          - name: CASSANDRA_USER
+#            value: ""
+#          - name: CASSANDRA_PASSWORD
+#            value: ""
+#         - name: CASSANDRA_PORT
       containers:
       - name: {{ include "common.name" . }}
-        command: ["/bin/bash","/opt/app/aaf/pod/pod_wait.sh","aaf_oauth","sleep","0","cd /opt/app/aaf;bin/oauth"]
-        image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+        command: ["/bin/bash","-c","cd /opt/app/aaf && /bin/bash bin/pod_wait.sh aaf-oauth aaf-service && exec bin/oauth"]
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_oauth:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
         volumeMounts:
         - mountPath: "/opt/app/osaaf"
-          name: {{ include "common.name" . }}-config-vol
+          name: aaf-config-vol
+        - mountPath: "/opt/app/aaf/status"
+          name: aaf-status-vol
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
         {{- if eq .Values.liveness.enabled true }}
         livenessProbe:
           tcpSocket:
-            port: {{ .Values.service.internalPort }}
+            port: {{ .Values.global.aaf.oauth.internal_port }}
           initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
           periodSeconds: {{ .Values.liveness.periodSeconds }}
         {{ end -}}
         readinessProbe:
           tcpSocket:
-            port: {{ .Values.service.internalPort }}
+            port: {{ .Values.global.aaf.oauth.internal_port }}
           initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
           periodSeconds: {{ .Values.readiness.periodSeconds }}
         resources:
@@ -103,7 +111,11 @@ spec:
       - name: localtime
         hostPath:
           path: /etc/localtime
-      - name: {{ include "common.name" . }}-config-vol
-        emptyDir: {}
+      - name: aaf-status-vol
+        persistentVolumeClaim:
+          claimName: {{ .Release.Name }}-aaf-status-pvc
+      - name: aaf-config-vol
+        persistentVolumeClaim:
+          claimName: {{ .Release.Name }}-aaf-config-pvc
       imagePullSecrets:
       - name: "{{ include "common.namespace" . }}-docker-registry-key"
index d94bcae..bb4a051 100644 (file)
@@ -24,16 +24,10 @@ metadata:
     heritage: {{ .Release.Service }}
 spec:
   ports:
-    {{if eq .Values.service.type "NodePort" -}}
-    - port: {{ .Values.service.externalPort }}
-      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
-      name: {{ .Values.service.portName }}
-    {{- else -}}
-    - port: {{ .Values.service.externalPort }}
-      targetPort: {{ .Values.service.internalPort }}
-      name: {{ .Values.service.portName }}
-    {{- end}}
+    - port: {{ .Values.global.aaf.oauth.internal_port }}
+      nodePort: {{ .Values.global.aaf.oauth.public_port }}
+      name: aaf-oauth
   selector:
     app: {{ include "common.name" . }}
     release: {{ .Release.Name }}
-  type: {{ .Values.service.type }}
+  type: "NodePort"
index 7cb9f3b..3c29a62 100644 (file)
 global:
   nodePortPrefix: 302
   readinessRepository: oomk8s
-  readinessImage: readiness-check:2.0.0
+  readinessImage: readiness-check:2.0.2
 flavor: small
 #################################################################
 # Application configuration defaults.
 #################################################################
 # application image
-repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_oauth:2.1.9-SNAPSHOT-latest
-aaf_register_as: "aaf-oauth.onap"
-pullPolicy: Always
-
-
-# default number of instances
-replicaCount: 1
-
 nodeSelector: {}
 
 affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 300
+  initialDelaySeconds: 120
   periodSeconds: 10
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
-  initialDelaySeconds: 30
+  initialDelaySeconds: 5
   periodSeconds: 10
 
 service:
@@ -62,19 +53,21 @@ ingress:
   enabled: false
 
 # Configure resource requests and limits
-resources:
-  small:
-    limits:
-      cpu: 200m
-      memory: 400Mi
-    requests:
-      cpu: 20m
-      memory: 200Mi
-  large:
-    limits:
-      cpu: 400m
-      memory: 600Mi
-    requests:
-      cpu: 40m
-      memory: 200Mi
-  unlimited: {}
+resources: {}
+# Allow END users to do this, if they want.  Detrimental to Test services
+#resources:
+#  small:
+#    limits:
+#      cpu: 200m
+#      memory: 400Mi
+#    requests:
+#      cpu: 20m
+#      memory: 200Mi
+#  large:
+#    limits:
+#      cpu: 400m
+#      memory: 600Mi
+#    requests:
+#      cpu: 40m
+#      memory: 200Mi
+#  unlimited: {}
index 92d2312..1801bfe 100644 (file)
@@ -23,7 +23,7 @@ metadata:
   name: {{ include "common.fullname" . }}
   namespace: {{ include "common.namespace" . }}
 spec:
-  replicas: {{ .Values.replicaCount }}
+  replicas: {{ .Values.global.aaf.service.replicas }}
   template:
     metadata:
       labels:
@@ -32,61 +32,72 @@ spec:
     spec:
       initContainers:
       - name: {{ include "common.name" . }}-config-container
-        image: "{{ include "common.repository" . }}/{{ .Values.global.configImage }}"
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_config:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+        command: ["bash","-c","cd /opt/app/aaf_config && bin/pod_wait.sh config nc aaf-cass.{{ .Release.Namespace }} 9042 sleep 15 remove && bin/agent.sh"]
         volumeMounts:
           - mountPath: "/opt/app/osaaf"
-            name: {{ include "common.name" . }}-config-vol
+            name: aaf-config-vol
+          - mountPath: "/opt/app/aaf/status"
+            name: aaf-status-vol
         env:
-          - name: HOSTNAME
-            value: "{{ .Values.global.cadi.hostname }}"
-          - name: AAF_ENV
-            value: "{{ .Values.global.cadi.aaf_env }}"
-          - name: AAF_REGISTER_AS
-            value: "{{ .Values.aaf_register_as }}"
-          - name: LATITUDE
-            value: "{{ .Values.global.cadi.cadi_latitude }}"
-          - name: LONGITUDE
-            value: "{{ .Values.global.cadi.cadi_longitude }}"
-          - name: CASS_HOST
-            value: "{{ .Values.global.cadi.cass_host }}"
-          - name: AAF_LOCATOR_AS
-            value: "{{ .Values.global.cadi.cadi_locator_as }}"
-      - name: {{ include "common.name" . }}-readiness
-        command:
-        - /root/ready.py
-        args:
-        - --container-name
-        - aaf-cs
-        env:
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              apiVersion: v1
-              fieldPath: metadata.namespace
-        image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
-        imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+          - name: aaf_env
+            value: "{{ .Values.global.aaf.aaf_env }}"
+          - name: cadi_latitude
+            value: "{{ .Values.global.aaf.cadi_latitude }}"
+          - name: cadi_longitude
+            value: "{{ .Values.global.aaf.cadi_longitude }}"
+          - name: cadi_x509_issuers
+            value: "{{ .Values.global.aaf.cadi_x509_issuers }}"
+          - name: aaf_locate_url
+            value: "https://aaf-locate.{{ .Release.Namespace}}:{{.Values.global.aaf.locate.internal_port}}"
+          - name: aaf_locator_container
+            value: "oom"
+          - name: aaf_release
+            value: "{{ .Values.global.aaf.aaf_release }}"
+          - name: aaf_locator_container_ns
+            value: "{{ .Release.Namespace }}"
+          - name: aaf_locator_public_fqdn
+            value: "{{.Values.global.aaf.public_fqdn}}"
+          - name: aaf_locator_name
+            value: "{{.Values.global.aaf.aaf_locator_name}}"
+          - name: aaf_locator_name_oom
+            value: "{{.Values.global.aaf.aaf_locator_name_oom}}"
+          - name: CASSANDRA_CLUSTER
+            value: "{{.Values.global.aaf.cass.fqdn}}.{{ .Release.Namespace }}"
+#          - name: CASSANDRA_USER
+#            value: ""
+#          - name: CASSANDRA_PASSWORD
+#            value: ""
+#         - name: CASSANDRA_PORT
+#            value: ""
       containers:
       - name: {{ include "common.name" . }}
-        command: ["/bin/bash","/opt/app/aaf/pod/pod_wait.sh","aaf_service","sleep","0","cd /opt/app/aaf;bin/service"]
-        image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+        command: ["/bin/bash","-c","cd /opt/app/aaf && bin/pod_wait.sh aaf-service aaf-cass  && exec bin/service"]
+        image: {{ .Values.global.repository }}/onap/aaf/aaf_service:{{.Values.global.aaf.imageVersion}}
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+        lifecycle:
+          preStop:
+            exec:
+              command: ["/bin/sh","-c","rm /opt/app/aaf/status/aaf-service* && echo $HOSTNAME >> aaf-service.hosts"]
         volumeMounts:
         - mountPath: "/opt/app/osaaf"
-          name: {{ include "common.name" . }}-config-vol
+          name: aaf-config-vol
+        - mountPath: "/opt/app/aaf/status"
+          name: aaf-status-vol
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
         {{- if eq .Values.liveness.enabled true }}
         livenessProbe:
           tcpSocket:
-            port: {{ .Values.service.internalPort }}
+            port: {{ .Values.global.aaf.service.internal_port }}
           initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
           periodSeconds: {{ .Values.liveness.periodSeconds }}
         {{ end -}}
         readinessProbe:
           tcpSocket:
-            port: {{ .Values.service.internalPort }}
+            port: {{ .Values.global.aaf.service.internal_port }}
           initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
           periodSeconds: {{ .Values.readiness.periodSeconds }}
         resources:
@@ -103,7 +114,11 @@ spec:
       - name: localtime
         hostPath:
           path: /etc/localtime
-      - name: {{ include "common.name" . }}-config-vol
-        emptyDir: {}
+      - name: aaf-status-vol
+        persistentVolumeClaim:
+          claimName: {{ .Release.Name }}-aaf-status-pvc
+      - name: aaf-config-vol
+        persistentVolumeClaim:
+          claimName: {{ .Release.Name }}-aaf-config-pvc
       imagePullSecrets:
       - name: "{{ include "common.namespace" . }}-docker-registry-key"
index 281aa1c..4a318ff 100644 (file)
@@ -24,18 +24,10 @@ metadata:
     heritage: {{ .Release.Service }}
 spec:
   ports:
-    {{if eq .Values.service.type "NodePort" -}}
-    - port: {{ .Values.service.externalPort }}
-      #Example internal target port if required
-      #targetPort: {{ .Values.service.internalPort }}
-      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
-      name: {{ .Values.service.portName }}
-    {{- else -}}
-    - port: {{ .Values.service.externalPort }}
-      targetPort: {{ .Values.service.internalPort }}
-      name: {{ .Values.service.portName }}
-    {{- end}}
+    - port: {{ .Values.global.aaf.service.internal_port }}
+      nodePort: {{ .Values.global.aaf.service.public_port }}
+      name: aaf-service
   selector:
     app: {{ include "common.name" . }}
     release: {{ .Release.Name }}
-  type: {{ .Values.service.type }}
+  type: "NodePort"
index 42cebde..8794afe 100644 (file)
@@ -24,14 +24,6 @@ flavor: small
 # Application configuration defaults.
 #################################################################
 # application image
-repository: nexus3.onap.org:10001
-image: onap/aaf/aaf_service:2.1.9-SNAPSHOT-latest
-aaf_register_as: "aaf-service.onap"
-pullPolicy: Always
-
-
-# default number of instances
-replicaCount: 1
 
 nodeSelector: {}
 
@@ -39,14 +31,14 @@ affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 300
+  initialDelaySeconds: 120
   periodSeconds: 10
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
-  initialDelaySeconds: 30
+  initialDelaySeconds: 5
   periodSeconds: 10
 
 service:
@@ -56,26 +48,27 @@ service:
   #targetPort
   internalPort: 8100
   #port
-  externalPort: 8100
-  nodePort: 47
+  externalPort: 31110
 
 ingress:
   enabled: false
 
 # Configure resource requests and limits
-resources:
-  small:
-    limits:
-      cpu: 200m
-      memory: 600Mi
-    requests:
-      cpu: 20m
-      memory: 300Mi
-  large:
-    limits:
-      cpu: 400m
-      memory: 1Gi
-    requests:
-      cpu: 40m
-      memory: 300Mi
-  unlimited: {}
+resources: {}
+# Allow END users to do this, if they want.  Detrimental to Test services
+#resources:
+#  small:
+#    limits:
+#      cpu: 200m
+#      memory: 800Mi
+#    requests:
+#      cpu: 20m
+#      memory: 300Mi
+#  large:
+#    limits:
+#      cpu: 400m
+#      memory: 1Gi
+#    requests:
+#      cpu: 40m
+#      memory: 300Mi
+#  unlimited: {}
index 7cd5938..4dc13a5 100644 (file)
@@ -28,7 +28,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/aaf/smsquorumclient:3.0.1
+image: onap/aaf/smsquorumclient:4.0.0
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index 355cd60..360bf42 100644 (file)
@@ -108,4 +108,4 @@ resources:
     requests:
       cpu: 10m
       memory: 100Mi
-  unlimited: {}
\ No newline at end of file
+  unlimited: {}
index 28b46c2..4a8a9c0 100644 (file)
@@ -28,7 +28,7 @@ flavor: small
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/aaf/sms:3.0.1
+image: onap/aaf/sms:4.0.0
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index bf64c6d..1fcc155 100644 (file)
@@ -21,7 +21,7 @@
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/aaf/abrmd:3.0.0
+image: onap/aaf/abrmd:4.0.0
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index 3993cfc..e8d2f7e 100644 (file)
@@ -23,7 +23,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/aaf/distcenter:3.0.0
+image: onap/aaf/distcenter:4.0.0
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index 3fd53d2..5c3618d 100644 (file)
@@ -23,7 +23,7 @@ enabled: true
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/aaf/testcaservice:3.0.0
+image: onap/aaf/testcaservice:4.0.0
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index d068846..2d20d57 100644 (file)
@@ -62,4 +62,4 @@ resources:
     requests:
       cpu: 10m
       memory: 100Mi
-  unlimited: {}
\ No newline at end of file
+  unlimited: {}
diff --git a/kubernetes/aaf/templates/aaf-config-pv.yaml b/kubernetes/aaf/templates/aaf-config-pv.yaml
new file mode 100644 (file)
index 0000000..9a1e802
--- /dev/null
@@ -0,0 +1,48 @@
+{{- if and .Values.global.persistence.enabled (not .Values.persistence.existingClaim) -}}
+#########
+##  ============LICENSE_START====================================================
+##  org.onap.aaf
+##  ===========================================================================
+##  Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+##  ===========================================================================
+##  Licensed under the Apache License, Version 2.0 (the "License");
+##  you may not use this file except in compliance with the License.
+##  You may obtain a copy of the License at
+##
+##       http://www.apache.org/licenses/LICENSE-2.0
+##
+##  Unless required by applicable law or agreed to in writing, software
+##  distributed under the License is distributed on an "AS IS" BASIS,
+##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+##  See the License for the specific language governing permissions and
+##  limitations under the License.
+##  ============LICENSE_END====================================================
+##
+
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+  name: {{ .Release.Name }}-aaf-config-pv
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ .Chart.Name  }}-config
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+    name: {{ include "common.fullname" . }}
+spec:
+  capacity:
+    storage: {{ .Values.persistence.config.size}}
+  accessModes:
+    - {{ .Values.persistence.config.accessMode }}
+  persistentVolumeReclaimPolicy: {{ .Values.persistence.config.volumeReclaimPolicy }}
+  hostPath:
+     path: {{ .Values.persistence.config.mountPath }}
+{{- if .Values.persistence.config.storageClass }}
+{{- if (eq "-" .Values.persistence.config.storageClass) }}
+  storageClassName: ""
+{{- else }}
+  storageClassName: "{{ .Values.persistence.config.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end -}}
diff --git a/kubernetes/aaf/templates/aaf-config-pvc.yaml b/kubernetes/aaf/templates/aaf-config-pvc.yaml
new file mode 100644 (file)
index 0000000..b22df6d
--- /dev/null
@@ -0,0 +1,52 @@
+{{- if and .Values.global.persistence.enabled (not .Values.persistence.existingClaim) -}}
+#########
+##  ============LICENSE_START====================================================
+##  org.onap.aaf
+##  ===========================================================================
+##  Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+##  ===========================================================================
+##  Licensed under the Apache License, Version 2.0 (the "License");
+##  you may not use this file except in compliance with the License.
+##  You may obtain a copy of the License at
+##
+##       http://www.apache.org/licenses/LICENSE-2.0
+##
+##  Unless required by applicable law or agreed to in writing, software
+##  distributed under the License is distributed on an "AS IS" BASIS,
+##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+##  See the License for the specific language governing permissions and
+##  limitations under the License.
+##  ============LICENSE_END====================================================
+##
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: {{ .Release.Name }}-aaf-config-pvc
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+{{- if .Values.persistence.annotations }}
+  annotations:
+{{ toYaml .Values.persistence.annotations | indent 4 }}
+{{- end }}
+spec:
+  selector:
+    matchLabels:
+      app: {{ include "common.name" . }}-config
+  accessModes:
+    - {{ .Values.persistence.config.accessMode }}
+  resources:
+    requests:
+      storage: {{ .Values.persistence.config.size }}
+{{- if .Values.persistence.config.storageClass }}
+{{- if (eq "-" .Values.persistence.config.storageClass) }}
+  storageClassName: ""
+{{- else }}
+  storageClassName: "{{ .Values.persistence.config.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end -}}
+
diff --git a/kubernetes/aaf/templates/aaf-status-pv.yaml b/kubernetes/aaf/templates/aaf-status-pv.yaml
new file mode 100644 (file)
index 0000000..b8d12c5
--- /dev/null
@@ -0,0 +1,48 @@
+{{- if and .Values.global.persistence.enabled (not .Values.persistence.existingClaim) -}}
+#########
+##  ============LICENSE_START====================================================
+##  org.onap.aaf
+##  ===========================================================================
+##  Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+##  ===========================================================================
+##  Licensed under the Apache License, Version 2.0 (the "License");
+##  you may not use this file except in compliance with the License.
+##  You may obtain a copy of the License at
+##
+##       http://www.apache.org/licenses/LICENSE-2.0
+##
+##  Unless required by applicable law or agreed to in writing, software
+##  distributed under the License is distributed on an "AS IS" BASIS,
+##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+##  See the License for the specific language governing permissions and
+##  limitations under the License.
+##  ============LICENSE_END====================================================
+##
+
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+  name: {{ .Release.Name }}-aaf-status-pv
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ .Chart.Name  }}-status
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+    name: {{ include "common.fullname" . }}
+spec:
+  capacity:
+    storage: {{ .Values.persistence.status.size}}
+  accessModes:
+    - {{ .Values.persistence.status.accessMode }}
+  persistentVolumeReclaimPolicy: {{ .Values.persistence.status.volumeReclaimPolicy }}
+  hostPath:
+     path: {{ .Values.persistence.status.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.status.mountSubPath }}
+{{- if .Values.persistence.status.storageClass }}
+{{- if (eq "-" .Values.persistence.status.storageClass) }}
+  storageClassName: ""
+{{- else }}
+  storageClassName: "{{ .Values.persistence.status.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end -}}
diff --git a/kubernetes/aaf/templates/aaf-status-pvc.yaml b/kubernetes/aaf/templates/aaf-status-pvc.yaml
new file mode 100644 (file)
index 0000000..870ac9c
--- /dev/null
@@ -0,0 +1,52 @@
+{{- if and .Values.global.persistence.enabled (not .Values.persistence.existingClaim) -}}
+#########
+##  ============LICENSE_START====================================================
+##  org.onap.aaf
+##  ===========================================================================
+##  Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
+##  ===========================================================================
+##  Licensed under the Apache License, Version 2.0 (the "License");
+##  you may not use this file except in compliance with the License.
+##  You may obtain a copy of the License at
+##
+##       http://www.apache.org/licenses/LICENSE-2.0
+##
+##  Unless required by applicable law or agreed to in writing, software
+##  distributed under the License is distributed on an "AS IS" BASIS,
+##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+##  See the License for the specific language governing permissions and
+##  limitations under the License.
+##  ============LICENSE_END====================================================
+##
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: {{ .Release.Name }}-aaf-status-pvc
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+{{- if .Values.persistence.annotations }}
+  annotations:
+{{ toYaml .Values.persistence.annotations | indent 4 }}
+{{- end }}
+spec:
+  selector:
+    matchLabels:
+      app: {{ include "common.name" . }}-status
+  accessModes:
+    - {{ .Values.persistence.status.accessMode }}
+  resources:
+    requests:
+      storage: {{ .Values.persistence.status.size }}
+{{- if .Values.persistence.status.storageClass }}
+{{- if (eq "-" .Values.persistence.status.storageClass) }}
+  storageClassName: ""
+{{- else }}
+  storageClassName: "{{ .Values.persistence.status.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end -}}
+
index 5a095e4..1ddb604 100644 (file)
 global:
   nodePortPrefix: 302
   readinessRepository: oomk8s
-  readinessImage: readiness-check:2.0.0
+  readinessImage: readiness-check:2.0.2
   ubuntuInitRepository: registry.hub.docker.com
   ubuntuInitImage: oomk8s/ubuntu-init:2.0.0
-  configImage: onap/aaf/aaf_config:2.1.9-SNAPSHOT-latest
   persistence:
     enabled: true
+  pullPolicy: Always
+  repository: "nexus3.onap.org:10001"
+  # pullPolicy: IfNotPresent
+  # repository: "nexus3.onap.org:10003"
 
-  cadi:
-    hostname: "aaf.onap"
+  aaf:
+    imageVersion: 2.1.13
+    #imageVersion: latest
+    readiness: false
+    aaf_env: "DEV"
+    public_fqdn: "aaf.osaaf.org"
+    aaf_release: "Dublin"
+  # DUBLIN ONLY - for M4 compatibility with Casablanca
+    aaf_locator_name: "public.%NS.%N"
+    aaf_locator_name_oom: "%NS.%N"
+  # EL ALTO and Beyond
+  #  aaf_locator_name: "%NS.%N"
+  #  aaf_locator_name_oom: "%CNS.%NS.%N"
     cadi_latitude: "38.0"
     cadi_longitude: "-72.0"
-    aaf_env: "DEV"
-    cass_host: "aaf-cass.onap"
-    cadi_locator_as: "aaf-locate.onap"
+    cadi_x509_issuers: "CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US"
+
+    cass:
+      replicas: 1
+      fqdn: "aaf-cass"
+      cluster_name: "osaaf"
+      heap_new_size: "512M"
+      max_heap_size: "1024M"
+      storage_port: 7000
+      ssl_storage_port: 7001
+      native_trans_port: 9042
+      rpc_port: 9160
+      dc: "dc1"
+    service:
+      replicas: 1
+      fqdn: "aaf-service"
+      internal_port: 8100
+      public_port: 31110
+    locate:
+      replicas: 1
+      fqdn: "aaf-locate"
+      internal_port: 8095
+      public_port: 31111
+    oauth:
+      replicas: 1
+      fqdn: "aaf0oauth"
+      internal_port: 8140
+      public_port: 31112
+    gui:
+      replicas: 1
+      fqdn: "aaf-gui"
+      internal_port: 8200
+      public_port: 31113
+    cm:
+      replicas: 1
+      fqdn: "aaf-cm"
+      internal_port: 8150
+      public_port: 31114
+    fs:
+      replicas: 1
+      fqdn: "aaf-fs"
+      internal_port: 8096
+      public_port: 31115
+    hello:
+      replicas: 0
+      fqdn: "aaf-hello"
+      internal_port: 8130
+      public_port: 31116
 
 #################################################################
 # Application configuration defaults.
 #################################################################
-repository: nexus3.onap.org:10001
 
 flavor: small
 # default number of instances
@@ -48,14 +106,14 @@ affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 10
+  initialDelaySeconds: 350
   periodSeconds: 10
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
   enabled: true
 
 readiness:
-  initialDelaySeconds: 10
+  initialDelaySeconds: 150
   periodSeconds: 10
 
 ingress:
@@ -63,31 +121,35 @@ ingress:
 
 ## Persist data to a persitent volume
 persistence:
-  mountPath: "/mnt/data/aaf"
   enabled: true
   config:
     #existingClaim:
-    volumeReclaimPolicy: Retain
+    volumeReclaimPolicy: Delete
     accessMode: ReadWriteMany
     size: 2Gi
-    mountSubPath: "config"
     storageClass: "manual"
+    mountPath: "/mnt/data/aaf/config"
   logs:
     #existingClaim:
     volumeReclaimPolicy: Retain
     accessMode: ReadWriteMany
     size: 2Gi
-    mountSubPath: "logs"
     storageClass: "manual"
-
-aaf-cs:
-  persistence:
-    #existingClaim:
+    mountPath: "/mnt/data/aaf/logs"
+  status:
+    volumeReclaimPolicy: Delete
+    accessMode: ReadWriteMany
+    size: 2M
+    storageClass: "manual"
     mountPath: /dockerdata-nfs
-    mountSubPath: "cass"
+    mountSubPath: "status"
+  cass:
+    #existingClaim:
     volumeReclaimPolicy: Retain
     accessMode: ReadWriteOnce
     size: 10Gi
     storageClass: "manual"
+    mountPath: /dockerdata-nfs
+    mountSubPath: "cass"
 
 resources: {}
index 1dba1b1..e67a94e 160000 (submodule)
@@ -1 +1 @@
-Subproject commit 1dba1b169b3302d8abe9a24ade25679813b1348f
+Subproject commit e67a94e6be333271c8237d6ebd5fb0f489401350
index 5f1cc5a..1df20c2 100644 (file)
@@ -30,21 +30,6 @@ spec:
         app: {{ include "common.name" . }}
         release: {{ .Release.Name }}
     spec:
-      initContainers:
-      - command:
-        - /root/ready.py
-        args:
-        - --container-name
-        - {{ .Values.config.appcChartName }}
-        env:
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              apiVersion: v1
-              fieldPath: metadata.namespace
-        image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
-        imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
-        name: {{ include "common.name" . }}-readiness
       containers:
         - name: {{ include "common.name" . }}
           command: ["/bin/bash"]
index 5c8d4c7..59cf29f 100644 (file)
@@ -29,7 +29,7 @@ flavor: small
 
 # application image
 repository: nexus3.onap.org:10001
-image: onap/ccsdk-ansible-server-image:0.4.2-STAGING-latest
+image: onap/ccsdk-ansible-server-image:0.4.2
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index 2a5c5ce..349336e 100755 (executable)
@@ -30,7 +30,6 @@
         </encoder>
     </appender>
 
-
     <logger name="org.springframework" level="info"/>
     <logger name="org.springframework.web" level="info"/>
     <logger name="org.springframework.security.web.authentication" level="warn"/>
index 5c8bc8c..8985424 100755 (executable)
@@ -15,7 +15,7 @@
 apiVersion: v1
 kind: Service
 metadata:
-  name: {{ include "common.servicename" . }}
+  name: {{ include "common.servicename" . }}-http
   namespace: {{ include "common.namespace" . }}
   labels:
     app: {{ include "common.name" . }}
@@ -24,20 +24,35 @@ metadata:
     heritage: {{ .Release.Service }}
   annotations:
 spec:
-  type: {{ .Values.service.type }}
+  type: {{ .Values.service.http.type }}
   ports:
     - port: {{ .Values.service.http.externalPort }}
       targetPort: {{ .Values.service.http.internalPort }}
-      {{- if eq .Values.service.type "NodePort"}}
-      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
+      {{- if eq .Values.service.http.type "NodePort"}}
+      nodePort: {{ .Values.global.nodePortPrefixExt | default .Values.nodePortPrefixExt }}{{ .Values.service.http.nodePort }}
       {{- end}}
       name: {{ .Values.service.http.portName | default "http" }}
+  selector:
+    app: {{ include "common.name" . }}
+    release: {{ .Release.Name }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "common.servicename" . }}-grpc
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+  annotations:
+spec:
+  type: {{ .Values.service.grpc.type }}
+  ports:
     - port: {{ .Values.service.grpc.externalPort }}
       targetPort: {{ .Values.service.grpc.internalPort }}
-      {{- if eq .Values.service.type "NodePort"}}
-      nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
-      {{- end}}
       name: {{ .Values.service.grpc.portName | default "grpc" }}
   selector:
     app: {{ include "common.name" . }}
-    release: {{ .Release.Name }}
\ No newline at end of file
+    release: {{ .Release.Name }}
index 05ef5fe..6b88f84 100755 (executable)
@@ -20,7 +20,7 @@
 global:
   # Change to an unused port prefix range to prevent port conflicts
   # with other instances running within the same k8s cluster
-  nodePortPrefix: 302
+  nodePortPrefixExt: 304
 
   # image repositories
   repository: nexus3.onap.org:10001
@@ -40,7 +40,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/ccsdk-blueprintsprocessor:0.4-STAGING-latest
+image: onap/ccsdk-blueprintsprocessor:0.4.2
 pullPolicy: Always
 
 # flag to enable debugging - application support required
@@ -70,19 +70,20 @@ readiness:
   periodSeconds: 10
 
 service:
-  type: ClusterIP
   http:
+    type: NodePort
     portName: blueprints-processor-http
     internalPort: 8080
     externalPort: 8080
+    nodePort: 99
   grpc:
+    type: ClusterIP
     portName: blueprints-processor-grpc
     internalPort: 9111
     externalPort: 9111
 
 
 persistence:
-  enabled: true
   volumeReclaimPolicy: Retain
   accessMode: ReadWriteMany
   size: 2Gi
index fa49735..0c3ea78 100755 (executable)
@@ -40,7 +40,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/ccsdk-commandexecutor:0.4.2-STAGING-latest
+image: onap/ccsdk-commandexecutor:0.4.2
 pullPolicy: Always
 
 # application configuration
index e965e2f..2438358 100644 (file)
@@ -30,7 +30,6 @@
         </encoder>
     </appender>
 
-
     <logger name="org.springframework" level="info"/>
     <logger name="org.springframework.web" level="info"/>
     <logger name="org.springframework.security.web.authentication" level="warn"/>
index 246aae4..1fc0acb 100755 (executable)
@@ -38,7 +38,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/ccsdk-controllerblueprints:0.4-STAGING-latest
+image: onap/ccsdk-controllerblueprints:0.4.2
 pullPolicy: Always
 
 # flag to enable debugging - application support required
old mode 100644 (file)
new mode 100755 (executable)
similarity index 81%
rename from kubernetes/vfc/charts/vfc-nokia-vnfm-driver/Chart.yaml
rename to kubernetes/cds/charts/cds-sdc-listener/Chart.yaml
index 7748cc0..4385de1
@@ -1,4 +1,4 @@
-# Copyright © 2017 Amdocs, Bell Canada
+# Copyright (c) 2019 Bell Canada
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,6 +13,6 @@
 # limitations under the License.
 
 apiVersion: v1
-description: ONAP VFC - Nokia VNFM Driver
-name: vfc-nokia-vnfm-driver
+description: ONAP CDS SDC listener microservice
+name: cds-sdc-listener
 version: 4.0.0
\ No newline at end of file
diff --git a/kubernetes/cds/charts/cds-sdc-listener/requirements.yaml b/kubernetes/cds/charts/cds-sdc-listener/requirements.yaml
new file mode 100755 (executable)
index 0000000..a57d2b6
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright (c) 2019 Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+dependencies:
+  - name: common
+    version: ~4.x-0
+    repository: '@local'
\ No newline at end of file
diff --git a/kubernetes/cds/charts/cds-sdc-listener/resources/config/application.yaml b/kubernetes/cds/charts/cds-sdc-listener/resources/config/application.yaml
new file mode 100644 (file)
index 0000000..c02eac2
--- /dev/null
@@ -0,0 +1,20 @@
+listenerservice:
+  config:
+    asdcAddress: sdc-be:8443 #SDC-BE
+    messageBusAddress: message-router #Message-Router
+    user: vid #SDC-username
+    password: Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U #SDC-password
+    pollingInterval: 15
+    pollingTimeout: 60
+    relevantArtifactTypes: TOSCA_CSAR
+    consumerGroup: cds
+    environmentName: AUTO
+    consumerId: cds
+    keyStorePassword:
+    keyStorePath:
+    activateServerTLSAuth : false
+    isUseHttpsWithDmaap: false
+    archivePath: /opt/app/onap/sdc-listener/
+    grpcAddress: cds-blueprints-processor
+    grpcPort: 9111
+    authHeader: Basic Y2NzZGthcHBzOmNjc2RrYXBwcw==
\ No newline at end of file
diff --git a/kubernetes/cds/charts/cds-sdc-listener/resources/config/logback.xml b/kubernetes/cds/charts/cds-sdc-listener/resources/config/logback.xml
new file mode 100644 (file)
index 0000000..5715226
--- /dev/null
@@ -0,0 +1,43 @@
+<!--
+  ~ Copyright (c) 2019 Bell Canada
+  ~
+  ~ Licensed under the Apache License, Version 2.0 (the "License");
+  ~ you may not use this file except in compliance with the License.
+  ~ You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<configuration>
+
+  <property name="localPattern" value="%d{HH:mm:ss.SSS} %-5level %logger{100} - %msg%n" />
+
+  <property name="defaultPattern" value="%date{ISO8601,UTC}|%X{RequestID}|%X{ServiceInstanceId}|%thread|%X{VirtualServerName}|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{ClassName}|%X{Timer}| %msg%n" />
+
+  <property name="debugLoggerPattern" value="%date{ISO8601,UTC}|%X{RequestID}|%X{ServiceInstanceId}|%thread|%X{VirtualServerName}|%X{ServiceName}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{ServerFQDN}|%X{RemoteHost}|%X{ClassName}|%X{Timer}|[%caller{3}]| %msg%n" />
+
+  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+    <!-- encoders are assigned the type
+         ch.qos.logback.classic.encoder.PatternLayoutEncoder by default -->
+    <encoder>
+      <pattern>${defaultPattern}</pattern>
+    </encoder>
+  </appender>
+
+  <logger name="org.springframework" level="info"/>
+  <logger name="org.springframework.web" level="info"/>
+  <logger name="org.springframework.security.web.authentication" level="warn"/>
+  <logger name="org.hibernate" level="error"/>
+  <logger name="org.onap.ccsdk.cds" level="info"/>
+
+  <root level="warn">
+    <appender-ref ref="STDOUT"/>
+  </root>
+
+</configuration>
\ No newline at end of file
@@ -1,4 +1,4 @@
-# Copyright © 2017 Amdocs, Bell Canada
+# Copyright (c) 2019 Bell Canada
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: {{ include "common.fullname" . }}-logging-configmap
+  name: {{ include "common.fullname" . }}-configmap
   namespace: {{ include "common.namespace" . }}
 data:
-{{ tpl (.Files.Glob "resources/config/logging/*").AsConfig . | indent 2 }}
\ No newline at end of file
+{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
\ No newline at end of file
diff --git a/kubernetes/cds/charts/cds-sdc-listener/templates/deployment.yaml b/kubernetes/cds/charts/cds-sdc-listener/templates/deployment.yaml
new file mode 100644 (file)
index 0000000..0dfc68b
--- /dev/null
@@ -0,0 +1,102 @@
+# Copyright (c) 2019 Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: {{ include "common.fullname" . }}
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  template:
+    metadata:
+      labels:
+        app: {{ include "common.name" . }}
+        release: {{ .Release.Name }}
+    spec:
+      initContainers:
+        - command:
+          - /root/ready.py
+          args:
+            - --container-name
+            - sdc-be
+            - --container-name
+            - message-router
+            - --container-name
+            - cds-blueprints-processor
+          env:
+          - name: NAMESPACE
+            valueFrom:
+              fieldRef:
+                apiVersion: v1
+                fieldPath: metadata.namespace
+          image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
+          imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+          name: {{ include "common.name" . }}-readiness
+      containers:
+        - name: {{ include "common.name" . }}
+          image: "{{ include "common.repository" . }}/{{ .Values.image }}"
+          imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
+          env:
+          - name: APP_CONFIG_HOME
+            value: {{ .Values.config.appConfigDir }}
+          ports:
+          - containerPort: {{ .Values.service.http.internalPort }}
+          {{ if .Values.liveness.enabled }}
+          livenessProbe:
+            httpGet:
+              path: /api/v1/sdclistener/healthcheck
+              port: {{ .Values.service.http.internalPort }}
+            initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
+            periodSeconds: {{ .Values.liveness.periodSeconds }}
+          {{end}}
+          readinessProbe:
+            httpGet:
+              path: /api/v1/sdclistener/healthcheck
+              port: {{ .Values.service.http.internalPort }}
+            initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
+            periodSeconds: {{ .Values.readiness.periodSeconds }}
+          volumeMounts:
+          - mountPath: {{ .Values.config.appConfigDir }}/application.yaml
+            name: {{ include "common.fullname" . }}-config
+            subPath: application.yaml
+          - mountPath: {{ .Values.config.appConfigDir }}/logback.xml
+            name: {{ include "common.fullname" . }}-config
+            subPath: logback.xml
+          resources:
+{{ include "common.resources" . | indent 12 }}
+        {{- if .Values.nodeSelector }}
+        nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 10 }}
+        {{- end -}}
+        {{- if .Values.affinity }}
+        affinity:
+{{ toYaml .Values.affinity | indent 10 }}
+        {{- end }}
+      volumes:
+        - name: {{ include "common.fullname" . }}-config
+          configMap:
+            name: {{ include "common.fullname" . }}-configmap
+            items:
+            - key: application.yaml
+              path: application.yaml
+            - key: logback.xml
+              path: logback.xml
+      imagePullSecrets:
+      - name: {{ include "common.namespace" . }}-docker-registry-key
\ No newline at end of file
@@ -1,4 +1,4 @@
-# Copyright © 2017 Amdocs, Bell Canada
+# Copyright (c) 2019 Bell Canada
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 apiVersion: v1
 kind: Service
 metadata:
@@ -22,30 +21,15 @@ metadata:
     chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
     release: {{ .Release.Name }}
     heritage: {{ .Release.Service }}
-  annotations:
-    msb.onap.org/service-info: '[
-      {
-          "serviceName": "nokiavnfmdriver",
-          "version": "v1",
-          "url": "/api/nokiavnfmdriver/v1",
-          "protocol": "REST",
-          "port": "{{.Values.service.externalPort}}",
-          "visualRange":"1"
-      }
-      ]'
 spec:
   type: {{ .Values.service.type }}
   ports:
-    {{if eq .Values.service.type "NodePort" -}}
-    - port: {{ .Values.service.externalPort }}
-      targetPort: {{ .Values.service.internalPort }}
+    - port: {{ .Values.service.http.externalPort }}
+      targetPort: {{ .Values.service.http.internalPort }}
+      {{- if eq .Values.service.type "NodePort"}}
       nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
-      name: {{ .Values.service.portName }}
-    {{- else -}}
-    - port: {{ .Values.service.externalPort }}
-      targetPort: {{ .Values.service.internalPort }}
-      name: {{ .Values.service.portName }}
-    {{- end}}
+      {{- end}}
+      name: {{ .Values.service.http.portName | default "http" }}
   selector:
     app: {{ include "common.name" . }}
     release: {{ .Release.Name }}
@@ -1,4 +1,4 @@
-# Copyright © 2017 Amdocs, Bell Canada
+# Copyright (c) 2019 Bell Canada
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 #################################################################
 # Global configuration defaults.
 #################################################################
 global:
+  # Change to an unused port prefix range to prevent port conflicts
+  # with other instances running within the same k8s cluster
   nodePortPrefix: 302
+
+  # image repositories
+  repository: nexus3.onap.org:10001
+
+  # readiness check
   readinessRepository: oomk8s
   readinessImage: readiness-check:2.0.0
-  loggingRepository: docker.elastic.co
-  loggingImage: beats/filebeat:5.5.0
+
+  # image pull policy
+  pullPolicy: Always
+
+  persistence:
+    mountPath: /dockerdata-nfs
 
 #################################################################
 # Application configuration defaults.
 #################################################################
 # application image
-flavor: small
-
 repository: nexus3.onap.org:10001
-image: onap/vfc/nfvo/svnfm/nokia:1.1.0
+image: onap/ccsdk-sdclistener:0.4.2
+name: sdc-listener
 pullPolicy: Always
 
-#Istio sidecar injection policy
-istioSidecar: true
-
 # flag to enable debugging - application support required
 debugEnabled: false
 
 # application configuration
-config: {}
+config:
+  appConfigDir: /opt/app/onap/config
 
 # default number of instances
 replicaCount: 1
@@ -50,7 +57,7 @@ affinity: {}
 
 # probe configuration parameters
 liveness:
-  initialDelaySeconds: 120
+  initialDelaySeconds: 10
   periodSeconds: 10
   # necessary to disable liveness probe when setting breakpoints
   # in debugger so K8s doesn't restart unresponsive container
@@ -62,29 +69,32 @@ readiness:
 
 service:
   type: ClusterIP
-  name: vfc-nokia-vnfm-driver
-  portName: vfc-nokia-vnfm-driver
-  externalPort: 8486
-  internalPort: 8486
-#  nodePort: 12
+  http:
+    portName: cds-sdc-listener-http
+    internalPort: 8080
+    externalPort: 8080
+
+persistence:
+  enabled: true
 
 ingress:
   enabled: false
 
-# Configure resource requests and limits
-resources:
-  small:
-    limits:
-      cpu: 200m
-      memory: 2000Mi
-    requests:
-      cpu: 100m
-      memory: 1000Mi
-  large:
-    limits:
-      cpu: 400m
-      memory: 4000Mi
-    requests:
-      cpu: 200m
-      memory: 2000Mi
-  unlimited: {}
\ No newline at end of file
+resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  #
+  # Example:
+  # Configure resource requests and limits
+  # ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  # Minimum memory for development is 2 CPU cores and 4GB memory
+# Minimum memory for production is 4 CPU cores and 8GB memory
+#resources:
+#  limits:
+#    cpu: 2
+#    memory: 4Gi
+#  requests:
+#    cpu: 2
+#    memory: 4Gi
index 134e6b7..ead1b2e 100644 (file)
@@ -50,6 +50,14 @@ spec:
           env:
             - name: HOST
               value: 0.0.0.0
+            - name: API_BLUEPRINT_CONTROLLER_BASE_URL
+              value: {{ .Values.config.api.controller.baseUrl }}
+            - name: API_BLUEPRINT_CONTROLLER_AUTH_TOKEN
+              value: {{ .Values.config.api.controller.authToken }}
+            - name: API_BLUEPRINT_PROCESSOR_BASE_URL
+              value: {{ .Values.config.api.processor.baseUrl }}
+            - name: API_BLUEPRINT_PROCESSOR_AUTH_TOKEN
+              value: {{ .Values.config.api.processor.authToken }}
           readinessProbe:
             tcpSocket:
               port: {{ .Values.service.internalPort }}
index efcd7e2..1b383fb 100644 (file)
@@ -28,11 +28,18 @@ subChartsOnly:
 
 # application image
 repository: nexus3.onap.org:10001
-image: onap/ccsdk-cds-ui-server:0.4.2-STAGING-latest
+image: onap/ccsdk-cds-ui-server:0.4.2
 pullPolicy: Always
 
 # application configuration
 config:
+  api:
+    controller:
+      baseUrl: http://cds-controller-blueprints:8080/api/v1
+      authToken: Basic Y2NzZGthcHBzOmNjc2RrYXBwcw==
+    processor:
+      baseUrl: http://cds-blueprints-processor-http:8080/api/v1
+      authToken: Basic Y2NzZGthcHBzOmNjc2RrYXBwcw==
 
 # default number of instances
 replicaCount: 1
@@ -61,7 +68,7 @@ readiness:
 
 service:
   type: NodePort
-  portName: ui
+  portName: cds-ui
   name: cds-ui
   nodePort: 97
   internalPort: 3000
index 8e51f62..5d6f14f 100644 (file)
@@ -22,7 +22,7 @@
        id bigint not null,
         log_instant datetime(6) not null,
         log_type varchar(255) not null,
-        message varchar(255) not null,
+        message MEDIUMTEXT not null,
         loop_id varchar(255) not null,
         primary key (id)
     ) engine=InnoDB;
         primary key (name)
     ) engine=InnoDB;
 
-    alter table loop_logs
-       add constraint FK1j0cda46aickcaoxqoo34khg2
-       foreign key (loop_id)
+    alter table loop_logs 
+       add constraint FK1j0cda46aickcaoxqoo34khg2 
+       foreign key (loop_id) 
        references loops (name);
 
-    alter table loops_microservicepolicies
-       add constraint FKem7tp1cdlpwe28av7ef91j1yl
-       foreign key (microservicepolicy_id)
+    alter table loops_microservicepolicies 
+       add constraint FKem7tp1cdlpwe28av7ef91j1yl 
+       foreign key (microservicepolicy_id) 
        references micro_service_policies (name);
 
-    alter table loops_microservicepolicies
-       add constraint FKsvx91jekgdkfh34iaxtjfgebt
-       foreign key (loop_id)
+    alter table loops_microservicepolicies 
+       add constraint FKsvx91jekgdkfh34iaxtjfgebt 
+       foreign key (loop_id) 
        references loops (name);
 
-    alter table operational_policies
-       add constraint FK1ddoggk9ni2bnqighv6ecmuwu
-       foreign key (loop_id)
+    alter table operational_policies 
+       add constraint FK1ddoggk9ni2bnqighv6ecmuwu 
+       foreign key (loop_id) 
        references loops (name);
index 4534d6b..a806e77 100644 (file)
@@ -56,8 +56,9 @@ config:
         "clamp.config.files.sdcController": "file:/opt/clamp/sdc-controllers-config.json",
         "clamp.config.dcae.inventory.url": "http://inventory.{{ include "common.namespace" . }}:8080",
         "clamp.config.dcae.dispatcher.url": "https4://deployment-handler.{{ include "common.namespace" . }}:8443",
-        "clamp.config.dcae.dispatcher.userName":"test",
-        "clamp.config.dcae.dispatcher.password":"test",
+        "clamp.config.dcae.deployment.url": "https4://deployment-handler.{{ include "common.namespace" . }}:8443",
+        "clamp.config.dcae.deployment.userName": "none",
+        "clamp.config.dcae.deployment.password": "none",
         "clamp.config.policy.api.url": "http4://policy-api.{{ include "common.namespace" . }}:6969",
         "clamp.config.policy.api.userName": "healthcheck",
         "clamp.config.policy.api.password": "zb!XztG34",
diff --git a/kubernetes/common/cassandra/resources/config/docker-entrypoint.sh b/kubernetes/common/cassandra/resources/config/docker-entrypoint.sh
new file mode 100644 (file)
index 0000000..5b65222
--- /dev/null
@@ -0,0 +1,92 @@
+#!/bin/bash
+set -e
+
+# first arg is `-f` or `--some-option`
+# or there are no args
+if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ]; then
+        set -- cassandra -f "$@"
+fi
+
+# allow the container to be started with `--user`
+if [ "$1" = 'cassandra' -a "$(id -u)" = '0' ]; then
+        find /var/lib/cassandra /var/log/cassandra "$CASSANDRA_CONFIG" \
+                \! -user cassandra -exec chown cassandra '{}' +
+        exec gosu cassandra "$BASH_SOURCE" "$@"
+fi
+
+_ip_address() {
+        # scrape the first non-localhost IP address of the container
+        # in Swarm Mode, we often get two IPs -- the container IP, and the (shared) VIP, and the container IP should always be first
+        ip address | awk '
+                $1 == "inet" && $NF != "lo" {
+                        gsub(/\/.+$/, "", $2)
+                        print $2
+                        exit
+                }
+        '
+}
+
+# "sed -i", but without "mv" (which doesn't work on a bind-mounted file, for example)
+_sed-in-place() {
+        local filename="$1"; shift
+        local tempFile
+        tempFile="$(mktemp)"
+        sed "$@" "$filename" > "$tempFile"
+        cat "$tempFile" > "$filename"
+        rm "$tempFile"
+}
+
+if [ "$1" = 'cassandra' ]; then
+        : ${CASSANDRA_RPC_ADDRESS='0.0.0.0'}
+
+        : ${CASSANDRA_LISTEN_ADDRESS='auto'}
+        if [ "$CASSANDRA_LISTEN_ADDRESS" = 'auto' ]; then
+                CASSANDRA_LISTEN_ADDRESS="$(_ip_address)"
+        fi
+
+        : ${CASSANDRA_BROADCAST_ADDRESS="$CASSANDRA_LISTEN_ADDRESS"}
+
+        if [ "$CASSANDRA_BROADCAST_ADDRESS" = 'auto' ]; then
+                CASSANDRA_BROADCAST_ADDRESS="$(_ip_address)"
+        fi
+        : ${CASSANDRA_BROADCAST_RPC_ADDRESS:=$CASSANDRA_BROADCAST_ADDRESS}
+
+        if [ -n "${CASSANDRA_NAME:+1}" ]; then
+                : ${CASSANDRA_SEEDS:="cassandra"}
+        fi
+        : ${CASSANDRA_SEEDS:="$CASSANDRA_BROADCAST_ADDRESS"}
+
+        _sed-in-place "$CASSANDRA_CONFIG/cassandra.yaml" \
+                -r 's/(- seeds:).*/\1 "'"$CASSANDRA_SEEDS"'"/'
+
+        for yaml in \
+                broadcast_address \
+                broadcast_rpc_address \
+                cluster_name \
+                endpoint_snitch \
+                listen_address \
+                num_tokens \
+                rpc_address \
+                start_rpc \
+                authenticator \
+        ; do
+                var="CASSANDRA_${yaml^^}"
+                val="${!var}"
+                if [ "$val" ]; then
+                        _sed-in-place "$CASSANDRA_CONFIG/cassandra.yaml" \
+                                -r 's/^(# )?('"$yaml"':).*/\2 '"$val"'/'
+                fi
+        done
+
+        for rackdc in dc rack; do
+                var="CASSANDRA_${rackdc^^}"
+                val="${!var}"
+                if [ "$val" ]; then
+                        _sed-in-place "$CASSANDRA_CONFIG/cassandra-rackdc.properties" \
+                                -r 's/^('"$rackdc"'=).*/\1 '"$val"'/'
+                fi
+        done
+fi
+
+exec "$@"
+
index a9420d7..abb8a7e 100644 (file)
@@ -12,4 +12,16 @@ metadata:
 data:
 {{ toYaml .Values.configOverrides | indent 2 }}
 {{- end }}
-
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.fullname" . }}-entrypoint
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service }}
+data:
+{{ tpl (.Files.Glob "resources/config/docker-entrypoint.sh").AsConfig . | indent 2 }}
index a0b6b5f..0c7a112 100644 (file)
@@ -54,6 +54,9 @@ spec:
           - name: localtime
             mountPath: /etc/localtime
             readOnly: true
+          - name: cassandra-entrypoint
+            mountPath: /docker-entrypoint.sh
+            subPath: docker-entrypoint.sh
           {{- range $key, $value := .Values.configOverrides }}
           - name: cassandra-config-{{ $key | replace "." "-" }}
             mountPath: /etc/cassandra/{{ $key }}
@@ -110,6 +113,8 @@ spec:
             value: {{ default "true" .Values.config.start_rpc | quote }}
           - name: CASSANDRA_ENDPOINT_SNITCH
             value: {{ default "GossipingPropertyFileSnitch" .Values.config.endpoint_snitch | quote }}
+          - name: CASSANDRA_AUTHENTICATOR
+            value: {{ default "PasswordAuthenticator" .Values.config.authenticator | quote }}
           - name: POD_IP
             valueFrom:
               fieldRef:
@@ -141,6 +146,10 @@ spec:
         configMap:
           name: {{ include "common.fullname" . }}-configOverrides
       {{- end }}
+      - name: cassandra-entrypoint
+        configMap:
+          name: {{ include "common.fullname" . }}-entrypoint
+          defaultMode: 0755
   {{- if not .Values.persistence.enabled }}
       - name: cassandra-data
         emptyDir: {}
index d766b43..17760a7 100644 (file)
@@ -21,7 +21,7 @@ global: # global defaults
 
 # application image
 repository: nexus3.onap.org:10001
-image: library/cassandra:2.1.17
+image: library/cassandra:2.2.14
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index e07c904..944b63f 100644 (file)
@@ -47,7 +47,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/ccsdk-dgbuilder-image:0.4.2-STAGING-latest
+image: onap/ccsdk-dgbuilder-image:0.4.2
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index b9b6430..888a07a 100644 (file)
@@ -54,7 +54,7 @@ mariadb-galera:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/ccsdk-apps-ms-neng:0.4.1-STAGING-latest
+image: onap/ccsdk-apps-ms-neng:0.4.2
 pullPolicy: IfNotPresent
 
 # application configuration
index cc71522..409da39 100755 (executable)
 #
 # This installation is for an RKE install of kubernetes
 # after this run the standard oom install
-# this installation can be run on amy ubuntu 16.04 VM, RHEL 7.6 (root only), physical or cloud azure/aws host
+# this installation can be run on any ubuntu 16.04/18.04 VM, RHEL 7.6 (root only), physical or cloud azure/aws host
 # https://wiki.onap.org/display/DW/OOM+RKE+Kubernetes+Deployment
 # source from https://jira.onap.org/browse/OOM-1598
 #
 # master/dublin 
 #     RKE 0.1.16 Kubernetes 1.11.6, kubectl 1.11.6, Helm 2.9.1, Docker 18.06
+#     20190428 RKE 0.2.1, Kubernetes 1.13.5, kubectl 1.13.5, Helm 2.12.3, Docker 18.09.5
 # single node install, HA pending
 
 usage() {
@@ -42,19 +43,11 @@ EOF
 install_onap() {
   #constants
   PORT=8880
-  if [ "$BRANCH" == "casablanca" ]; then
-    KUBERNETES_VERSION=
-    RKE_VERSION=0.1.15
-    KUBECTL_VERSION=1.11.3
-    HELM_VERSION=2.9.1
-    DOCKER_VERSION=17.03
-  else
-    KUBERNETES_VERSION=
-    RKE_VERSION=0.1.16
-    KUBECTL_VERSION=1.11.6
-    HELM_VERSION=2.9.1
-    DOCKER_VERSION=18.06
-  fi
+  KUBERNETES_VERSION=
+  RKE_VERSION=0.2.1
+  KUBECTL_VERSION=1.13.5
+  HELM_VERSION=2.12.3
+  DOCKER_VERSION=18.09
  
   # copy your private ssh key and cluster.yml file to the vm
   # on your dev machine
@@ -68,7 +61,7 @@ install_onap() {
   # sudo vi ~/.ssh/authorized_keys
 
   echo "please supply your ssh key as provided by the -k keyname - it must be be chmod 400 and chown user:user in ~/.ssh/"
-  echo "The RKE version specific cluster.yaml is already integrated in this script for 0.1.15/0.1.16 no need for below generation..."
+  echo "The RKE version specific cluster.yaml is already integrated in this script for 0.2.1 no need for below generation..."
   echo "rke config --name cluster.yml"
   echo "specifically"
   echo "address: $SERVER"
@@ -78,16 +71,9 @@ install_onap() {
   RKETOOLS=
   HYPERCUBE=
   POD_INFRA_CONTAINER=
-  if [ "$RKE_VERSION" == "0.1.16" ]; then  
-    RKETOOLS=0.1.15
-    HYPERCUBE=1.11.6-rancher1
-    POD_INFRA_CONTAINER=rancher/pause-amd64:3.1
-  else
-    # 0.1.15
-    RKETOOLS=0.1.14
-    HYPERCUBE=1.11.3-rancher1
-    POD_INFRA_CONTAINER=gcr.io.google_containers/pause-amd64:3.1
-  fi
+  RKETOOLS=0.1.27
+  HYPERCUBE=1.13.5-rancher1
+  POD_INFRA_CONTAINER=rancher/pause:3.1
 
   cat > cluster.yml <<EOF
 # generated from rke_setup.sh
@@ -104,6 +90,8 @@ nodes:
   docker_socket: /var/run/docker.sock
   ssh_key: ""
   ssh_key_path: $SSHPATH_PREFIX/$SSHKEY
+  ssh_cert: ""
+  ssh_cert_path: ""
   labels: {}
 services:
   etcd:
@@ -119,6 +107,7 @@ services:
     snapshot: null
     retention: ""
     creation: ""
+    backup_config: null
   kube-api:
     image: ""
     extra_args: {}
@@ -127,6 +116,7 @@ services:
     service_cluster_ip_range: 10.43.0.0/16
     service_node_port_range: ""
     pod_security_policy: false
+    always_pull_images: false
   kube-controller:
     image: ""
     extra_args: {}
@@ -159,35 +149,36 @@ network:
   options: {}
 authentication:
   strategy: x509
-  options: {}
   sans: []
+  webhook: null
 system_images:
-  etcd: rancher/coreos-etcd:v3.2.18
+  etcd: rancher/coreos-etcd:v3.2.24-rancher1
   alpine: rancher/rke-tools:v$RKETOOLS
   nginx_proxy: rancher/rke-tools:v$RKETOOLS
   cert_downloader: rancher/rke-tools:v$RKETOOLS
   kubernetes_services_sidecar: rancher/rke-tools:v$RKETOOLS
-  kubedns: rancher/k8s-dns-kube-dns-amd64:1.14.10
-  dnsmasq: rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10
-  kubedns_sidecar: rancher/k8s-dns-sidecar-amd64:1.14.10
-  kubedns_autoscaler: rancher/cluster-proportional-autoscaler-amd64:1.0.0
+  kubedns: rancher/k8s-dns-kube-dns:1.15.0
+  dnsmasq: rancher/k8s-dns-dnsmasq-nanny:1.15.0
+  kubedns_sidecar: rancher/k8s-dns-sidecar:1.15.0
+  kubedns_autoscaler: rancher/cluster-proportional-autoscaler:1.0.0
   kubernetes: rancher/hyperkube:v$HYPERCUBE
-  flannel: rancher/coreos-flannel:v0.10.0
-  flannel_cni: rancher/coreos-flannel-cni:v0.3.0
-  calico_node: rancher/calico-node:v3.1.3
-  calico_cni: rancher/calico-cni:v3.1.3
+  flannel: rancher/coreos-flannel:v0.10.0-rancher1
+  flannel_cni: rancher/flannel-cni:v0.3.0-rancher1
+  calico_node: rancher/calico-node:v3.4.0
+  calico_cni: rancher/calico-cni:v3.4.0
   calico_controllers: ""
   calico_ctl: rancher/calico-ctl:v2.0.0
-  canal_node: rancher/calico-node:v3.1.3
-  canal_cni: rancher/calico-cni:v3.1.3
+  canal_node: rancher/calico-node:v3.4.0
+  canal_cni: rancher/calico-cni:v3.4.0
   canal_flannel: rancher/coreos-flannel:v0.10.0
-  wave_node: weaveworks/weave-kube:2.1.2
-  weave_cni: weaveworks/weave-npc:2.1.2
+  wave_node: weaveworks/weave-kube:2.5.0
+  weave_cni: weaveworks/weave-npc:2.5.0
   pod_infra_container: $POD_INFRA_CONTAINER
-  ingress: rancher/nginx-ingress-controller:0.16.2-rancher1
-  ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.4
-  metrics_server: rancher/metrics-server-amd64:v0.2.1
+  ingress: rancher/nginx-ingress-controller:0.21.0-rancher3
+  ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1
+  metrics_server: rancher/metrics-server:v0.3.1
 ssh_key_path: $SSHPATH
+ssh_cert_path: ""
 ssh_agent_auth: false
 authorization:
   mode: rbac
@@ -211,9 +202,15 @@ bastion_host:
   user: ""
   ssh_key: ""
   ssh_key_path: ""
+  ssh_cert: ""
+  ssh_cert_path: ""
 monitoring:
   provider: ""
   options: {}
+restore:
+  restore: false
+  snapshot_name: ""
+dns: null
 EOF
 
 
index 05aada2..6c5bb9a 100644 (file)
@@ -27,7 +27,7 @@ global:
   loggingRepository: docker.elastic.co
   loggingImage: beats/filebeat:5.5.0
   tlsRepository: nexus3.onap.org:10001
-  tlsImage: onap/org.onap.dcaegen2.deployments.tls-init-container:1.0.3-STAGING-latest
+  tlsImage: onap/org.onap.dcaegen2.deployments.tls-init-container:1.0.3
 
 config:
   logstashServiceName: log-ls
@@ -90,7 +90,7 @@ postgres:
 
 # application image
 repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.4.15-STAGING-latest
+image: onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.4.16
 default_k8s_location: central
 
 # DCAE component images to be deployed via Cloudify Manager
@@ -99,11 +99,11 @@ componentImages:
   dashboard: onap/org.onap.ccsdk.dashboard.ccsdk-app-os:1.1.0-SNAPSHOT-latest
   holmes_rules: onap/holmes/rule-management:1.2.4-STAGING-latest
   holmes_engine: onap/holmes/engine-management:1.2.3-STAGING-latest
-  tca: onap/org.onap.dcaegen2.deployments.tca-cdap-container:1.1.0-STAGING-latest
-  ves: onap/org.onap.dcaegen2.collectors.ves.vescollector:1.4.3
-  snmptrap: onap/org.onap.dcaegen2.collectors.snmptrap:1.4.0-STAGING-latest
+  tca: onap/org.onap.dcaegen2.deployments.tca-cdap-container:1.1.1
+  ves: onap/org.onap.dcaegen2.collectors.ves.vescollector:1.4.4
+  snmptrap: onap/org.onap.dcaegen2.collectors.snmptrap:1.4.0
   prh: onap/org.onap.dcaegen2.services.prh.prh-app-server:1.2.2
-  hv_ves: onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:1.1.0-SNAPSHOT
+  hv_ves: onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:1.1.0
 
 # Resource Limit flavor -By Default using small
 flavor: small
index a432226..41f0750 100644 (file)
@@ -44,14 +44,14 @@ config:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.deployments.cm-container:1.6.2-STAGING-latest
+image: onap/org.onap.dcaegen2.deployments.cm-container:1.6.2
 pullPolicy: Always
 
 # name of shared ConfigMap with kubeconfig for multiple clusters
 multisiteConfigMapName: multisite-kubeconfig-configmap
 
 # image for init container to initialize shared ConfigMap
-multisiteInitImage: onap/org.onap.dcaegen2.deployments.multisite-init-container:1.0.0-STAGING-latest
+multisiteInitImage: onap/org.onap.dcaegen2.deployments.multisite-init-container:1.0.0
 
 # probe configuration parameters
 liveness:
index fc0ca87..4605e88 100644 (file)
@@ -43,7 +43,7 @@ config:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.platform.configbinding.app-app:2.3.0-STAGING-latest
+image: onap/org.onap.dcaegen2.platform.configbinding.app-app:2.3.0
 pullPolicy: Always
 
 # probe configuration parameters
index ebc36a6..0eddf7c 100644 (file)
@@ -25,9 +25,9 @@ global:
   loggingRepository: docker.elastic.co
   loggingImage: beats/filebeat:5.5.0
   tlsRepository: nexus3.onap.org:10001
-  tlsImage: onap/org.onap.dcaegen2.deployments.tls-init-container:1.0.2-STAGING-latest
+  tlsImage: onap/org.onap.dcaegen2.deployments.tls-init-container:1.0.3
   consulLoaderRepository: nexus3.onap.org:10001
-  consulLoaderImage: onap/org.onap.dcaegen2.deployments.consul-loader-container:1.0.0-STAGING-latest
+  consulLoaderImage: onap/org.onap.dcaegen2.deployments.consul-loader-container:1.0.0
   repositoryCred:
     user: docker
     password: docker
index 97cd7d2..913cb71 100644 (file)
@@ -45,7 +45,7 @@ readiness:
   periodSeconds: 10
 # application image
 repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.deployments.healthcheck-container:1.2.4-STAGING-latest
+image: onap/org.onap.dcaegen2.deployments.healthcheck-container:1.2.4
 
 # Resource Limit flavor -By Default using small
 flavor: small
index 74abbb0..1db11ad 100644 (file)
       "interval": 600
     },
     "policy_engine": {
-      "url": "https://{{ .Values.config.address.policy_pdp }}.{{include "common.namespace" . }}:8081",
-      "path_decision": "/decision/v1",
+      "url": "https://{{ .Values.config.address.policy_xacml_pdp }}:6969",
+      "path_decision": "/policy/pdpx/v1/decision",
       "path_notifications": "/pdp/notifications",
       "path_api": "/pdp/api/",
       "headers": {
         "Accept": "application/json",
         "Content-Type": "application/json",
         "ClientAuth": "cHl0aG9uOnRlc3Q=",
-        "Authorization": "Basic dGVzdHBkcDphbHBoYTEyMw==",
+        "Authorization": "Basic aGVhbHRoY2hlY2s6emIhWHp0RzM0",
         "Environment": "TEST"
       },
       "target_entity": "policy_engine",
index e110f1c..3b15c55 100644 (file)
@@ -25,9 +25,9 @@ global:
   loggingRepository: docker.elastic.co
   loggingImage: beats/filebeat:5.5.0
   tlsRepository: nexus3.onap.org:10001
-  tlsImage: onap/org.onap.dcaegen2.deployments.tls-init-container:1.0.2-STAGING-latest
+  tlsImage: onap/org.onap.dcaegen2.deployments.tls-init-container:1.0.3
   consulLoaderRepository: nexus3.onap.org:10001
-  consulLoaderImage: onap/org.onap.dcaegen2.deployments.consul-loader-container:1.0.0-STAGING-latest
+  consulLoaderImage: onap/org.onap.dcaegen2.deployments.consul-loader-container:1.0.0
   repositoryCred:
     user: docker
     password: docker
@@ -40,13 +40,14 @@ config:
     consul:
       host: consul-server
       port: 8500
+    policy_xacml_pdp: policy-xacml-pdp
 
 #################################################################
 # Application configuration defaults.
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.platform.policy-handler:5.0.0-STAGING-latest
+image: onap/org.onap.dcaegen2.platform.policy-handler:5.0.0
 pullPolicy: Always
 
 # probe configuration parameters
index 927f19e..254ec39 100644 (file)
@@ -19,8 +19,8 @@
 global:
   nodePortPrefix: 302
   tlsRepository: nexus3.onap.org:10001
-  tlsImage: onap/org.onap.dcaegen2.deployments.tls-init-container:1.0.3-STAGING-latest
+  tlsImage: onap/org.onap.dcaegen2.deployments.tls-init-container:1.0.3
   consulLoaderRepository: nexus3.onap.org:10001
-  consulLoaderImage: onap/org.onap.dcaegen2.deployments.consul-loader-container:1.0.0-STAGING-latest
+  consulLoaderImage: onap/org.onap.dcaegen2.deployments.consul-loader-container:1.0.0
 redis:
   replicaCount: 6
index dc9298d..bcfefe9 100644 (file)
@@ -101,6 +101,9 @@ MR.TopicMgrRole: org.onap.dmaap-bc-topic-mgr.client
 # MR topic ProjectID (used in certain topic name generation formats)
 MR.projectID:  mr
 
+# Use Basic Authentication when provisioning topics
+MR.authentication: basicAuth
+
 
 #####################################################
 #
index 5406ade..6b97414 100644 (file)
@@ -30,7 +30,7 @@ pullPolicy: Always
 
 # application images
 repository: nexus3.onap.org:10001
-image: onap/dmaap/dmaap-bc:1.1.4-STAGING-latest
+image: onap/dmaap/dmaap-bc:1.1.5
 
 
 # application configuration
index 90c4137..043bb8b 100644 (file)
@@ -1,14 +1,14 @@
 cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
 cadi_keyfile=/opt/app/datartr/aaf_certs/org.onap.dmaap-dr.keyfile
 cadi_keystore=/opt/app/datartr/aaf_certs/org.onap.dmaap-dr.jks
-cadi_keystore_password=]3V)($O&.Mv]W{f8^]6SxGNL
-cadi_key_password=]3V)($O&.Mv]W{f8^]6SxGNL
+cadi_keystore_password=WGxd2P6MDo*Bi4+UdzWs{?$8
+cadi_key_password=WGxd2P6MDo*Bi4+UdzWs{?$8
 cadi_alias=dmaap-dr-node@dmaap-dr.onap.org
 cadi_truststore=/opt/app/datartr/aaf_certs/org.onap.dmaap-dr.trust.jks
-cadi_truststore_password=(Rd,&{]%ePdp}4JZjqoJ2G+g
+cadi_truststore_password=)OBvCd{e{aWq.^mJJdX:S:1&
 
 aaf_env=DEV
-aaf_locate_url=https://aaf-onap-test.osaaf.org:8095
+aaf_locate_url=https://aaf-locate:8095
 aaf_oauth2_introspect_url=https://AAF_LOCATE_URL/AAF_NS.introspect:2.1/introspect
 aaf_oauth2_token_url=https://AAF_LOCATE_URL/AAF_NS.token:2.1/token
 aaf_url=https://AAF_LOCATE_URL/AAF_NS.service:2.1
index 08e3fd0..5de5fc1 100644 (file)
@@ -122,7 +122,7 @@ AAFInstance = legacy
 AAFAction = publish
 #
 #    AAF URL to connect to AAF server
-AafUrl = https://aaf-onap-test.osaaf.org:8095
+AafUrl = https://aaf-locate:8095
 #
 #    AAF CADI enabled flag
 CadiEnabled = false
index 6a87514..79abe22 100644 (file)
@@ -1,14 +1,14 @@
 cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
 cadi_keyfile=/opt/app/datartr/aaf_certs/org.onap.dmaap-dr.keyfile
 cadi_keystore=/opt/app/datartr/aaf_certs/org.onap.dmaap-dr.jks
-cadi_keystore_password=AT{];bvaDiytVD&oWhMZj0N5
-cadi_key_password=AT{];bvaDiytVD&oWhMZj0N5
+cadi_keystore_password=FZNkU,B%NJzcT1v7;^v]M#ZX
+cadi_key_password=FZNkU,B%NJzcT1v7;^v]M#ZX
 cadi_alias=dmaap-dr-prov@dmaap-dr.onap.org
 cadi_truststore=/opt/app/datartr/aaf_certs/org.onap.dmaap-dr.trust.jks
-cadi_truststore_password=ljlS@Y}0]{UO(TnwvEWkgJ%]
+cadi_truststore_password=+mzf@J.D^;3!![*Xr.z$c#?b
 
 aaf_env=DEV
-aaf_locate_url=https://aaf-onap-test.osaaf.org:8095
+aaf_locate_url=https://aaf-locate:8095
 aaf_oauth2_introspect_url=https://AAF_LOCATE_URL/AAF_NS.introspect:2.1/introspect
 aaf_oauth2_token_url=https://AAF_LOCATE_URL/AAF_NS.token:2.1/token
 aaf_url=https://AAF_LOCATE_URL/AAF_NS.service:2.1
index 6a6f49c..a4e96f0 100644 (file)
@@ -62,5 +62,5 @@ org.onap.dmaap.datarouter.provserver.aaf.action.publish   = publish
 org.onap.dmaap.datarouter.provserver.aaf.action.subscribe = subscribe
 
 # AAF URL to connect to AAF server
-org.onap.dmaap.datarouter.provserver.cadi.aaf.url = https://aaf-onap-test.osaaf.org:8095
+org.onap.dmaap.datarouter.provserver.cadi.aaf.url = https://aaf-locate:8095
 
index 6d78950..f0d07ae 100644 (file)
@@ -30,7 +30,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/dmaap/kafka111:1.0.0
+image: onap/dmaap/kafka111:1.0.1
 pullPolicy: Always
 ubuntuInitImage: oomk8s/ubuntu-init:2.0.0
 busyBoxImage: busybox:1.30
index 6d62edd..2f63406 100644 (file)
@@ -30,7 +30,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/dmaap/kafka111:1.0.0
+image: onap/dmaap/kafka111:1.0.1
 pullPolicy: Always
 ubuntuInitImage: oomk8s/ubuntu-init:2.0.0
 
index 9c737e5..4c5541b 100644 (file)
@@ -30,7 +30,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/dmaap/zookeeper:5.0.0
+image: onap/dmaap/zookeeper:6.0.0
 pullPolicy: Always
 ubuntuInitImage: oomk8s/ubuntu-init:2.0.0
 busyBoxImage: busybox:1.30
index 948577a..4e0b085 100755 (executable)
@@ -3,15 +3,15 @@ aaf_url=https://AAF_LOCATE_URL/AAF_NS.service:2.1
 aaf_env=DEV
 aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
 
-cadi_truststore=/appl/dmaapMR1/etc/truststoreONAPall.jks
-cadi_truststore_password=changeit
+cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
+cadi_truststore_password=enc:gvXm0E9p-_SRNw5_feOUE7wqXBxgxV3S_bdAyB08Sq9F35cCUZHWgQyKIDtTAbEw
 
-cadi_keyfile=/appl/dmaapMR1/etc/keyfilenew
+cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
 
 cadi_alias=dmaapmr@mr.dmaap.onap.org
 cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
-cadi_keystore_password=Messaging for All
-cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US
+cadi_keystore_password=enc:pLMCzQzk-OP7IpYNi0TPtQSkNcraFAdarZG8HbdOKq4BycW6g_7mfhphLhOZo6ht
+cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
 
 
 cadi_loglevel=INFO
index 9e732d2..6c201f6 100644 (file)
@@ -1,6 +1,6 @@
 {
   "dcaeLocationName": "san-francisco",
   "fqdn": "message-router",
-  "topicProtocol": "http",
-  "topicPort": "3904"
+  "topicProtocol": "https",
+  "topicPort": "3905"
 }
index cfd7127..54c8982 100644 (file)
@@ -28,7 +28,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/dmaap/dmaap-mr:1.1.13
+image: onap/dmaap/dmaap-mr:1.1.14
 pullPolicy: Always
 
 kafka:
index 1c18bb2..aa5165d 100644 (file)
@@ -22,7 +22,7 @@ global:
   readinessImage: readiness-check:2.0.0
   loggingRepository: docker.elastic.co
   loggingImage: beats/filebeat:5.5.0
-  clientImage: onap/dmaap/dbc-client:1.0.8-STAGING-latest
+  clientImage: onap/dmaap/dbc-client:1.0.9
 # application configuration
 config:
   logstashServiceName: log-ls
index 3db8bcb..952dd0a 100644 (file)
@@ -24,7 +24,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/aai/esr-gui:1.4.0-STAGING-latest
+image: onap/aai/esr-gui:1.4.0
 pullPolicy: Always
 msbaddr: msb-iag.{{ include "common.namespace" . }}:80
 
index d6ac88f..98203fc 100644 (file)
@@ -27,7 +27,7 @@ subChartsOnly:
 
 # application image
 repository: nexus3.onap.org:10001
-image: onap/aai/esr-server:1.4.0-STAGING-latest
+image: onap/aai/esr-server:1.4.0
 pullPolicy: Always
 msbaddr: msb-iag.{{ include "common.namespace" . }}:80
 
index 053fe1f..53ce0d0 100644 (file)
@@ -24,7 +24,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/multicloud/openstack-ocata:1.3.0-STAGING
+image: onap/multicloud/openstack-ocata:1.3.1
 pullPolicy: Always
 
 #Istio sidecar injection policy
index 7e61be6..42aa7ca 100644 (file)
@@ -23,7 +23,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/multicloud/openstack-pike:1.3.0-STAGING
+image: onap/multicloud/openstack-pike:1.3.1
 pullPolicy: Always
 
 #Istio sidecar injection policy
index 2ca696c..723d77d 100644 (file)
@@ -23,7 +23,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/multicloud/openstack-starlingx:1.3.0-SNAPSHOT
+image: onap/multicloud/openstack-starlingx:1.3.1
 pullPolicy: Always
 
 #Istio sidecar injection policy
index ec8f276..0da23dd 100644 (file)
@@ -24,7 +24,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/multicloud/vio:1.3.0-STAGING
+image: onap/multicloud/vio:1.3.1
 pullPolicy: Always
 
 #Istio sidecar injection policy
index 977347c..8a6acbd 100644 (file)
@@ -24,7 +24,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/multicloud/openstack-windriver:1.3.0-STAGING
+image: onap/multicloud/openstack-windriver:1.3.1
 pullPolicy: Always
 
 #Istio sidecar injection policy
index aa34496..6188e43 100644 (file)
@@ -28,7 +28,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/multicloud/framework:1.3.0-STAGING
+image: onap/multicloud/framework:1.3.1
 pullPolicy: Always
 
 #Istio sidecar injection policy
index 9f9f166..1716415 100644 (file)
 ###################################################################
 # This override file enables helm charts for all ONAP applications.
 ###################################################################
+cassandra:
+  enabled: true
+mariadb-galera:
+  enabled: true
+
 aaf:
   enabled: true
 aai:
   enabled: true
 appc:
   enabled: true
-cassandra:
-  enabled: true
 clamp:
   enabled: true
 cli:
diff --git a/kubernetes/onap/resources/overrides/onap-vfw.yaml b/kubernetes/onap/resources/overrides/onap-vfw.yaml
new file mode 100644 (file)
index 0000000..323961f
--- /dev/null
@@ -0,0 +1,55 @@
+# Copyright © 2019 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+###################################################################
+# This override file enables ONAP Application helm charts for the
+# vFW use case.
+###################################################################
+cassandra:
+  enabled: true
+mariadb-galera:
+  enabled: true
+
+aaf:
+  enabled: true
+aai:
+  enabled: true
+appc:
+  enabled: true
+clamp:
+  enabled: true
+consul:
+  enabled: true
+dcaegen2:
+  enabled: true
+dmaap:
+  enabled: true
+log:
+  enabled: true
+oof:
+  enabled: true
+msb:
+  enabled: true
+policy:
+  enabled: true
+portal:
+  enabled: true
+robot:
+  enabled: true
+sdc:
+  enabled: true
+sdnc:
+  enabled: true
+so:
+  enabled: true
\ No newline at end of file
diff --git a/kubernetes/onap/resources/overrides/openstack.yaml b/kubernetes/onap/resources/overrides/openstack.yaml
new file mode 100644 (file)
index 0000000..a8294d2
--- /dev/null
@@ -0,0 +1,62 @@
+# Copyright © 2019 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# This override file configures openstack parameters for ONAP
+#################################################################
+appc:
+  config:
+    enableClustering: false
+    openStackType: "OpenStackProvider"
+    openStackName: "OpenStack"
+    openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
+    openStackServiceTenantName: "OPENSTACK_TENANTNAME_HERE"
+    openStackDomain: "Default"
+    openStackUserName: "OPENSTACK_USERNAME_HERE"
+    openStackEncryptedPassword: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
+robot:
+  appcUsername: "appc@appc.onap.org"
+  appcPassword: "demo123456!"
+  openStackKeyStoneUrl: "http://10.12.25.2:5000"
+  openStackPublicNetId: "971040b2-7059-49dc-b220-4fab50cb2ad4"
+  openStackTenantId: "09d8566ea45e43aa974cf447ed591d77"
+  openStackUserName: "OPENSTACK_USERNAME_HERE"
+  ubuntu14Image: "ubuntu-14-04-cloud-amd64"
+  ubuntu16Image: "ubuntu-16-04-cloud-amd64"
+  openStackPrivateNetId: "c7824f00-bef7-4864-81b9-f6c3afabd313"
+  openStackPrivateSubnetId: "2a0e8888-f93e-4615-8d28-fc3d4d087fc3"
+  openStackPrivateNetCidr: "10.0.0.0/16"
+  openStackSecurityGroup: "3a7a1e7e-6d15-4264-835d-fab1ae81e8b0"
+  openStackOamNetworkCidrPrefix: "10.0"
+  dcaeCollectorIp: "10.12.6.88"
+  vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
+  demoArtifactsVersion: "1.4.0-SNAPSHOT"
+  demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases"
+  scriptVersion: "1.4.0-SNAPSHOT"
+  rancherIpAddress: "10.12.5.127"
+  config:
+    # openStackEncryptedPasswordHere should match the encrypted string used in SO and APPC and overridden per environment
+    openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
+so:
+  # so server configuration
+  so-catalog-db-adapter:
+    config:
+      openStackUserName: "OPENSTACK_USERNAME_HERE"
+      openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
+      openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
+nbi:
+  config:
+    # openstack configuration
+    openStackRegion: "Yolo"
+    openStackVNFTenantId: "1234"
\ No newline at end of file
index b62606e..613a223 100644 (file)
@@ -112,7 +112,7 @@ nbi:
     openStackRegion: "Yolo"
     openStackVNFTenantId: "1234"
 nfs-provisioner:
-  enabled: true
+  enabled: false
 policy:
   enabled: false
 pomba:
index 841aa3a..e7c6928 100644 (file)
@@ -70,15 +70,15 @@ POLICY_PDP_PAP_API_SECRET=
 
 # PAP
 
-PAP_HOST={{.Values.global.pap.nameOverride}}
-PAP_USERNAME=testpap
-PAP_PASSWORD=alpha123
+PAP_HOST=policy-pap
+PAP_USERNAME=healthcheck
+PAP_PASSWORD=zb!XztG34
 
 # PDP-X
 
-PDP_HOST={{.Values.global.pdp.nameOverride}}
-PDP_USERNAME=testpdp
-PDP_PASSWORD=alpha123
+PDP_HOST=policy-xacml-pdp
+PDP_USERNAME=healthcheck
+PDP_PASSWORD=zb!XztG34
 PDP_CLIENT_USERNAME=python
 PDP_CLIENT_PASSWORD=test
 PDP_ENVIRONMENT=TEST
@@ -113,6 +113,6 @@ VFC_PASSWORD=
 
 # SDNC
 
-SDNC_URL=
-SDNC_USERNAME=
-SDNC_PASSWORD=
+SDNC_URL=http://sdnc.{{.Release.Namespace}}:8282/restconf/operations
+SDNC_USERNAME=admin
+SDNC_PASSWORD=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
index 8edca60..9de4a8b 100644 (file)
@@ -11,6 +11,6 @@
         "timeIntervalMs": 120000,
         "pdpType":"apex",
         "description":"Pdp Heartbeat",
-        "supportedPolicyTypes":[{"name":"onap.policies.controlloop.Operational","version":"1.0.0"}]
+        "supportedPolicyTypes":[{"name":"onap.policies.controlloop.operational.Apex","version":"1.0.0"}]
     }
 }
index 16e0a51..397f850 100644 (file)
@@ -21,7 +21,9 @@
         "host":"0.0.0.0",
         "port":6969,
         "userName":"healthcheck",
-        "password":"zb!XztG34"
+        "password":"zb!XztG34",
+        "https": true,
+        "aaf": false
     },
     "databaseProviderParameters": {
         "name": "PolicyProviderParameterGroup",
@@ -32,4 +34,4 @@
         "databasePassword": "cG9saWN5X3VzZXI=",
         "persistenceUnit": "PolicyMariaDb"
     }
-}
\ No newline at end of file
+}
index 995c951..fa80bf2 100644 (file)
@@ -21,7 +21,9 @@
         "host":"0.0.0.0",
         "port":6969,
         "userName":"healthcheck",
-        "password":"zb!XztG34"
+        "password":"zb!XztG34",
+        "https": true,
+        "aaf": false
     },
     "pdpParameters": {
         "updateParameters": {
diff --git a/kubernetes/policy/charts/policy-xacml-pdp/resources/config/xacml.properties b/kubernetes/policy/charts/policy-xacml-pdp/resources/config/xacml.properties
new file mode 100644 (file)
index 0000000..f4b4f93
--- /dev/null
@@ -0,0 +1,53 @@
+#
+# Properties that the embedded PDP engine uses to configure and load
+#
+# Standard API Factories
+#
+xacml.dataTypeFactory=com.att.research.xacml.std.StdDataTypeFactory
+xacml.pdpEngineFactory=com.att.research.xacmlatt.pdp.ATTPDPEngineFactory
+xacml.pepEngineFactory=com.att.research.xacml.std.pep.StdEngineFactory
+xacml.pipFinderFactory=com.att.research.xacml.std.pip.StdPIPFinderFactory
+xacml.traceEngineFactory=com.att.research.xacml.std.trace.LoggingTraceEngineFactory
+#
+# AT&T PDP Implementation Factories
+#
+xacml.att.evaluationContextFactory=com.att.research.xacmlatt.pdp.std.StdEvaluationContextFactory
+xacml.att.combiningAlgorithmFactory=com.att.research.xacmlatt.pdp.std.StdCombiningAlgorithmFactory
+xacml.att.functionDefinitionFactory=com.att.research.xacmlatt.pdp.std.StdFunctionDefinitionFactory
+#
+# ONAP PDP Implementation Factories
+#
+xacml.att.policyFinderFactory=org.onap.policy.pdp.xacml.application.common.OnapPolicyFinderFactory
+
+#
+# Use a root combining algorithm
+#
+xacml.att.policyFinderFactory.combineRootPolicies=urn:oasis:names:tc:xacml:3.0:policy-combining-algorithm:deny-overrides
+
+#
+# PIP Engine Definitions
+#
+count-recent-operations.classname=org.onap.policy.pdp.xacml.application.common.operationshistory.CountRecentOperationsPip
+count-recent-operations.issuer=urn:org:onap:xacml:guard:count-recent-operations
+count-recent-operations.name=CountRecentOperations
+count-recent-operations.description=Returns operation counts based on time window
+count-recent-operations.persistenceunit=OperationsHistoryPU
+
+get-operation-outcome.classname=org.onap.policy.pdp.xacml.application.common.operationshistory.GetOperationOutcomePip
+get-operation-outcome.issuer=urn:org:onap:xacml:guard:get-operation-outcome
+get-operation-outcome.name=GetOperationOutcome
+get-operation-outcome.description=Returns operation outcome
+get-operation-outcome.persistenceunit=OperationsHistoryPU
+
+#
+# Make pips available to finder
+#
+xacml.pip.engines=count-recent-operations,get-operation-outcome
+
+#
+# JPA Properties
+#
+javax.persistence.jdbc.driver=org.mariadb.jdbc.Driver
+javax.persistence.jdbc.url=jdbc:mariadb://{{ .Values.global.mariadb.nameOverride }}:3306/operationshistory
+javax.persistence.jdbc.user=policy_user
+javax.persistence.jdbc.password=cG9saWN5X3VzZXI=
\ No newline at end of file
index 1cd9290..85fda35 100644 (file)
@@ -135,8 +135,8 @@ onap_application_name=
 
 #-----------------------ONAP-PORTAL-Properties----------------------
 
-ONAP_REDIRECT_URL=https://portal-app.{{.Release.Namespace}}:30225/ONAPPORTAL/login.htm
-ONAP_REST_URL=https://portal-app:8443/ONAPPORTAL/auxapi
+ONAP_REDIRECT_URL=https://portal.api.simpledemo.onap.org:30225/ONAPPORTAL/login.htm
+ONAP_REST_URL=https://portal-app:30225/ONAPPORTAL/auxapi
 ONAP_UEB_URL_LIST=
 ONAP_PORTAL_INBOX_NAME=
 ONAP_UEB_APP_KEY=ueb_key_5
index a9a8687..ddb352b 100644 (file)
@@ -197,51 +197,21 @@ curl -k -v --silent -X PUT --header 'Content-Type: application/json' --header 'A
 
 #########################################Create SDNC Naming Policies##########################################
 
-echo "Create SDNC Naming Policies"
+echo "Create Generic SDNC Naming Policy for VNF"
 
 sleep 2
 
 echo "Create SDNC vFW Naming Policy"
 curl -k -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-    "configBody": "{\"service\":\"SDNC-GenerateName\",\"version\":\"CSIT\",\"content\":{\"policy-instance-name\":\"ONAP_VFW_NAMING_TIMESTAMP\",\"naming-models\":[{\"naming-properties\":[{\"property-name\":\"AIC_CLOUD_REGION\"},{\"property-name\":\"nfRole\"},{\"property-name\":\"TIMESTAMP\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"}],\"naming-type\":\"VNF\",\"nfRole\":\"vFW\",\"naming-recipe\":\"AIC_CLOUD_REGION|DELIMITER|nfRole|DELIMITER|TIMESTAMP\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"},{\"property-name\":\"SEQUENCE\",\"increment-sequence\":{\"max\":\"zzz\",\"scope\":\"ENTIRETY\",\"start-value\":\"001\",\"length\":\"3\",\"increment\":\"1\",\"sequence-type\":\"alpha-numeric\"}},{\"property-name\":\"NFC_NAMING_CODE\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"}],\"naming-type\":\"VNFC\",\"nfRole\":\"vFW\",\"naming-recipe\":\"VNF_NAME|DELIMITER|NFC_NAMING_CODE|DELIMITER|SEQUENCE\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"},{\"property-name\":\"VF_MODULE_LABEL\"},{\"property-name\":\"VF_MODULE_TYPE\"},{\"property-name\":\"SEQUENCE\",\"increment-sequence\":{\"max\":\"zzz\",\"scope\":\"PRECEEDING\",\"start-value\":\"01\",\"length\":\"3\",\"increment\":\"1\",\"sequence-type\":\"alpha-numeric\"}}],\"naming-type\":\"VF-MODULE\",\"nfRole\":\"vFW\",\"naming-recipe\":\"VNF_NAME|DELIMITER|VF_MODULE_LABEL|DELIMITER|VF_MODULE_TYPE|DELIMITER|SEQUENCE\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"}],\"naming-type\":\"KEY\",\"nfRole\":\"vFW\",\"naming-recipe\":\"VNF_NAME\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"},{\"property-value\":\"protected\",\"property-name\":\"CONSTANT\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"}],\"naming-type\":\"protected_private_net_id\",\"nfRole\":\"vFW\",\"naming-recipe\":\"VNF_NAME|DELIMITER|CONSTANT\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"},{\"property-value\":\"unp
rotected\",\"property-name\":\"CONSTANT\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"}],\"naming-type\":\"unprotected_private_net_id\",\"nfRole\":\"vFW\",\"naming-recipe\":\"VNF_NAME|DELIMITER|CONSTANT\"}]}}",
-    "policyName": "SDNC_Policy.ONAP_VFW_NAMING_TIMESTAMP",
+    "configBody": "{ \"service\": \"SDNC-GenerateName\", \"version\": \"CSIT\", \"content\": { \"policy-instance-name\": \"ONAP_VNF _NAMING_TIMESTAMP\", \"naming-models\": [ { \"naming-properties\": [ { \"property-name\": \"AIC_CLOUD_REGION\" }, { \"property-name\": \"nfRole\" }, { \"property-name\": \"TIMESTAMP\" }, { \"property-value\": \"_\", \"property-name\": \"DELIMITER\" } ], \"naming-type\": \"VNF\", \"naming-recipe\": \"AIC_CLOUD_REGION|DELIMITER|nfRole|DELIMITER|TIMESTAMP\" }, { \"naming-properties\": [ { \"property-name\": \"VNF_NAME\" }, { \"property-name\": \"SEQUENCE\", \"increment-sequence\": { \"max\": \"zzz\", \"scope\": \"ENTIRETY\", \"start-value\": \"001\", \"length\": \"3\", \"increment\": \"1\", \"sequence-type\": \"alpha-numeric\" } }, { \"property-name\": \"NFC_NAMING_CODE\" }, { \"property-value\": \"_\", \"property-name\": \"DELIMITER\" } ], \"naming-type\": \"VNFC\", \"naming-recipe\": \"VNF_NAME|DELIMITER|NFC_NAMING_CODE|DELIMITER|SEQUENCE\" }, { \"naming-properties\": [ { \"property-name\": \"VNF_NAME\" }, { \"property-value\": \"_\", \"property-name\": \"DELIMITER\" }, { \"property-name\": \"VF_MODULE_LABEL\" }, { \"property-name\": \"VF_MODULE_TYPE\" }, { \"property-name\": \"SEQUENCE\", \"increment-sequence\": { \"max\": \"zzz\", \"scope\": \"PRECEEDING\", \"start-value\": \"01\", \"length\": \"3\", \"increment\": \"1\", \"sequence-type\": \"alpha-numeric\" } } ], \"naming-type\": \"VF-MODULE\", \"naming-recipe\": \"VNF_NAME|DELIMITER|VF_MODULE_LABEL|DELIMITER|VF_MODULE_TYPE|DELIMITER|SEQUENCE\" } ] } }",
+    "policyName": "SDNC_Policy.ONAP_VNF_NAMING_TIMESTAMP",
     "policyConfigType": "MicroService",
     "onapName": "SDNC",
     "riskLevel": "4",
     "riskType": "test",
     "guard": "false",
     "priority": "4",
-    "description": "ONAP_VFW_NAMING_TIMESTAMP"
-}' 'https://{{.Values.global.pdp.nameOverride}}:{{.Values.config.pdpPort}}/pdp/api/createPolicy'
-
-sleep 2
-
-echo "Create SDNC vPG Naming Policy"
-curl -k -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-    "configBody": "{\"service\":\"SDNC-GenerateName\",\"version\":\"CSIT\",\"content\":{\"policy-instance-name\":\"ONAP_VPG_NAMING_TIMESTAMP\",\"naming-models\":[{\"naming-properties\":[{\"property-name\":\"AIC_CLOUD_REGION\"},{\"property-name\":\"nfRole\"},{\"property-name\":\"TIMESTAMP\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"}],\"naming-type\":\"VNF\",\"nfRole\":\"vPG\",\"naming-recipe\":\"AIC_CLOUD_REGION|DELIMITER|nfRole|DELIMITER|TIMESTAMP\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"},{\"property-name\":\"SEQUENCE\",\"increment-sequence\":{\"max\":\"zzz\",\"scope\":\"ENTIRETY\",\"start-value\":\"001\",\"length\":\"3\",\"increment\":\"1\",\"sequence-type\":\"alpha-numeric\"}},{\"property-name\":\"NFC_NAMING_CODE\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"}],\"naming-type\":\"VNFC\",\"nfRole\":\"vPG\",\"naming-recipe\":\"VNF_NAME|DELIMITER|NFC_NAMING_CODE|DELIMITER|SEQUENCE\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"},{\"property-name\":\"VF_MODULE_LABEL\"},{\"property-name\":\"VF_MODULE_TYPE\"},{\"property-name\":\"SEQUENCE\",\"increment-sequence\":{\"max\":\"zzz\",\"scope\":\"PRECEEDING\",\"start-value\":\"01\",\"length\":\"3\",\"increment\":\"1\",\"sequence-type\":\"alpha-numeric\"}}],\"naming-type\":\"VF-MODULE\",\"nfRole\":\"vPG\",\"naming-recipe\":\"VNF_NAME|DELIMITER|VF_MODULE_LABEL|DELIMITER|VF_MODULE_TYPE|DELIMITER|SEQUENCE\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"}],\"naming-type\":\"KEY\",\"nfRole\":\"vPG\",\"naming-recipe\":\"VNF_NAME\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"},{\"property-value\":\"protected\",\"property-name\":\"CONSTANT\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"}],\"naming-type\":\"protected_private_net_id\",\"nfRole\":\"vPG\",\"naming-recipe\":\"VNF_NAME|DELIMITER|CONSTANT\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"},{\"property-value\":\"unp
rotected\",\"property-name\":\"CONSTANT\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"}],\"naming-type\":\"unprotected_private_net_id\",\"nfRole\":\"vPG\",\"naming-recipe\":\"VNF_NAME|DELIMITER|CONSTANT\"}]}}",
-    "policyName": "SDNC_Policy.ONAP_VPG_NAMING_TIMESTAMP",
-    "policyConfigType": "MicroService",
-    "onapName": "SDNC",
-    "riskLevel": "4",
-    "riskType": "test",
-    "guard": "false",
-    "priority": "4",
-    "description": "ONAP_VPG_NAMING_TIMESTAMP"
-}' 'https://{{.Values.global.pdp.nameOverride}}:{{.Values.config.pdpPort}}/pdp/api/createPolicy'
-
-sleep 2
-
-echo "Create SDNC vSN Naming Policy"
-curl -k -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-    "configBody": "{\"service\":\"SDNC-GenerateName\",\"version\":\"CSIT\",\"content\":{\"policy-instance-name\":\"ONAP_VSN_NAMING_TIMESTAMP\",\"naming-models\":[{\"naming-properties\":[{\"property-name\":\"AIC_CLOUD_REGION\"},{\"property-name\":\"nfRole\"},{\"property-name\":\"TIMESTAMP\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"}],\"naming-type\":\"VNF\",\"nfRole\":\"vSN\",\"naming-recipe\":\"AIC_CLOUD_REGION|DELIMITER|nfRole|DELIMITER|TIMESTAMP\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"},{\"property-name\":\"SEQUENCE\",\"increment-sequence\":{\"max\":\"zzz\",\"scope\":\"ENTIRETY\",\"start-value\":\"001\",\"length\":\"3\",\"increment\":\"1\",\"sequence-type\":\"alpha-numeric\"}},{\"property-name\":\"NFC_NAMING_CODE\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"}],\"naming-type\":\"VNFC\",\"nfRole\":\"vSN\",\"naming-recipe\":\"VNF_NAME|DELIMITER|NFC_NAMING_CODE|DELIMITER|SEQUENCE\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"},{\"property-name\":\"VF_MODULE_LABEL\"},{\"property-name\":\"VF_MODULE_TYPE\"},{\"property-name\":\"SEQUENCE\",\"increment-sequence\":{\"max\":\"zzz\",\"scope\":\"PRECEEDING\",\"start-value\":\"01\",\"length\":\"3\",\"increment\":\"1\",\"sequence-type\":\"alpha-numeric\"}}],\"naming-type\":\"VF-MODULE\",\"nfRole\":\"vSN\",\"naming-recipe\":\"VNF_NAME|DELIMITER|VF_MODULE_LABEL|DELIMITER|VF_MODULE_TYPE|DELIMITER|SEQUENCE\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"}],\"naming-type\":\"KEY\",\"nfRole\":\"vSN\",\"naming-recipe\":\"VNF_NAME\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"},{\"property-value\":\"protected\",\"property-name\":\"CONSTANT\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"}],\"naming-type\":\"protected_private_net_id\",\"nfRole\":\"vSN\",\"naming-recipe\":\"VNF_NAME|DELIMITER|CONSTANT\"},{\"naming-properties\":[{\"property-name\":\"VNF_NAME\"},{\"property-value\":\"unp
rotected\",\"property-name\":\"CONSTANT\"},{\"property-value\":\"_\",\"property-name\":\"DELIMITER\"}],\"naming-type\":\"unprotected_private_net_id\",\"nfRole\":\"vSN\",\"naming-recipe\":\"VNF_NAME|DELIMITER|CONSTANT\"}]}}",
-    "policyName": "SDNC_Policy.ONAP_VSN_NAMING_TIMESTAMP",
-    "policyConfigType": "MicroService",
-    "onapName": "SDNC",
-    "riskLevel": "4",
-    "riskType": "test",
-    "guard": "false",
-    "priority": "4",
-    "description": "ONAP_VSN_NAMING_TIMESTAMP"
+    "description": "ONAP_VNF_NAMING_TIMESTAMP"
 }' 'https://{{.Values.global.pdp.nameOverride}}:{{.Values.config.pdpPort}}/pdp/api/createPolicy'
 
 #########################################Creating OOF PCI Policies##########################################
@@ -479,28 +449,10 @@ echo "Pushing SDNC Naming Policies"
 
 sleep 2
 
-echo "pushPolicy : PUT : SDNC_Policy.ONAP_VFW_NAMING_TIMESTAMP"
-curl -k -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-  "pdpGroup": "default",
-  "policyName": "SDNC_Policy.ONAP_VFW_NAMING_TIMESTAMP",
-  "policyType": "MicroService"
-}' 'https://{{.Values.global.pdp.nameOverride}}:{{.Values.config.pdpPort}}/pdp/api/pushPolicy'
-
-sleep 10
-
-echo "pushPolicy : PUT : SDNC_Policy.ONAP_VPG_NAMING_TIMESTAMP"
-curl -k -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-  "pdpGroup": "default",
-  "policyName": "SDNC_Policy.ONAP_VPG_NAMING_TIMESTAMP",
-  "policyType": "MicroService"
-}' 'https://{{.Values.global.pdp.nameOverride}}:{{.Values.config.pdpPort}}/pdp/api/pushPolicy'
-
-sleep 10
-
-echo "pushPolicy : PUT : SDNC_Policy.ONAP_VSN_NAMING_TIMESTAMP"
+echo "pushPolicy : PUT : SDNC_Policy.ONAP_VNF_NAMING_TIMESTAMP"
 curl -k -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
   "pdpGroup": "default",
-  "policyName": "SDNC_Policy.ONAP_VSN_NAMING_TIMESTAMP",
+  "policyName": "SDNC_Policy.ONAP_VNF_NAMING_TIMESTAMP",
   "policyType": "MicroService"
 }' 'https://{{.Values.global.pdp.nameOverride}}:{{.Values.config.pdpPort}}/pdp/api/pushPolicy'
 
index 31b8643..8b76f64 100755 (executable)
@@ -41,6 +41,10 @@ function usage
        echo "       demo-k8s.sh <namespace> instantiateVFW"
        echo "               - Instantiate vFW module for the demo customer (DemoCust<uuid>)"
        echo " "
+       echo "       demo-k8s.sh <namespace> instantiateVFWdirectso  csar_filename"
+       echo "               - Instantiate vFW module using direct SO interface using previously distributed model "
+        echo "                 that is in /tmp/csar in robot container"
+       echo " "
        echo "       demo-k8s.sh <namespace> deleteVNF <module_name from instantiateVFW>"
        echo "               - Delete the module created by instantiateVFW"
        echo " "
@@ -134,6 +138,16 @@ do
                        VARIABLES="$VARIABLES -v GLOBAL_BUILD_NUMBER:$$"
                        shift
                        ;;
+        instantiateVFWdirectso)
+                        TAG="instantiateVFWdirectso"
+                        shift
+                        if [ $# -ne 1 ];then
+                                        echo "Usage: demo-k8s.sh <namespace> instantiateVFWdirectso <csar_filename>"
+                                        exit
+                                fi
+                        VARIABLES="$VARIABLES -v CSAR_FILE:$1 -v GLOBAL_BUILD_NUMBER:$$"
+                        shift
+                        ;;
        deleteVNF)
                        TAG="deleteVNF"
                        shift
index aaa3bc8..5d42f04 100755 (executable)
 #!/bin/bash
 
 #
-# Run the testsuite for the passed tag. Valid tags are ete, health, closedloop, instantiate
+# Run the testsuite for the passed tag. Valid tags are listed in usage help
 # Please clean up logs when you are done...
-# Note: Do not run multiple concurrent ete.sh as the --display is not parameterized and tests will collide
 #
 if [ "$1" == "" ] || [ "$2" == "" ]; then
-   echo "Usage: ete-k8s.sh [namespace] [ health | healthdist | distribute | instantiate | instantiateVFWCL | instantiateDemoVFWCL |  | portal ]"
+   echo "Usage: ete-k8s.sh [namespace] [tag]"
+   echo ""
+   echo "  List of test case tags (filename for intent: tag)"
+   echo ""
+   echo "  cds.robot: cds"
+   echo ""
+   echo "  clamp.robot: clamp"
+   echo ""
+   echo "  demo.robot: InitDemo, InitCustomer, APPCCDTPreloadDemo, APPCMountPointDemo, DistributeDemoVFWDT, DistributeVFWNG,"
+   echo "              InitDistribution, PreloadDemo, deleteVNF, heatbridge, instantiateDemoVFWCL, instantiateVFW, instantiateVFWCL, instantiateVFWDT"
+   echo ""
+   echo "  health-check.robot: health, core, small, medium, 3rdparty, api, datarouter, externalapi, health-aaf, health-aai, health-appc,"
+   echo "                      health-clamp, health-cli, health-dcae, health-dmaap, health-log, health-modeling, health-msb,"
+   echo "                      health-multicloud, health-oof, health-policy, health-pomba, health-portal, health-sdc, health-sdnc,"
+   echo "                      health-so, health-uui, health-vfc, health-vid, health-vnfsdk, healthdist, healthlogin, healthmr,"
+   echo "                      healthportalapp, multicloud, oom"
+   echo ""
+   echo " hvves.robot: HVVES, ete"
+   echo ""
+   echo " model-distribution-vcpe.robot: distributevCPEResCust"
+   echo ""
+   echo " model-distribution.robot: distribute, distributeVFWDT, distributeVLB"
+   echo ""
+   echo " oof-*.robot: cmso, has, homing"
+   echo ""
+   echo " pnf-registration.robot: ete, pnf_registrate"
+   echo ""
+   echo " post-install-tests.robot dmaapacl, postinstall"
+   echo ""
+   echo " update_onap_page.robot: UpdateWebPage"
+   echo ""
+   echo " vnf-orchestration-direct-so.robot: instantiateVFWdirectso"
+   echo ""
+   echo " vnf-orchestration.robot: instantiate, instantiateNoDelete, stability72hr"
    exit
 fi
 
@@ -30,7 +62,6 @@ export NAMESPACE="$1"
 
 POD=$(kubectl --namespace $NAMESPACE get pods | sed 's/ .*//'| grep robot)
 
-
 TAGS="-i $2"
 
 ETEHOME=/var/opt/ONAP
index 02b79f3..c58d8a8 100755 (executable)
 #!/bin/bash
 
 #
-# Run the testsuite for the passed tag. Valid tags are ete, health, closedloop, instantiate
+# Run the health-check testsuites for the tags discovered by helm list
 # Please clean up logs when you are done...
-# Note: Do not run multiple concurrent ete.sh as the --display is not parameterized and tests will collide
 #
 if [ "$1" == "" ] ;  then
-   echo "Usage: eteHelm-k8s.sh namespace  "
-   echo " list projects via helm list and runs health-check with those tags except dev and dev-consul "
+   echo "Usage: eteHelm-k8s.sh [namespace]"
+   echo " list projects via helm list and runs health-check with those tags except dev and dev-consul"
    exit
 fi
 
index 6ea4930..453af83 100644 (file)
 # limitations under the License.
 
 # aaf info - everything is from the private oam network (also called onap private network)
-GLOBAL_AAF_SERVER = "https://aaf-service.{{include "common.namespace" .}}:8100"
-GLOBAL_AAF_USERNAME = "{{ .Values.aafUsername }}"
-GLOBAL_AAF_PASSWORD = "{{ .Values.aafPassword }}"
+GLOBAL_AAF_SERVER = 'https://aaf-service.{{include "common.namespace" .}}:8100'
+GLOBAL_AAF_USERNAME = '{{ .Values.aafUsername }}'
+GLOBAL_AAF_PASSWORD = '{{ .Values.aafPassword }}'
 # aai info - everything is from the private oam network (also called onap private network)
 GLOBAL_AAI_SERVER_PROTOCOL = "https"
 GLOBAL_AAI_SERVER_PORT = "8443"
-GLOBAL_AAI_USERNAME = "{{ .Values.aaiUsername }}"
-GLOBAL_AAI_PASSWORD = "{{ .Values.aaiPassword}}"
+GLOBAL_AAI_USERNAME = '{{ .Values.aaiUsername }}'
+GLOBAL_AAI_PASSWORD = '{{ .Values.aaiPassword}}'
 # appc info - everything is from the private oam network (also called onap private network)
 GLOBAL_APPC_SERVER_PROTOCOL = "http"
 GLOBAL_APPC_SERVER_PORT = "8282"
-GLOBAL_APPC_USERNAME = "{{ .Values.appcUsername }}"
-GLOBAL_APPC_PASSWORD = "{{ .Values.appcPassword }}"
+GLOBAL_APPC_USERNAME = '{{ .Values.appcUsername }}'
+GLOBAL_APPC_PASSWORD = '{{ .Values.appcPassword }}'
 GLOBAL_APPC_CDT_SERVER_PROTOCOL = "https"
 GLOBAL_APPC_CDT_SERVER_PORT = "18080"
 GLOBAL_APPC_CDT_USERNAME = "demo"
@@ -48,17 +48,28 @@ GLOBAL_CLI_SERVER_PORT = "8080"
 # dcae info - everything is from the private oam network (also called onap private network)
 GLOBAL_DCAE_SERVER_PROTOCOL = "http"
 GLOBAL_DCAE_HEALTH_SERVER_PORT = "80"
-GLOBAL_DCAE_USERNAME = "{{ .Values.dcaeUsername }}"
-GLOBAL_DCAE_PASSWORD = "{{ .Values.dcaePassword}}"
+GLOBAL_DCAE_USERNAME = '{{ .Values.dcaeUsername }}'
+GLOBAL_DCAE_PASSWORD = '{{ .Values.dcaePassword}}'
+# dcae hv-ves info
+GLOBAL_DCAE_HVVES_SERVER_NAME = 'dcae-hv-ves-collector.{{include "common.namespace" .}}'
+GLOBAL_DCAE_HVVES_SERVER_PORT = "6061"
 # data router info - everything is from the private oam network (also called onap private network)
 GLOBAL_DMAAP_DR_PROV_SERVER_PROTOCOL = "http"
 GLOBAL_DMAAP_DR_PROV_SERVER_PORT = "8080"
 GLOBAL_DMAAP_DR_NODE_SERVER_PROTOCOL = "http"
 GLOBAL_DMAAP_DR_NODE_SERVER_PORT = "8080"
+# dmaap message router info
+GLOBAL_DMAAP_MESSAGE_ROUTER_SERVER_NAME = 'message-router.{{include "common.namespace" .}}'
+GLOBAL_DMAAP_MESSAGE_ROUTER_SERVER_PORT = "3904"
+# dmaap kafka info
+GLOBAL_DMAAP_KAFKA_SERVER_NAME = 'message-router-kafka.{{include "common.namespace" .}}'
+GLOBAL_DMAAP_KAFKA_SERVER_PORT = "9092"
+GLOBAL_DMAAP_KAFKA_JAAS_USERNAME = '{{ .Values.kafkaJaasUsername }}'
+GLOBAL_DMAAP_KAFKA_JAAS_PASSWORD = '{{ .Values.kafkaJaasPassword }}'
 # DROOL server port and credentials
 GLOBAL_DROOLS_SERVER_PORT = "9696"
-GLOBAL_DROOLS_USERNAME = "{{ .Values.droolsUsername }}"
-GLOBAL_DROOLS_PASSWORD = "{{ .Values.droolsPassword }}"
+GLOBAL_DROOLS_USERNAME = '{{ .Values.droolsUsername }}'
+GLOBAL_DROOLS_PASSWORD = '{{ .Values.droolsPassword }}'
 # log server config - NOTE: no log server is run in HEAT; only on OOM
 GLOBAL_LOG_SERVER_PROTOCOL = "http"
 GLOBAL_LOG_ELASTICSEARCH_PORT = "9200"
@@ -86,8 +97,8 @@ GLOBAL_MR_SERVER_PROTOCOL = "http"
 GLOBAL_MR_SERVER_PORT = "3904"
 # bus controller info
 GLOBAL_BC_HTTPS_SERVER_PORT = "8443"
-GLOBAL_BC_USERNAME = "{{ .Values.bcUsername }}"
-GLOBAL_BC_PASSWORD = "{{ .Values.bcPassword }}"
+GLOBAL_BC_USERNAME = '{{ .Values.bcUsername }}'
+GLOBAL_BC_PASSWORD = '{{ .Values.bcPassword }}'
 # mso info - everything is from the private oam network (also called onap private network)
 GLOBAL_MSO_SERVER_PROTOCOL = "http"
 GLOBAL_MSO_SERVER_PORT = "8080"
@@ -101,9 +112,9 @@ GLOBAL_MSO_REQDB_SERVER_PORT = "8083"
 GLOBAL_MSO_SDNC_SERVER_PORT =  "8086"
 GLOBAL_MSO_VFC_SERVER_PORT = "8084"
 GLOBAL_MSO_VNFM_SERVER_PORT = "9092"
-GLOBAL_MSO_USERNAME = "{{ .Values.soUsername }}"
-GLOBAL_MSO_CATDB_USERNAME = "{{ .Values.soCatdbUsername }}"
-GLOBAL_MSO_PASSWORD = "{{ .Values.soPassword }}"
+GLOBAL_MSO_USERNAME = '{{ .Values.soUsername }}'
+GLOBAL_MSO_CATDB_USERNAME = '{{ .Values.soCatdbUsername }}'
+GLOBAL_MSO_PASSWORD = '{{ .Values.soPassword }}'
 # robot uses MSO_PASSWORD for both SO and CATDB
 # music info - everything is from the private oam network (also called onap private network)
 GLOBAL_MUSIC_SERVER_PROTOCOL = "http"
@@ -129,32 +140,34 @@ GLOBAL_PGN_PORT = "2831"
 GLOBAL_POLICY_SERVER_PROTOCOL = "https"
 GLOBAL_POLICY_SERVER_PORT = "8081"
 GLOBAL_POLICY_HEALTHCHECK_PORT = "6969"
-GLOBAL_POLICY_AUTH = "{{ .Values.policyAuth}}"
-GLOBAL_POLICY_CLIENTAUTH = "{{ .Values.policyClientAuth}}"
-GLOBAL_POLICY_USERNAME = "{{ .Values.policyUsername }}"
-GLOBAL_POLICY_PASSWORD = "{{ .Values.policyPassword }}"
+GLOBAL_POLICY_AUTH = '{{ .Values.policyAuth}}'
+GLOBAL_POLICY_CLIENTAUTH = '{{ .Values.policyClientAuth}}'
+GLOBAL_POLICY_USERNAME = '{{ .Values.policyUsername }}'
+GLOBAL_POLICY_PASSWORD = '{{ .Values.policyPassword }}'
+GLOBAL_POLICY_HEALTHCHECK_USERNAME = '{{ .Values.policyComponentUsername }}'
+GLOBAL_POLICY_HEALTHCHECK_PASSWORD = '{{ .Values.policyComponentPassword }}'
 # portal info - everything is from the private oam network (also called onap private network)
 GLOBAL_PORTAL_SERVER_PROTOCOL = "http"
 GLOBAL_PORTAL_SERVER_PORT = "8989"
-GLOBAL_PORTAL_USERNAME = "{{ .Values.portalUsername }}"
-GLOBAL_PORTAL_PASSWORD = "{{ .Values.portalPassword }}"
+GLOBAL_PORTAL_USERNAME = '{{ .Values.portalUsername }}'
+GLOBAL_PORTAL_PASSWORD = '{{ .Values.portalPassword }}'
 # sdngc info - everything is from the private oam network (also called onap private network)
 GLOBAL_SDNGC_SERVER_PROTOCOL = "http"
 GLOBAL_SDNGC_REST_PORT = "8282"
 GLOBAL_SDNGC_ADMIN_PORT = "8843"
-GLOBAL_SDNGC_USERNAME = "{{ .Values.sdncUsername }}"
-GLOBAL_SDNGC_PASSWORD = "{{ .Values.sdncPassword }}"
+GLOBAL_SDNGC_USERNAME = '{{ .Values.sdncUsername }}'
+GLOBAL_SDNGC_PASSWORD = '{{ .Values.sdncPassword }}'
 # sms (AAF)  info
 GLOBAL_SMS_SERVER_PROTOCOL = "https"
-GLOBAL_SMS_SERVER_NAME = "aaf-sms.{{include "common.namespace" .}}"
+GLOBAL_SMS_SERVER_NAME = 'aaf-sms.{{include "common.namespace" .}}'
 GLOBAL_SMS_SERVER_PORT = "10443"
 # vid info - everything is from the private oam network (also called onap private network)
-GLOBAL_VID_SERVER_PROTOCOL = "{{ .Values.vidServerProtocol }}"
-GLOBAL_VID_SERVER_PORT = "{{ .Values.vidServerPort }}"
-GLOBAL_VID_USERNAME = "{{ .Values.vidUsername }}"
-GLOBAL_VID_PASSWORD = "{{ .Values.vidPassword}}"
-GLOBAL_VID_HEALTH_USERNAME = "{{ .Values.vidHealthUsername }}"
-GLOBAL_VID_HEALTH_PASSWORD = "{{ .Values.vidHealthPassword }}"
+GLOBAL_VID_SERVER_PROTOCOL = '{{ .Values.vidServerProtocol }}'
+GLOBAL_VID_SERVER_PORT = '{{ .Values.vidServerPort }}'
+GLOBAL_VID_USERNAME = '{{ .Values.vidUsername }}'
+GLOBAL_VID_PASSWORD = '{{ .Values.vidPassword}}'
+GLOBAL_VID_HEALTH_USERNAME = '{{ .Values.vidHealthUsername }}'
+GLOBAL_VID_HEALTH_PASSWORD = '{{ .Values.vidHealthPassword }}'
 # vnfsdk info - everything is from the private oam network (also called onap private network)
 GLOBAL_VNFSDK_SERVER_PROTOCOL = "http"
 GLOBAL_VNFSDK_SERVER_PORT = "8702"
index 7d42fd5..e46b5fc 100644 (file)
 
 # File generated from /opt/config
 #
-GLOBAL_INJECTED_AAF_IP_ADDR = "aaf-service.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_AAI1_IP_ADDR = "aai.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_AAI2_IP_ADDR = "N/A"
-GLOBAL_INJECTED_APPC_IP_ADDR = "appc.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_APPC_CDT_IP_ADDR = "appc-cdt.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_ARTIFACTS_VERSION = "{{.Values.demoArtifactsVersion}}"
-GLOBAL_INJECTED_CLAMP_IP_ADDR = "clamp.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_CLI_IP_ADDR = "cli.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_CLOUD_ENV = "openstack"
-GLOBAL_INJECTED_DCAE_IP_ADDR = "dcae-healthcheck.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_DMAAP_DR_PROV_IP_ADDR = "dmaap-dr-prov.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_DMAAP_DR_NODE_IP_ADDR = "dmaap-dr-node.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_DNS_IP_ADDR = "N/A"
-GLOBAL_INJECTED_DOCKER_VERSION = "1.2-STAGING-latest"
-GLOBAL_INJECTED_EXTERNAL_DNS = "N/A"
-GLOBAL_INJECTED_LOG_ELASTICSEARCH_IP_ADDR = "log-es.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_LOG_KIBANA_IP_ADDR = "log-kibana.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_LOG_LOGSTASH_IP_ADDR = "log-ls-http.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_POMBA_AAI_CONTEXT_BUILDER_IP_ADDR = "pomba-aaictxbuilder.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_POMBA_SDC_CONTEXT_BUILDER_IP_ADDR = "pomba-sdcctxbuilder.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_POMBA_NETWORK_DISC_CONTEXT_BUILDER_IP_ADDR = "pomba-networkdiscoveryctxbuilder.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_POMBA_SERVICE_DECOMPOSITION_IP_ADDR = "pomba-servicedecomposition.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_POMBA_SDNC_CTX_BUILDER_IP_ADDR = "pomba-sdncctxbuilder.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_POMBA_NETWORKDISCOVERY_MICROSERVICE_IP_ADDR = "pomba-networkdiscovery.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_POMBA_VALIDATION_SERVICE_IP_ADDR = "pomba-validation-service.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_POMBA_KIBANA_IP_ADDR = "pomba-kibana.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_POMBA_ELASTIC_SEARCH_IP_ADDR = "pomba-es.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_POMBA_CONTEX_TAGGREGATOR_IP_ADDR = "pomba-contextaggregator.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_KEYSTONE = "{{ .Values.openStackKeyStoneUrl }}"
-GLOBAL_INJECTED_MR_IP_ADDR = "message-router.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_BC_IP_ADDR = "dmaap-bc.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_MUSIC_IP_ADDR = "music.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_NBI_IP_ADDR = "nbi.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_NETWORK = "{{ .Values.openStackPrivateNetId }}"
-GLOBAL_INJECTED_NEXUS_DOCKER_REPO = "nexus3.onap.org:10001"
-GLOBAL_INJECTED_NEXUS_PASSWORD = "docker"
-GLOBAL_INJECTED_NEXUS_REPO = "https://nexus.onap.org/content/sites/raw"
-GLOBAL_INJECTED_NEXUS_USERNAME = "docker"
-GLOBAL_INJECTED_OOF_IP_ADDR = "N/A"
-GLOBAL_INJECTED_OOF_HOMING_IP_ADDR = "oof-has-api.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_OOF_SNIRO_IP_ADDR = "oof-osdf.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_OOF_CMSO_IP_ADDR = "oof-cmso.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_MSB_IP_ADDR = "msb-iag.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_OPENSTACK_API_KEY = "{{ .Values.config.openStackEncryptedPasswordHere}}"
-GLOBAL_INJECTED_OPENSTACK_PASSWORD = "{{ .Values.openStackPassword }}"
-GLOBAL_INJECTED_OPENSTACK_TENANT_ID = "{{ .Values.openStackTenantId }}"
-GLOBAL_INJECTED_OPENSTACK_USERNAME = "{{ .Values.openStackUserName }}"
-GLOBAL_INJECTED_OPENSTACK_PROJECT_NAME = "{{ .Values.openStackProjectName }}"
-GLOBAL_INJECTED_OPENSTACK_DOMAIN_ID = "{{ .Values.openStackDomainId }}"
-GLOBAL_INJECTED_OPENSTACK_KEYSTONE_API_VERSION = "{{ .Values.openStackKeystoneAPIVersion }}"
-GLOBAL_INJECTED_REGION_THREE = "{{ .Values.openStackRegionRegionThree }}"
-GLOBAL_INJECTED_KEYSTONE_REGION_THREE = "{{ .Values.openStackKeyStoneUrlRegionThree }}"
-GLOBAL_INJECTED_OPENSTACK_KEYSTONE_API_VERSION_REGION_THREE = "{{ .Values.openStackKeystoneAPIVersionRegionThree }}"
-GLOBAL_INJECTED_OPENSTACK_USERNAME_REGION_THREE = "{{ .Values.openStackUserNameRegionThree }}"
-GLOBAL_INJECTED_OPENSTACK_PASSWORD_REGION_THREE = "{{ .Values.openStackPasswordRegionThree }}"
-GLOBAL_INJECTED_OPENSTACK_MSO_ENCRYPTED_PASSWORD_REGION_THREE  = "{{ .Values.openSackMsoEncryptdPasswordRegionThree }}"
-GLOBAL_INJECTED_OPENSTACK_TENANT_ID_REGION_THREE = "{{ .Values.openStackTenantIdRegionThree }}"
-GLOBAL_INJECTED_OPENSTACK_PROJECT_DOMAIN_REGION_THREE = "{{ .Values.openStackProjectNameRegionThree }}"
-GLOBAL_INJECTED_OPENSTACK_USER_DOMAIN_REGION_THREE = "{{ .Values.openStackDomainIdRegionThree }}"
-GLOBAL_INJECTED_OPENSTACK_OAM_NETWORK_CIDR_PREFIX = "{{ .Values.openStackOamNetworkCidrPrefix }}"
-GLOBAL_INJECTED_POLICY_IP_ADDR = "pdp.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_POLICY_HEALTHCHECK_IP_ADDR = "drools.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_PORTAL_IP_ADDR = "portal-app.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_PUBLIC_NET_ID = "{{ .Values.openStackPublicNetId }}"
-GLOBAL_INJECTED_REGION = "{{ .Values.openStackRegion }}"
-GLOBAL_INJECTED_SCRIPT_VERSION = "{{ .Values.scriptVersion }}"
-GLOBAL_INJECTED_SDC_BE_IP_ADDR = "sdc-be.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_SDC_BE_ONBOARD_IP_ADDR = "sdc-onboarding-be.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_SDC_FE_IP_ADDR = "sdc-fe.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_SDC_IP_ADDR = "N/A"
-GLOBAL_INJECTED_SDNC_IP_ADDR = "sdnc.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_SDNC_PORTAL_IP_ADDR = "sdnc-portal.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_SO_APIHAND_IP_ADDR = "so.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_SO_ASDCHAND_IP_ADDR = "so-sdc-controller.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_SO_BPMN_IP_ADDR = "so-bpmn-infra.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_SO_CATDB_IP_ADDR = "so-catalog-db-adapter.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_SO_IP_ADDR = "so.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_SO_OPENSTACK_IP_ADDR = "so-openstack-adapter.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_SO_REQDB_IP_ADDR = "so-request-db-adapter.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_SO_SDNC_IP_ADDR = "so-sdnc-adapter.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_SO_VFC_IP_ADDR = "so-vfc-adapter.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_SO_VNFM_IP_ADDR = "so-vnfm-adapter.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_UBUNTU_1404_IMAGE = "{{ .Values.ubuntu14Image }}"
-GLOBAL_INJECTED_UBUNTU_1604_IMAGE = "{{ .Values.ubuntu16Image }}"
-GLOBAL_INJECTED_VM_IMAGE_NAME = "{{ .Values.ubuntu14Image }}"
-GLOBAL_INJECTED_VID_IP_ADDR = "vid.{{include "common.namespace" .}}"
-GLOBAL_INJECTED_VM_FLAVOR = "{{ .Values.openStackFlavourMedium }}"
-GLOBAL_INJECTED_VNFSDK_IP_ADDR = "refrepo.{{include "common.namespace" .}}"
+GLOBAL_INJECTED_AAF_IP_ADDR = 'aaf-service.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_AAI1_IP_ADDR = 'aai.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_AAI2_IP_ADDR = 'N/A'
+GLOBAL_INJECTED_APPC_IP_ADDR = 'appc.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_APPC_CDT_IP_ADDR = 'appc-cdt.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_ARTIFACTS_VERSION = '{{.Values.demoArtifactsVersion}}'
+GLOBAL_INJECTED_CLAMP_IP_ADDR = 'clamp.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_CLI_IP_ADDR = 'cli.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_CLOUD_ENV = 'openstack'
+GLOBAL_INJECTED_DCAE_IP_ADDR = 'dcae-healthcheck.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_DMAAP_DR_PROV_IP_ADDR = 'dmaap-dr-prov.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_DMAAP_DR_NODE_IP_ADDR = 'dmaap-dr-node.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_DNS_IP_ADDR = 'N/A'
+GLOBAL_INJECTED_DOCKER_VERSION = '1.2-STAGING-latest'
+GLOBAL_INJECTED_EXTERNAL_DNS = 'N/A'
+GLOBAL_INJECTED_LOG_ELASTICSEARCH_IP_ADDR = 'log-es.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_LOG_KIBANA_IP_ADDR = 'log-kibana.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_LOG_LOGSTASH_IP_ADDR = 'log-ls-http.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POMBA_AAI_CONTEXT_BUILDER_IP_ADDR = 'pomba-aaictxbuilder.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POMBA_SDC_CONTEXT_BUILDER_IP_ADDR = 'pomba-sdcctxbuilder.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POMBA_NETWORK_DISC_CONTEXT_BUILDER_IP_ADDR = 'pomba-networkdiscoveryctxbuilder.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POMBA_SERVICE_DECOMPOSITION_IP_ADDR = 'pomba-servicedecomposition.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POMBA_SDNC_CTX_BUILDER_IP_ADDR = 'pomba-sdncctxbuilder.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POMBA_NETWORKDISCOVERY_MICROSERVICE_IP_ADDR = 'pomba-networkdiscovery.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POMBA_VALIDATION_SERVICE_IP_ADDR = 'pomba-validation-service.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POMBA_KIBANA_IP_ADDR = 'pomba-kibana.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POMBA_ELASTIC_SEARCH_IP_ADDR = 'pomba-es.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POMBA_CONTEX_TAGGREGATOR_IP_ADDR = 'pomba-contextaggregator.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_KEYSTONE = '{{ .Values.openStackKeyStoneUrl }}'
+GLOBAL_INJECTED_MR_IP_ADDR = 'message-router.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_BC_IP_ADDR = 'dmaap-bc.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_MUSIC_IP_ADDR = 'music.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_NBI_IP_ADDR = 'nbi.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_NETWORK = '{{ .Values.openStackPrivateNetId }}'
+GLOBAL_INJECTED_NEXUS_DOCKER_REPO = 'nexus3.onap.org:10001'
+GLOBAL_INJECTED_NEXUS_PASSWORD = 'docker'
+GLOBAL_INJECTED_NEXUS_REPO ='https://nexus.onap.org/content/sites/raw'
+GLOBAL_INJECTED_NEXUS_USERNAME = 'docker'
+GLOBAL_INJECTED_OOF_IP_ADDR = 'N/A'
+GLOBAL_INJECTED_OOF_HOMING_IP_ADDR = 'oof-has-api.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_OOF_SNIRO_IP_ADDR = 'oof-osdf.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_OOF_CMSO_IP_ADDR = 'oof-cmso.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_MSB_IP_ADDR = 'msb-iag.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_OPENSTACK_API_KEY = '{{ .Values.config.openStackEncryptedPasswordHere}}'
+GLOBAL_INJECTED_OPENSTACK_PASSWORD = '{{ .Values.openStackPassword }}'
+GLOBAL_INJECTED_OPENSTACK_TENANT_ID = '{{ .Values.openStackTenantId }}'
+GLOBAL_INJECTED_OPENSTACK_USERNAME = '{{ .Values.openStackUserName }}'
+GLOBAL_INJECTED_OPENSTACK_PROJECT_NAME = '{{ .Values.openStackProjectName }}'
+GLOBAL_INJECTED_OPENSTACK_DOMAIN_ID = '{{ .Values.openStackDomainId }}'
+GLOBAL_INJECTED_OPENSTACK_KEYSTONE_API_VERSION = '{{ .Values.openStackKeystoneAPIVersion }}'
+GLOBAL_INJECTED_REGION_THREE = '{{ .Values.openStackRegionRegionThree }}'
+GLOBAL_INJECTED_KEYSTONE_REGION_THREE = '{{ .Values.openStackKeyStoneUrlRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_KEYSTONE_API_VERSION_REGION_THREE = '{{ .Values.openStackKeystoneAPIVersionRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_USERNAME_REGION_THREE = '{{ .Values.openStackUserNameRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_PASSWORD_REGION_THREE = '{{ .Values.openStackPasswordRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_MSO_ENCRYPTED_PASSWORD_REGION_THREE  = '{{ .Values.openSackMsoEncryptdPasswordRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_TENANT_ID_REGION_THREE = '{{ .Values.openStackTenantIdRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_PROJECT_DOMAIN_REGION_THREE = '{{ .Values.openStackProjectNameRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_USER_DOMAIN_REGION_THREE = '{{ .Values.openStackDomainIdRegionThree }}'
+GLOBAL_INJECTED_OPENSTACK_OAM_NETWORK_CIDR_PREFIX = '{{ .Values.openStackOamNetworkCidrPrefix }}'
+GLOBAL_INJECTED_POLICY_IP_ADDR = 'pdp.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POLICY_HEALTHCHECK_IP_ADDR = 'drools.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_PORTAL_IP_ADDR = 'portal-app.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POLICY_API_IP_ADDR = 'policy-api.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POLICY_PAP_IP_ADDR = 'policy-pap.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POLICY_DISTRIBUTION_IP_ADDR = 'policy-distribution.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POLICY_PDPX_IP_ADDR = 'policy-xacml-pdp.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_POLICY_APEX_PDP_IP_ADDR = 'policy-apex-pdp.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_PUBLIC_NET_ID = '{{ .Values.openStackPublicNetId }}'
+GLOBAL_INJECTED_REGION = '{{ .Values.openStackRegion }}'
+GLOBAL_INJECTED_SCRIPT_VERSION = '{{ .Values.scriptVersion }}'
+GLOBAL_INJECTED_SDC_BE_IP_ADDR = 'sdc-be.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_SDC_BE_ONBOARD_IP_ADDR = 'sdc-onboarding-be.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_SDC_FE_IP_ADDR = 'sdc-fe.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_SDC_IP_ADDR = 'N/A'
+GLOBAL_INJECTED_SDNC_IP_ADDR = 'sdnc.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_SDNC_PORTAL_IP_ADDR = 'sdnc-portal.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_SO_APIHAND_IP_ADDR = 'so.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_SO_ASDCHAND_IP_ADDR = 'so-sdc-controller.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_SO_BPMN_IP_ADDR = 'so-bpmn-infra.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_SO_CATDB_IP_ADDR = 'so-catalog-db-adapter.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_SO_IP_ADDR = 'so.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_SO_OPENSTACK_IP_ADDR = 'so-openstack-adapter.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_SO_REQDB_IP_ADDR = 'so-request-db-adapter.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_SO_SDNC_IP_ADDR = 'so-sdnc-adapter.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_SO_VFC_IP_ADDR = 'so-vfc-adapter.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_SO_VNFM_IP_ADDR = 'so-vnfm-adapter.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_UBUNTU_1404_IMAGE = '{{ .Values.ubuntu14Image }}'
+GLOBAL_INJECTED_UBUNTU_1604_IMAGE = '{{ .Values.ubuntu16Image }}'
+GLOBAL_INJECTED_VM_IMAGE_NAME = '{{ .Values.ubuntu14Image }}'
+GLOBAL_INJECTED_VID_IP_ADDR = 'vid.{{include "common.namespace" .}}'
+GLOBAL_INJECTED_VM_FLAVOR = '{{ .Values.openStackFlavourMedium }}'
+GLOBAL_INJECTED_VNFSDK_IP_ADDR = 'refrepo.{{include "common.namespace" .}}'
 
 GLOBAL_INJECTED_PROPERTIES = {
-    "GLOBAL_INJECTED_AAF_IP_ADDR" : "aaf-service.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_AAI1_IP_ADDR" : "aai.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_AAI2_IP_ADDR" : "N/A",
-    "GLOBAL_INJECTED_APPC_IP_ADDR" : "appc.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_APPC_CDT_IP_ADDR" : "appc-cdt.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_ARTIFACTS_VERSION" : "{{.Values.demoArtifactsVersion}}",
-    "GLOBAL_INJECTED_CLAMP_IP_ADDR" : "clamp.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_CLI_IP_ADDR" : "cli.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_CLOUD_ENV" : "openstack",
-    "GLOBAL_INJECTED_DCAE_IP_ADDR" : "dcae-healthcheck.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_DMAAP_DR_PROV_IP_ADDR" : "dmaap-dr-prov.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_DMAAP_DR_NODE_IP_ADDR" : "dmaap-dr-node.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_DNS_IP_ADDR" : "N/A",
-    "GLOBAL_INJECTED_DOCKER_VERSION" : "1.2-STAGING-latest",
-    "GLOBAL_INJECTED_EXTERNAL_DNS" : "N/A",
-    "GLOBAL_INJECTED_KEYSTONE" : "{{ .Values.openStackKeyStoneUrl }}",
-    "GLOBAL_INJECTED_LOG_ELASTICSEARCH_IP_ADDR" : "log-es.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_LOG_KIBANA_IP_ADDR" : "log-kibana.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_LOG_LOGSTASH_IP_ADDR" : "log-ls.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_MR_IP_ADDR" : "message-router.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_BC_IP_ADDR" : "dmaap-bc.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_POMBA_AAI_CONTEXT_BUILDER_IP_ADDR" : "pomba-aaictxbuilder.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_POMBA_SDC_CONTEXT_BUILDER_IP_ADDR" : "pomba-sdcctxbuilder.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_POMBA_NETWORK_DISC_CONTEXT_BUILDER_IP_ADDR" : "pomba-networkdiscovery.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_POMBA_SERVICE_DECOMPOSITION_IP_ADDR" : "pomba-servicedecomposition.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_POMBA_SDNC_CTX_BUILDER_IP_ADDR" : "pomba-sdncctxbuilder.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_POMBA_CONTEX_TAGGREGATOR_IP_ADDR" : "pomba-contextaggregator.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_MUSIC_IP_ADDR" : "music.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_NBI_IP_ADDR" : "nbi.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_NETWORK" : "{{ .Values.openStackPrivateNetId }}",
-    "GLOBAL_INJECTED_NEXUS_DOCKER_REPO" : "nexus3.onap.org:10001",
-    "GLOBAL_INJECTED_NEXUS_PASSWORD" : "docker",
-    "GLOBAL_INJECTED_NEXUS_REPO" : "https://nexus.onap.org/content/sites/raw",
-    "GLOBAL_INJECTED_NEXUS_USERNAME" : "docker",
-    "GLOBAL_INJECTED_OOF_IP_ADDR" : "N/A",
-    "GLOBAL_INJECTED_OOF_HOMING_IP_ADDR" : "oof-has-api.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_OOF_SNIRO_IP_ADDR" : "oof-osdf.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_OOF_CMSO_IP_ADDR" : "oof-cmso.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_MSB_IP_ADDR" : "msb-iag.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_OPENSTACK_API_KEY" : "{{ .Values.config.openStackEncryptedPasswordHere}}",
-    "GLOBAL_INJECTED_OPENSTACK_PASSWORD" : "{{ .Values.openStackPassword }}",
-    "GLOBAL_INJECTED_OPENSTACK_TENANT_ID" : "{{ .Values.openStackTenantId }}",
-    "GLOBAL_INJECTED_OPENSTACK_USERNAME" : "{{ .Values.openStackUserName }}",
-    "GLOBAL_INJECTED_OPENSTACK_PROJECT_NAME" : "{{ .Values.openStackProjectName }}",
-    "GLOBAL_INJECTED_OPENSTACK_DOMAIN_ID" : "{{ .Values.openStackDomainId }}",
-    "GLOBAL_INJECTED_OPENSTACK_KEYSTONE_API_VERSION" : "{{ .Values.openStackKeystoneAPIVersion }}",
-    "GLOBAL_INJECTED_REGION_THREE" : "{{ .Values.openStackRegionRegionThree }}",
-    "GLOBAL_INJECTED_KEYSTONE_REGION_THREE" : "{{ .Values.openStackKeyStoneUrlRegionThree }}",
-    "GLOBAL_INJECTED_OPENSTACK_KEYSTONE_API_VERSION_REGION_THREE" : "{{ .Values.openStackKeystoneAPIVersionRegionThree }}",
-    "GLOBAL_INJECTED_OPENSTACK_USERNAME_REGION_THREE" : "{{ .Values.openStackUserNameRegionThree }}",
-    "GLOBAL_INJECTED_OPENSTACK_PASSWORD_REGION_THREE" : "{{ .Values.openStackPasswordRegionThree }}",
-    "GLOBAL_INJECTED_OPENSTACK_MSO_ENCRYPTED_PASSWORD_REGION_THREE" : "{{ .Values.openSackMsoEncryptdPasswordRegionThree }}",
-    "GLOBAL_INJECTED_OPENSTACK_TENANT_ID_REGION_THREE" : "{{ .Values.openStackTenantIdRegionThree }}",
-    "GLOBAL_INJECTED_OPENSTACK_PROJECT_DOMAIN_REGION_THREE" : "{{ .Values.openStackProjectNameRegionThree }}",
-    "GLOBAL_INJECTED_OPENSTACK_USER_DOMAIN_REGION_THREE" : "{{ .Values.openStackDomainIdRegionThree }}",
-    "GLOBAL_INJECTED_OPENSTACK_OAM_NETWORK_CIDR_PREFIX" : "{{ .Values.openStackOamNetworkCidrPrefix }}",
-    "GLOBAL_INJECTED_POLICY_IP_ADDR" : "pdp.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_POLICY_HEALTHCHECK_IP_ADDR" : "drools.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_PORTAL_IP_ADDR" : "portal-app.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_PUBLIC_NET_ID" : "{{ .Values.openStackPublicNetId }}",
-    "GLOBAL_INJECTED_REGION" : "{{ .Values.openStackRegion }}",
-    "GLOBAL_INJECTED_SDC_BE_IP_ADDR" : "sdc-be.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_SDC_BE_ONBOARD_IP_ADDR" : "sdc-onboarding-be.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_SDC_FE_IP_ADDR" : "sdc-fe.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_SDC_IP_ADDR" : "N/A",
-    "GLOBAL_INJECTED_SCRIPT_VERSION" : "{{ .Values.scriptVersion }}",
-    "GLOBAL_INJECTED_SDNC_IP_ADDR" : "sdnc.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_SDNC_PORTAL_IP_ADDR" : "sdnc-portal.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_SO_APIHAND_IP_ADDR" : "so.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_SO_ASDCHAND_IP_ADDR" : "so-sdc-controller.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_SO_BPMN_IP_ADDR" : "so-bpmn-infra.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_SO_CATDB_IP_ADDR" : "so-catalog-db-adapter.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_SO_IP_ADDR" : "so.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_SO_OPENSTACK_IP_ADDR" : "so-openstack-adapter.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_SO_REQDB_IP_ADDR" : "so-request-db-adapter.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_SO_SDNC_IP_ADDR" : "so-sdnc-adapter.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_SO_VFC_IP_ADDR" : "so-vfc-adapter.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_SO_VNFM_IP_ADDR" : "so-vnfm-adapter.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_UBUNTU_1404_IMAGE" : "{{.Values.ubuntu14Image}}",
-    "GLOBAL_INJECTED_UBUNTU_1604_IMAGE" : "{{.Values.ubuntu16Image}}",
-    "GLOBAL_INJECTED_VM_IMAGE_NAME" : "{{ .Values.ubuntu14Image }}",
-    "GLOBAL_INJECTED_VID_IP_ADDR" : "vid.{{include "common.namespace" .}}",
-    "GLOBAL_INJECTED_VM_FLAVOR" : "{{ .Values.openStackFlavourMedium }}",
-    "GLOBAL_INJECTED_VNFSDK_IP_ADDR" : "refrepo.{{include "common.namespace" .}}"
+    "GLOBAL_INJECTED_AAF_IP_ADDR" : 'aaf-service.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_AAI1_IP_ADDR" : 'aai.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_AAI2_IP_ADDR" : 'N/A',
+    "GLOBAL_INJECTED_APPC_IP_ADDR" : 'appc.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_APPC_CDT_IP_ADDR" : 'appc-cdt.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_ARTIFACTS_VERSION" : '{{.Values.demoArtifactsVersion}}',
+    "GLOBAL_INJECTED_CLAMP_IP_ADDR" : 'clamp.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_CLI_IP_ADDR" : 'cli.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_CLOUD_ENV" : 'openstack',
+    "GLOBAL_INJECTED_DCAE_IP_ADDR" : 'dcae-healthcheck.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_DMAAP_DR_PROV_IP_ADDR" : 'dmaap-dr-prov.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_DMAAP_DR_NODE_IP_ADDR" : 'dmaap-dr-node.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_DNS_IP_ADDR" : 'N/A',
+    "GLOBAL_INJECTED_DOCKER_VERSION" : '1.2-STAGING-latest',
+    "GLOBAL_INJECTED_EXTERNAL_DNS" : 'N/A',
+    "GLOBAL_INJECTED_KEYSTONE" : '{{ .Values.openStackKeyStoneUrl }}',
+    "GLOBAL_INJECTED_LOG_ELASTICSEARCH_IP_ADDR" : 'log-es.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_LOG_KIBANA_IP_ADDR" : 'log-kibana.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_LOG_LOGSTASH_IP_ADDR" : 'log-ls.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_MR_IP_ADDR" : 'message-router.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_BC_IP_ADDR" : 'dmaap-bc.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_POMBA_AAI_CONTEXT_BUILDER_IP_ADDR" : 'pomba-aaictxbuilder.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_POMBA_SDC_CONTEXT_BUILDER_IP_ADDR" : 'pomba-sdcctxbuilder.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_POMBA_NETWORK_DISC_CONTEXT_BUILDER_IP_ADDR" : 'pomba-networkdiscovery.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_POMBA_SERVICE_DECOMPOSITION_IP_ADDR" : 'pomba-servicedecomposition.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_POMBA_SDNC_CTX_BUILDER_IP_ADDR" : 'pomba-sdncctxbuilder.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_POMBA_CONTEX_TAGGREGATOR_IP_ADDR" : 'pomba-contextaggregator.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_MUSIC_IP_ADDR" : 'music.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_NBI_IP_ADDR" : 'nbi.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_NETWORK" : '{{ .Values.openStackPrivateNetId }}',
+    "GLOBAL_INJECTED_NEXUS_DOCKER_REPO" : 'nexus3.onap.org:10001',
+    "GLOBAL_INJECTED_NEXUS_PASSWORD" : 'docker',
+    "GLOBAL_INJECTED_NEXUS_REPO" : 'https://nexus.onap.org/content/sites/raw',
+    "GLOBAL_INJECTED_NEXUS_USERNAME" : 'docker',
+    "GLOBAL_INJECTED_OOF_IP_ADDR" : 'N/A',
+    "GLOBAL_INJECTED_OOF_HOMING_IP_ADDR" : 'oof-has-api.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_OOF_SNIRO_IP_ADDR" : 'oof-osdf.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_OOF_CMSO_IP_ADDR" : 'oof-cmso.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_MSB_IP_ADDR" : 'msb-iag.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_OPENSTACK_API_KEY" : '{{ .Values.config.openStackEncryptedPasswordHere}}',
+    "GLOBAL_INJECTED_OPENSTACK_PASSWORD" : '{{ .Values.openStackPassword }}',
+    "GLOBAL_INJECTED_OPENSTACK_TENANT_ID" : '{{ .Values.openStackTenantId }}',
+    "GLOBAL_INJECTED_OPENSTACK_USERNAME" : '{{ .Values.openStackUserName }}',
+    "GLOBAL_INJECTED_OPENSTACK_PROJECT_NAME" : '{{ .Values.openStackProjectName }}',
+    "GLOBAL_INJECTED_OPENSTACK_DOMAIN_ID" : '{{ .Values.openStackDomainId }}',
+    "GLOBAL_INJECTED_OPENSTACK_KEYSTONE_API_VERSION" : '{{ .Values.openStackKeystoneAPIVersion }}',
+    "GLOBAL_INJECTED_REGION_THREE" : '{{ .Values.openStackRegionRegionThree }}',
+    "GLOBAL_INJECTED_KEYSTONE_REGION_THREE" : '{{ .Values.openStackKeyStoneUrlRegionThree }}',
+    "GLOBAL_INJECTED_OPENSTACK_KEYSTONE_API_VERSION_REGION_THREE" : '{{ .Values.openStackKeystoneAPIVersionRegionThree }}',
+    "GLOBAL_INJECTED_OPENSTACK_USERNAME_REGION_THREE" : '{{ .Values.openStackUserNameRegionThree }}',
+    "GLOBAL_INJECTED_OPENSTACK_PASSWORD_REGION_THREE" : '{{ .Values.openStackPasswordRegionThree }}',
+    "GLOBAL_INJECTED_OPENSTACK_MSO_ENCRYPTED_PASSWORD_REGION_THREE" : '{{ .Values.openSackMsoEncryptdPasswordRegionThree }}',
+    "GLOBAL_INJECTED_OPENSTACK_TENANT_ID_REGION_THREE" : '{{ .Values.openStackTenantIdRegionThree }}',
+    "GLOBAL_INJECTED_OPENSTACK_PROJECT_DOMAIN_REGION_THREE" : '{{ .Values.openStackProjectNameRegionThree }}',
+    "GLOBAL_INJECTED_OPENSTACK_USER_DOMAIN_REGION_THREE" : '{{ .Values.openStackDomainIdRegionThree }}',
+    "GLOBAL_INJECTED_OPENSTACK_OAM_NETWORK_CIDR_PREFIX" : '{{ .Values.openStackOamNetworkCidrPrefix }}',
+    "GLOBAL_INJECTED_POLICY_IP_ADDR" : 'pdp.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_POLICY_HEALTHCHECK_IP_ADDR" : 'drools.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_POLICY_API_IP_ADDR" : 'policy-api.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_POLICY_PAP_IP_ADDR" : 'policy-pap.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_POLICY_DISTRIBUTION_IP_ADDR" : 'policy-distribution.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_POLICY_PDPX_IP_ADDR" :  'policy-xacml-pdp.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_POLICY_APEX_PDP_IP_ADDR" : 'policy-apex-pdp.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_PORTAL_IP_ADDR" : 'portal-app.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_PUBLIC_NET_ID" : '{{ .Values.openStackPublicNetId }}',
+    "GLOBAL_INJECTED_REGION" : '{{ .Values.openStackRegion }}',
+    "GLOBAL_INJECTED_SDC_BE_IP_ADDR" : 'sdc-be.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_SDC_BE_ONBOARD_IP_ADDR" : 'sdc-onboarding-be.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_SDC_FE_IP_ADDR" : 'sdc-fe.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_SDC_IP_ADDR" : 'N/A',
+    "GLOBAL_INJECTED_SCRIPT_VERSION" : '{{ .Values.scriptVersion }}',
+    "GLOBAL_INJECTED_SDNC_IP_ADDR" : 'sdnc.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_SDNC_PORTAL_IP_ADDR" : 'sdnc-portal.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_SO_APIHAND_IP_ADDR" : 'so.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_SO_ASDCHAND_IP_ADDR" : 'so-sdc-controller.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_SO_BPMN_IP_ADDR" : 'so-bpmn-infra.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_SO_CATDB_IP_ADDR" : 'so-catalog-db-adapter.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_SO_IP_ADDR" : 'so.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_SO_OPENSTACK_IP_ADDR" : 'so-openstack-adapter.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_SO_REQDB_IP_ADDR" : 'so-request-db-adapter.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_SO_SDNC_IP_ADDR" : 'so-sdnc-adapter.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_SO_VFC_IP_ADDR" : 'so-vfc-adapter.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_SO_VNFM_IP_ADDR" : 'so-vnfm-adapter.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_UBUNTU_1404_IMAGE" : '{{.Values.ubuntu14Image}}',
+    "GLOBAL_INJECTED_UBUNTU_1604_IMAGE" : '{{.Values.ubuntu16Image}}',
+    "GLOBAL_INJECTED_VM_IMAGE_NAME" : '{{ .Values.ubuntu14Image }}',
+    "GLOBAL_INJECTED_VID_IP_ADDR" : 'vid.{{include "common.namespace" .}}',
+    "GLOBAL_INJECTED_VM_FLAVOR" : '{{ .Values.openStackFlavourMedium }}',
+    "GLOBAL_INJECTED_VNFSDK_IP_ADDR" : 'refrepo.{{include "common.namespace" .}}'
 
 }
index b09a64a..6a73d5b 100755 (executable)
@@ -126,6 +126,8 @@ policyAuth: "dGVzdHBkcDphbHBoYTEyMw=="
 policyClientAuth: "cHl0aG9uOnRlc3Q="
 policyUsername: "demo@people.osaaf.org"
 policyPassword: "demo123456!"
+policyComponentUsername: "healthcheck"
+policyComponentPassword: "zb!XztG34"
 # PORTAL
 portalUsername: "demo"
 portalPassword: "Kp8bJ4SXszM0WXlhak3eHlcse"
@@ -145,6 +147,9 @@ vidHealthPassword: "AppPassword!1"
 # DMAAP BC
 bcUsername: "dmaap-bc@dmaap-bc.onap.org"
 bcPassword: "demo123456!"
+# DMAAP KAFKA JAAS
+kafkaJaasUsername: "admin"
+kafkaJaasPassword: "admin_secret"
 
 # default number of instances
 replicaCount: 1
index 09ebd1d..fcf5283 100644 (file)
@@ -28,8 +28,8 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/sdc-backend:1.4-STAGING-latest
-backendInitImage: onap/sdc-backend-init:1.4-STAGING-latest
+image: onap/sdc-backend:1.4.0
+backendInitImage: onap/sdc-backend-init:1.4.0
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index 9d24075..64f9646 100644 (file)
@@ -28,8 +28,8 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/sdc-cassandra:1.4-STAGING-latest
-cassandraInitImage: onap/sdc-cassandra-init:1.4-STAGING-latest
+image: onap/sdc-cassandra:1.4.0
+cassandraInitImage: onap/sdc-cassandra-init:1.4.0
 
 pullPolicy: Always
 
index 8108d81..bf1d9b4 100644 (file)
@@ -27,9 +27,9 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/dcae-be:1.3-STAGING-latest
+image: onap/dcae-be:1.3.0
 pullPolicy: Always
-backendInitImage: onap/dcae-tools:1.3-STAGING-latest
+backendInitImage: onap/dcae-tools:1.3.0
 
 # flag to enable debugging - application support required
 debugEnabled: false
index cb0b8da..2b8fd90 100644 (file)
@@ -27,7 +27,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/dcae-dt:1.2-STAGING-latest
+image: onap/dcae-dt:1.2.0
 pullPolicy: IfNotPresent
 config:
   javaOptions: -XX:MaxPermSize=256m -Xmx1024m -Dconfig.home=config -Dlog.home=/var/lib/jetty/logs/ -Dlogging.config=config/dcae-dt/logback-spring.xml
index 7999952..4b40bce 100644 (file)
@@ -27,7 +27,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/dcae-fe:1.3-STAGING-latest
+image: onap/dcae-fe:1.3.0
 pullPolicy: Always
 config:
   javaOptions: -XX:MaxPermSize=256m -Xmx1024m -Dconfig.home=config -Dlog.home=/var/lib/jetty/logs/ -Dlogging.config=config/dcae-fe/logback-spring.xml
index 7e23283..e787948 100644 (file)
@@ -27,7 +27,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/dcae-tosca-app:1.3-STAGING-latest
+image: onap/dcae-tosca-app:1.3.0
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index 9ceef30..618b23a 100644 (file)
@@ -35,4 +35,4 @@ spec:
   persistentVolumeReclaimPolicy: {{ .Values.persistence.volumeReclaimPolicy }}
   hostPath:
     path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.persistence.mountSubPath }}
-{{- end -}}
+{{- end -}}
\ No newline at end of file
index 2f343c8..e1f01b6 100644 (file)
@@ -46,4 +46,4 @@ spec:
   storageClassName: "{{ .Values.persistence.storageClass }}"
 {{- end }}
 {{- end }}
-{{- end -}}
+{{- end -}}
\ No newline at end of file
index b4c86ee..a84f738 100644 (file)
@@ -31,8 +31,8 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/sdc-elasticsearch:1.4-STAGING-latest
-elasticInitImage: onap/sdc-init-elasticsearch:1.4-STAGING-latest
+image: onap/sdc-elasticsearch:1.4.0
+elasticInitImage: onap/sdc-init-elasticsearch:1.4.0
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index e5e5a04..e95223b 100644 (file)
@@ -28,7 +28,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/sdc-frontend:1.4-STAGING-latest
+image: onap/sdc-frontend:1.4.0
 pullPolicy: Always
 
 config:
index 5c834d5..21b0b05 100644 (file)
@@ -28,7 +28,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/sdc-kibana:1.4-STAGING-latest
+image: onap/sdc-kibana:1.4.0
 pullPolicy: Always
 
 config:
index 87556b0..70895d3 100644 (file)
@@ -88,6 +88,8 @@ spec:
           - name: SDC_PASSWORD
             valueFrom:
               secretKeyRef: {name: {{ .Release.Name }}-sdc-cs-secrets, key: sdc_password}
+          - name: SDC_CERT_DIR
+            value: {{ .Values.cert.certDir }}
           volumeMounts:
           - name: {{ include "common.fullname" . }}-environments
             mountPath: /root/chef-solo/environments/
@@ -99,6 +101,8 @@ spec:
           - name: {{ include "common.fullname" . }}-logback
             mountPath: /tmp/logback.xml
             subPath: logback.xml
+          - name: {{ include "common.fullname" . }}-cert-storage
+            mountPath: "{{ .Values.cert.certDir }}"
           lifecycle:
             postStart:
               exec:
@@ -133,5 +137,8 @@ spec:
           defaultMode: 0755
       - name:  {{ include "common.fullname" . }}-logs
         emptyDir: {}
+      - name:  {{ include "common.fullname" . }}-cert-storage
+        persistentVolumeClaim:
+          claimName: {{ include "common.fullname" . }}-cert
       imagePullSecrets:
       - name: "{{ include "common.namespace" . }}-docker-registry-key"
diff --git a/kubernetes/sdc/charts/sdc-onboarding-be/templates/pv.yaml b/kubernetes/sdc/charts/sdc-onboarding-be/templates/pv.yaml
new file mode 100644 (file)
index 0000000..b292ff9
--- /dev/null
@@ -0,0 +1,38 @@
+{{/*
+# ================================================================================
+# Copyright (C) 2019, Nordix Foundation. All rights reserved.
+# ================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+{{- if and .Values.cert.persistence.enabled (not .Values.cert.persistence.existingClaim) -}}
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+  name: {{ include "common.fullname" . }}-cert
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+    name: {{ include "common.fullname" . }}
+spec:
+  capacity:
+    storage: {{ .Values.cert.persistence.size}}
+  accessModes:
+    - {{ .Values.cert.persistence.accessMode }}
+  persistentVolumeReclaimPolicy: {{ .Values.cert.persistence.volumeReclaimPolicy }}
+  hostPath:
+    path: {{ .Values.global.persistence.mountPath | default .Values.persistence.mountPath }}/{{ .Release.Name }}/{{ .Values.cert.persistence.mountSubPath }}
+{{- end -}}
\ No newline at end of file
diff --git a/kubernetes/sdc/charts/sdc-onboarding-be/templates/pvc.yaml b/kubernetes/sdc/charts/sdc-onboarding-be/templates/pvc.yaml
new file mode 100644 (file)
index 0000000..eb2c372
--- /dev/null
@@ -0,0 +1,49 @@
+{{/*
+# ================================================================================
+# Copyright (C) 2019, Nordix Foundation. All rights reserved.
+# ================================================================================
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+{{- if and .Values.cert.persistence.enabled (not .Values.cert.persistence.existingClaim) -}}
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: {{ include "common.fullname" . }}-cert
+  namespace: {{ include "common.namespace" . }}
+  labels:
+    app: {{ include "common.name" . }}
+    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    release: "{{ .Release.Name }}"
+    heritage: "{{ .Release.Service }}"
+{{- if .Values.cert.persistence.annotations }}
+  annotations:
+{{ toYaml .Values.cert.persistence.annotations | indent 4 }}
+{{- end }}
+spec:
+  selector:
+    matchLabels:
+      name: {{ include "common.fullname" . }}
+  accessModes:
+    - {{ .Values.cert.persistence.accessMode }}
+  resources:
+    requests:
+      storage: {{ .Values.cert.persistence.size }}
+{{- if .Values.cert.persistence.storageClass }}
+{{- if (eq "-" .Values.cert.persistence.storageClass) }}
+  storageClassName: ""
+{{- else }}
+  storageClassName: "{{ .Values.cert.persistence.storageClass }}"
+{{- end }}
+{{- end }}
+{{- end -}}
\ No newline at end of file
index 9739da1..2b7edd9 100644 (file)
@@ -28,8 +28,8 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/sdc-onboard-backend:1.4-STAGING-latest
-onboardingInitImage: onap/sdc-onboard-cassandra-init:1.4-STAGING-latest
+image: onap/sdc-onboard-backend:1.4.0
+onboardingInitImage: onap/sdc-onboard-cassandra-init:1.4.0
 pullPolicy: Always
 
 # flag to enable debugging - application support required
@@ -90,6 +90,18 @@ persistence:
   mountPath: /dockerdata-nfs
   mountSubPath: /sdc/sdc-cs/CS
 
+##Certificate storage persistence
+##This is temporary solution for SDC-1980
+cert:
+  certDir: /var/lib/jetty/cert
+  persistence:
+    enabled: true
+    size: 10Mi
+    accessMode: ReadOnlyMany
+    volumeReclaimPolicy: Retain
+    mountSubPath: /sdc/onboarding/cert
+
+
 ingress:
   enabled: false
 
index 8737b33..8859b89 100644 (file)
@@ -28,8 +28,8 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/workflow-backend:1.4.0-SNAPSHOT
-configInitImage: onap/workflow-init:1.4.0-SNAPSHOT
+image: onap/workflow-backend:1.4.0
+configInitImage: onap/workflow-init:1.4.0
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index c284f2d..c1babf3 100644 (file)
@@ -70,7 +70,45 @@ spec:
             value: {{ .Values.config.javaOptions }}
           - name: BACKEND
             value: {{ .Values.config.backendServerURL }}
+          - name: IS_HTTPS
+            value: "{{ .Values.config.isHttpsEnabled}}"
+            {{ if and .Values.config.isHttpsEnabled (eq .Values.security.isDefaultStore false) }}
+          - name: KEYSTORE_PASS
+            {{- if .Values.global.security.keysFromCa }}
+            valueFrom:
+              secretKeyRef:
+                name: mft-sdc
+                key: keystore-password.txt
+            {{ else }}
+            value: {{ .Values.global.security.keyStorePass}}
+            {{- end }}
+          - name: TRUSTSTORE_PASS
+            {{- if .Values.global.security.keysFromCa }}
+            valueFrom:
+              secretKeyRef:
+                name: mft-catruststore
+                key: keystore-password.txt
+            {{ else }}
+            value: {{ .Values.global.security.trustStorePass}}
+            {{- end }}
+          - name: TRUSTSTORE_PATH
+            value: "{{ .Values.security.storePath }}/{{ .Values.security.truststoreFilename }}"
+          - name: KEYSTORE_PATH
+            value: "{{ .Values.security.storePath }}/{{ .Values.security.keystoreFilename }}"
+          - name: TRUSTSTORE_TYPE
+            value: {{ .Values.security.truststoreType }}
+          - name: KEYSTORE_TYPE
+            value: {{ .Values.security.keystoreType }}
+            {{ end }}
           volumeMounts:
+          {{ if and .Values.config.isHttpsEnabled (eq .Values.security.isDefaultStore false) }}
+          - name: {{ include "common.fullname" . }}-jetty-https-truststore
+            mountPath: /var/lib/jetty/{{ .Values.security.storePath }}/{{ .Values.security.truststoreFilename }}
+            subPath: {{ .Values.security.truststoreFilename }}
+          - name: {{ include "common.fullname" . }}-jetty-https-keystore
+            mountPath: /var/lib/jetty/{{ .Values.security.storePath }}/{{ .Values.security.keystoreFilename }}
+            subPath: {{ .Values.security.keystoreFilename }}
+          {{ end }}
           - name: {{ include "common.fullname" . }}-localtime
             mountPath: /etc/localtime
             readOnly: true
index 2990de3..87ca360 100644 (file)
@@ -40,10 +40,16 @@ spec:
     - port: {{ .Values.service.internalPort }}
       nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
       name: {{ .Values.service.portName | default "http" }}
+    - port: {{ .Values.service.internalPort2 }}
+      nodePort: {{ .Values.global.nodePortPrefixExt | default .Values.nodePortPrefixExt }}{{ .Values.service.nodePort2 }}
+      name: {{ .Values.service.portName2 | default "https" }}
     {{- else -}}
     - port: {{ .Values.service.externalPort }}
       targetPort: {{ .Values.service.internalPort }}
       name: {{ .Values.service.portName | default "http" }}
+    - port: {{ .Values.service.externalPort2 }}
+      targetPort: {{ .Values.service.internalPort2 }}
+      name: {{ .Values.service.portName2 | default "https" }}
     {{- end}}
   selector:
     app: {{ include "common.name" . }}
index 45d2965..a217de5 100644 (file)
@@ -17,6 +17,7 @@
 #################################################################
 global:
   nodePortPrefix: 302
+  nodePortPrefixExt: 304
   readinessRepository: oomk8s
   readinessImage: readiness-check:2.0.2
   loggingRepository: docker.elastic.co
@@ -27,7 +28,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/workflow-frontend:1.4.0-SNAPSHOT
+image: onap/workflow-frontend:1.4.0
 pullPolicy: Always
 
 # flag to enable debugging - application support required
@@ -36,6 +37,16 @@ debugEnabled: false
 config:
   javaOptions: "-Xdebug -agentlib:jdwp=transport=dt_socket,address=7000,server=y,suspend=n -Xmx256m -Xms256m"
   backendServerURL: "http://sdc-wfd-be:8080"
+  isHttpsEnabled: false
+
+# https relevant settings. Change in case you have other trust files then default ones.
+security:
+  isDefaultStore: true
+  truststoreType: "JKS"
+  keystoreType: "JKS"
+  truststoreFilename: "truststore"
+  keystoreFilename: "keystore"
+  storePath: "etc"
 
 # default number of instances
 replicaCount: 1
@@ -62,6 +73,10 @@ service:
   externalPort: 8080
   portName: sdc-wfd-fe
   nodePort: "56"
+  portName2: sdc-wfd-fe2
+  internalPort2: 8443
+  externalPort2: 8443
+  nodePort2: "31"
 
 ingress:
   enabled: false
index 4b30d43..8552fd5 100644 (file)
@@ -27,7 +27,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/sdnc-dmaap-listener-image:1.5-STAGING-latest
+image: onap/sdnc-dmaap-listener-image:1.5.1
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index d7c33a6..ed19683 100644 (file)
@@ -27,7 +27,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/sdnc-ansible-server-image:1.5-STAGING-latest
+image: onap/sdnc-ansible-server-image:1.5.1
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index ba99020..4a51a96 100644 (file)
@@ -27,7 +27,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/admportal-sdnc-image:1.5-STAGING-latest
+image: onap/admportal-sdnc-image:1.5.1
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index 4da1ffe..763af62 100644 (file)
@@ -27,7 +27,7 @@ global:
 #################################################################
 # application image
 repository: nexus3.onap.org:10001
-image: onap/sdnc-ueb-listener-image:1.5-STAGING-latest
+image: onap/sdnc-ueb-listener-image:1.5.1
 pullPolicy: Always
 
 # flag to enable debugging - application support required
index 9728fab..d60319f 100644 (file)
@@ -46,7 +46,15 @@ spec:
               fieldPath: metadata.namespace
         image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
         imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
-        name: {{ include "common.name" . }}-readiness
+        name: {{ include "common.name" . }}-readiness
+      - name: {{ include "common.name" . }}-chown
+        image: "busybox"
+        command: ["sh", "-c", "chown -R {{ .Values.config.odlUid }}:{{ .Values.config.odlGid}} {{ .Values.persistence.mdsalPath }} ; chown -R {{ .Values.config.odlUid }}:{{ .Values.config.odlGid}} {{ .Values.certpersistence.certPath }}"]
+        volumeMounts:
+          - mountPath: {{ .Values.persistence.mdsalPath }}
+            name: {{ include "common.fullname" . }}-mdsal
+          - mountPath: {{ .Values.certpersistence.certPath }}
+            name: {{ include "common.fullname" . }}-certs
       containers:
         - name: {{ include "common.name" . }}
           image: "{{ include "common.repository" . }}/{{ .Values.image }}"
index e2e8579..0cd5b60 100644 (file)
@@ -32,13 +32,15 @@ global:
 # application images
 repository: nexus3.onap.org:10001
 pullPolicy: Always
-image: onap/sdnc-image:1.5-STAGING-latest
+image: onap/sdnc-image:1.5.1
 
 # flag to enable debugging - application support required
 debugEnabled: false
 
 # application configuration
 config:
+  odlUid: 100
+  odlGid: 101
   odlPassword: Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
   dbRootPassword: secretpassword
   dbSdnctlPassword: gamma
index 92c5c78..f8cfc4c 100755 (executable)
@@ -19,9 +19,10 @@ aai:
   workflowAaiDistributionDelay: PT30S
   pnfEntryNotificationTimeout: P14D
 cds:
-  endpoint: blueprints-processor
+  endpoint: cds-blueprints-processor-grpc
   port: 9111
   auth: Basic Y2NzZGthcHBzOmNjc2RrYXBwcw==
+  timeout: 600
 camunda:
   bpm:
     admin-user:
index 68df124..d6aa29c 100755 (executable)
@@ -27,7 +27,7 @@ global:
 # Application configuration defaults.
 #################################################################
 repository: nexus3.onap.org:10001
-image: onap/so/bpmn-infra:1.4.0-STAGING-latest
+image: onap/so/bpmn-infra:1.4.1
 pullPolicy: Always
 
 replicaCount: 1
index e807317..8a5f418 100755 (executable)
@@ -27,7 +27,7 @@ global:
 # Application configuration defaults.
 #################################################################
 repository: nexus3.onap.org:10001
-image: onap/so/catalog-db-adapter:1.4.0-STAGING-latest
+image: onap/so/catalog-db-adapter:1.4.1
 pullPolicy: Always
 
 replicaCount: 1
index 9550926..ff125ec 100644 (file)
@@ -32,7 +32,7 @@ global:
 # Application configuration defaults.
 #################################################################
 repository: nexus3.onap.org:10001
-image: onap/so/so-monitoring:1.4.0-STAGING-latest
+image: onap/so/so-monitoring:1.4.1
 pullPolicy: Always
 
 replicaCount: 1
index b139ac7..215a449 100755 (executable)
@@ -26,7 +26,7 @@ global:
 #################################################################
 # Application configuration defaults.
 #################################################################
-image: onap/so/openstack-adapter:1.4.0-STAGING-latest
+image: onap/so/openstack-adapter:1.4.1
 pullPolicy: Always
 repository: nexus3.onap.org:10001
 
index 7a40fac..97b5ab5 100755 (executable)
@@ -27,7 +27,7 @@ global:
 # Application configuration defaults.
 #################################################################
 repository: nexus3.onap.org:10001
-image: onap/so/request-db-adapter:1.4.0-STAGING-latest
+image: onap/so/request-db-adapter:1.4.1
 pullPolicy: Always
 
 replicaCount: 1
index c330f9d..1d7ee6f 100755 (executable)
@@ -27,7 +27,7 @@ global:
 # Application configuration defaults.
 #################################################################
 repository: nexus3.onap.org:10001
-image: onap/so/sdc-controller:1.4.0-STAGING-latest
+image: onap/so/sdc-controller:1.4.1
 pullPolicy: Always
 
 replicaCount: 1
index 8296c69..2671f1c 100755 (executable)
@@ -27,7 +27,7 @@ global:
 # Application configuration defaults.
 #################################################################
 repository: nexus3.onap.org:10001
-image: onap/so/sdnc-adapter:1.4.0-STAGING-latest
+image: onap/so/sdnc-adapter:1.4.1
 pullPolicy: Always
 
 replicaCount: 1
index 22c3f12..d0b887b 100755 (executable)
@@ -27,7 +27,7 @@ global:
 # Application configuration defaults.
 #################################################################
 repository: nexus3.onap.org:10001
-image: onap/so/vfc-adapter:1.4.0-STAGING-latest
+image: onap/so/vfc-adapter:1.4.1
 pullPolicy: Always
 
 replicaCount: 1
index d544247..50b9de0 100755 (executable)
@@ -26,7 +26,7 @@ global:
 # Application configuration defaults.
 #################################################################
 repository: nexus3.onap.org:10001
-image: onap/so/vnfm-adapter:1.4.0
+image: onap/so/vnfm-adapter:1.4.1
 pullPolicy: Always
 
 replicaCount: 1
index b096b55..91fc9db 100755 (executable)
@@ -43,7 +43,7 @@ global:
 # Application configuration defaults.
 #################################################################
 repository: nexus3.onap.org:10001
-image: onap/so/api-handler-infra:1.4.0-STAGING-latest
+image: onap/so/api-handler-infra:1.4.1
 pullPolicy: Always
 
 replicaCount: 1
index 2abe7fd..346c037 100644 (file)
@@ -27,7 +27,7 @@ metadata:
       {
           "serviceName": "usecaseui-server",
           "version": "v1",
-          "url": "/api/usecaseui/server/v1",
+          "url": "/api/usecaseui-server/v1",
           "protocol": "REST",
           "port": "{{.Values.service.internalPort}}",
           "visualRange":"1"
index 1849c46..c3c6151 100644 (file)
@@ -37,7 +37,7 @@ spec:
         - /root/ready.py
         args:
         - --container-name
-        - vfc-db
+        - vfc-mariadb
         env:
         - name: NAMESPACE
           valueFrom:
index a96276b..2eb74e2 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/catalog:1.3.0-STAGING-latest
+image: onap/vfc/catalog:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
index 704d900..d4c71b6 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/emsdriver:1.3.0-STAGING-latest
+image: onap/vfc/emsdriver:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
index a25a497..e529f47 100644 (file)
@@ -32,22 +32,6 @@ spec:
       annotations:
         sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
     spec:
-      initContainers:
-#Example init container for dependency checking
-#      - command:
-#        - /root/ready.py
-#        args:
-#        - --container-name
-#        - mariadb
-#        env:
-#        - name: NAMESPACE
-#          valueFrom:
-#            fieldRef:
-#              apiVersion: v1
-#              fieldPath: metadata.namespace
-#        image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
-#        imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
-#        name: {{ include "common.name" . }}-readiness
       containers:
         - name: {{ include "common.name" . }}
           image: "{{ include "common.repository" . }}/{{ .Values.image }}"
index 22e4873..47145fc 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/gvnfmdriver:1.3.0-STAGING-latest
+image: onap/vfc/gvnfmdriver:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
index 5cd1d75..70125e9 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/nfvo/svnfm/huawei:1.3.0-STAGING-latest
+image: onap/vfc/nfvo/svnfm/huawei:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
index 43eafe4..8833a5a 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/jujudriver:1.3.0-STAGING-latest
+image: onap/vfc/jujudriver:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
index c481cda..211ff72 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/multivimproxy:1.3.0-STAGING-latest
+image: onap/vfc/multivimproxy:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
index dfebe76..74f5b67 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/nfvo/svnfm/nokiav2:1.3.1-STAGING-latest
+image: onap/vfc/nfvo/svnfm/nokiav2:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
diff --git a/kubernetes/vfc/charts/vfc-nokia-vnfm-driver/.helmignore b/kubernetes/vfc/charts/vfc-nokia-vnfm-driver/.helmignore
deleted file mode 100644 (file)
index f0c1319..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
diff --git a/kubernetes/vfc/charts/vfc-nokia-vnfm-driver/resources/config/logging/logback.xml b/kubernetes/vfc/charts/vfc-nokia-vnfm-driver/resources/config/logging/logback.xml
deleted file mode 100644 (file)
index 747c6b6..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<configuration>
-    <include resource="org/springframework/boot/logging/logback/defaults.xml"/>
-    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
-        <encoder>
-            <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{35} - %msg %n</pattern>
-            <charset>UTF-8</charset>
-        </encoder>
-    </appender>
-    <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
-        <file>/var/log/onap/vfc/svnfm/nokiavnfmdriver.log</file>
-      <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
-      <fileNamePattern>/var/log/onap/vfc/svnfm/nokiavnfmdriver.%i.log.zip
-      </fileNamePattern>
-      <minIndex>1</minIndex>
-      <maxIndex>9</maxIndex>
-    </rollingPolicy>
-        <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
-            <maxFileSize>10MB</maxFileSize>
-        </triggeringPolicy>
-        <encoder>
-            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %X{req.method} %X{req.remoteHost} %X{req.requestURI} [%t] %-5level %logger{50} %line - %m%n</pattern>
-            <charset>UTF-8</charset>
-        </encoder>
-    </appender>
-    <root level="INFO">
-        <appender-ref ref="CONSOLE"></appender-ref>
-        <appender-ref ref="FILE"></appender-ref>
-    </root>
-</configuration>
\ No newline at end of file
diff --git a/kubernetes/vfc/charts/vfc-nokia-vnfm-driver/templates/deployment.yaml b/kubernetes/vfc/charts/vfc-nokia-vnfm-driver/templates/deployment.yaml
deleted file mode 100644 (file)
index eb0885a..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright © 2017 Amdocs, Bell Canada
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: {{ include "common.fullname" . }}
-  namespace: {{ include "common.namespace" . }}
-  labels:
-    app: {{ include "common.name" . }}
-    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
-    release: {{ .Release.Name }}
-    heritage: {{ .Release.Service }}
-spec:
-  replicas: {{ .Values.replicaCount }}
-  template:
-    metadata:
-      labels:
-        app: {{ include "common.name" . }}
-        release: {{ .Release.Name }}
-      annotations:
-        sidecar.istio.io/inject: "{{.Values.istioSidecar}}"
-    spec:
-      initContainers:
-#Example init container for dependency checking
-#      - command:
-#        - /root/ready.py
-#        args:
-#        - --container-name
-#        - mariadb
-#        env:
-#        - name: NAMESPACE
-#          valueFrom:
-#            fieldRef:
-#              apiVersion: v1
-#              fieldPath: metadata.namespace
-#        image: "{{ .Values.global.readinessRepository }}/{{ .Values.global.readinessImage }}"
-#        imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
-#        name: {{ include "common.name" . }}-readiness
-      containers:
-        - name: {{ include "common.name" . }}
-          image: "{{ include "common.repository" . }}/{{ .Values.image }}"
-          imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
-          ports:
-          - containerPort: {{ .Values.service.internalPort }}
-          # disable liveness probe when breakpoints set in debugger
-          # so K8s doesn't restart unresponsive container
-          {{ if .Values.liveness.enabled }}
-          livenessProbe:
-            tcpSocket:
-              port: {{ .Values.service.internalPort }}
-            initialDelaySeconds: {{ .Values.liveness.initialDelaySeconds }}
-            periodSeconds: {{ .Values.liveness.periodSeconds }}
-          {{ end }}
-          readinessProbe:
-            tcpSocket:
-              port: {{ .Values.service.internalPort }}
-            initialDelaySeconds: {{ .Values.readiness.initialDelaySeconds }}
-            periodSeconds: {{ .Values.readiness.periodSeconds }}
-          env:
-            - name: MSB_ADDR
-              value: "{{ .Values.global.config.msbServiceName }}:{{ .Values.global.config.msbPort }}"
-          volumeMounts:
-          - name: {{ include "common.fullname" . }}-localtime
-            mountPath: /etc/localtime
-            readOnly: true
-          - name: {{ include "common.fullname" . }}-logs
-            mountPath: /var/log/onap
-          - name: {{ include "common.fullname" . }}-logback
-            mountPath: /opt/vfc/nokiavnfmdriver/config/logback.xml
-            subPath: logback.xml
-          resources:
-{{ include "common.resources" . | indent 12 }}
-        {{- if .Values.nodeSelector }}
-        nodeSelector:
-{{ toYaml .Values.nodeSelector | indent 10 }}
-        {{- end -}}
-        {{- if .Values.affinity }}
-        affinity:
-{{ toYaml .Values.affinity | indent 10 }}
-        {{- end }}
-
-        # side car containers
-        - name: {{ include "common.name" . }}-filebeat-onap
-          image: "{{ .Values.global.loggingRepository }}/{{ .Values.global.loggingImage }}"
-          imagePullPolicy: {{ .Values.global.pullPolicy | default .Values.pullPolicy }}
-          volumeMounts:
-          - name: {{ include "common.fullname" . }}-filebeat-conf
-            mountPath: /usr/share/filebeat/filebeat.yml
-            subPath: filebeat.yml
-          - name: {{ include "common.fullname" . }}-logs
-            mountPath: /var/log/onap
-          - name: {{ include "common.fullname" . }}-data-filebeat
-            mountPath: /usr/share/filebeat/data
-
-      volumes:
-        - name: {{ include "common.fullname" . }}-localtime
-          hostPath:
-            path: /etc/localtime
-        - name:  {{ include "common.fullname" . }}-logs
-          emptyDir: {}
-        - name: {{ include "common.fullname" . }}-logback
-          configMap:
-            name : {{ include "common.fullname" . }}-logging-configmap
-
-        - name: {{ include "common.fullname" . }}-filebeat-conf
-          configMap:
-            name: {{ .Release.Name }}-vfc-filebeat-configmap
-        - name: {{ include "common.fullname" . }}-data-filebeat
-          emptyDir: {}
-      imagePullSecrets:
-      - name: "{{ include "common.namespace" . }}-docker-registry-key"
index 24a42fe..217759e 100644 (file)
@@ -37,7 +37,7 @@ spec:
         - /root/ready.py
         args:
         - --container-name
-        - vfc-db
+        - vfc-mariadb
         env:
         - name: NAMESPACE
           valueFrom:
index 554d20b..f42cec0 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/nslcm:1.3.0-STAGING-latest
+image: onap/vfc/nslcm:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
similarity index 97%
rename from kubernetes/vfc/charts/vfc-db/Chart.yaml
rename to kubernetes/vfc/charts/vfc-redis/Chart.yaml
index 64e6c29..90ffba9 100644 (file)
@@ -14,5 +14,5 @@
 
 apiVersion: v1
 description: ONAP VFC - DB
-name: vfc-db
+name: vfc-redis
 version: 4.0.0
index 03feb71..71cf4cc 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/resmanagement:1.3.0-STAGING-latest
+image: onap/vfc/resmanagement:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
index f941ae8..67f57e8 100644 (file)
@@ -37,7 +37,7 @@ spec:
         - /root/ready.py
         args:
         - --container-name
-        - vfc-db
+        - vfc-mariadb
         env:
         - name: NAMESPACE
           valueFrom:
index d1d7681..abefe06 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/vnflcm:1.3.0-STAGING-latest
+image: onap/vfc/vnflcm:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
index d197da6..5169f79 100644 (file)
@@ -37,7 +37,7 @@ spec:
         - /root/ready.py
         args:
         - --container-name
-        - vfc-db
+        - vfc-mariadb
         env:
         - name: NAMESPACE
           valueFrom:
index d709d9d..d318dac 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/vnfmgr:1.3.0-STAGING-latest
+image: onap/vfc/vnfmgr:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
index f2a8b05..7982fec 100644 (file)
@@ -37,7 +37,7 @@ spec:
         - /root/ready.py
         args:
         - --container-name
-        - vfc-db
+        - vfc-mariadb
         env:
         - name: NAMESPACE
           valueFrom:
index 62b2463..7522685 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/vnfres:1.3.0-STAGING-latest
+image: onap/vfc/vnfres:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
index 598cb4d..daf1429 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/wfengine-activiti:1.3.0-STAGING-latest
+image: onap/vfc/wfengine-activiti:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
index 26d9888..1c7444f 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/wfengine-mgrservice:1.3.0-STAGING-latest
+image: onap/vfc/wfengine-mgrservice:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
index b9d7288..4c2a546 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/ztesdncdriver:1.2.0-STAGING-latest
+image: onap/vfc/ztesdncdriver:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
index d37c241..0b9252b 100644 (file)
@@ -29,7 +29,7 @@ global:
 flavor: small
 
 repository: nexus3.onap.org:10001
-image: onap/vfc/ztevnfmdriver:1.3.0-STAGING-latest
+image: onap/vfc/ztevnfmdriver:1.3.0
 pullPolicy: Always
 
 #Istio sidecar injection policy
index 63bbef2..e004cb4 100644 (file)
@@ -27,7 +27,7 @@ subChartsOnly:
 
 # application image
 repository: nexus3.onap.org:10001
-image: onap/vid:4.0-STAGING-latest
+image: onap/vid:4.0.0
 pullPolicy: Always
 
 # mariadb image for initializing
@@ -112,4 +112,4 @@ resources:
     requests:
       cpu: 200m
       memory: 2Gi
-  unlimited: {}
\ No newline at end of file
+  unlimited: {}