Merge "change usecase-ui-server api url"
author Alexis de Talhouët <adetalhouet89@gmail.com>
Fri, 10 May 2019 13:14:49 +0000 (13:14 +0000)
committer Gerrit Code Review <gerrit@onap.org>
Fri, 10 May 2019 13:14:49 +0000 (13:14 +0000)
20 files changed:
docs/example-integration-override.yaml
docs/helm-search.txt
docs/oom_quickstart_guide.rst
docs/oom_setup_kubernetes_rancher.rst
kubernetes/aai
kubernetes/clamp/values.yaml
kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml
kubernetes/dcaegen2/charts/dcae-policy-handler/resources/config/config.json
kubernetes/dcaegen2/charts/dcae-policy-handler/values.yaml
kubernetes/dmaap/components/dmaap-bc/values.yaml
kubernetes/dmaap/values.yaml
kubernetes/onap/resources/overrides/openstack.yaml
kubernetes/policy/charts/drools/resources/config/opt/policy/config/drools/base.conf
kubernetes/policy/charts/policy-xacml-pdp/resources/config/xacml.properties [new file with mode: 0644]
kubernetes/policy/resources/config/pe/console.conf
kubernetes/robot/ete-k8s.sh
kubernetes/robot/eteHelm-k8s.sh
kubernetes/sdc/charts/sdc-wfd-fe/templates/deployment.yaml
kubernetes/sdc/charts/sdc-wfd-fe/templates/service.yaml
kubernetes/sdc/charts/sdc-wfd-fe/values.yaml

index 9c336d6..56699d9 100644 (file)
@@ -1,36 +1,46 @@
 global:
   repository: 10.12.5.2:5000
   pullPolicy: IfNotPresent
+#################################################################
+# This override file configures openstack parameters for ONAP
+#################################################################
+appc:
+  config:
+    enableClustering: false
+    openStackType: "OpenStackProvider"
+    openStackName: "OpenStack"
+    openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
+    openStackServiceTenantName: "OPENSTACK_TENANTNAME_HERE"
+    openStackDomain: "Default"
+    openStackUserName: "OPENSTACK_USERNAME_HERE"
+    openStackEncryptedPassword: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
 robot:
-  enabled: true
-  flavor: large
   appcUsername: "appc@appc.onap.org"
-  appcPassword: "APPC_PASSWORD_HERE"
+  appcPassword: "demo123456!"
   openStackKeyStoneUrl: "http://10.12.25.2:5000"
   openStackPublicNetId: "971040b2-7059-49dc-b220-4fab50cb2ad4"
   openStackTenantId: "09d8566ea45e43aa974cf447ed591d77"
   openStackUserName: "OPENSTACK_USERNAME_HERE"
   ubuntu14Image: "ubuntu-14-04-cloud-amd64"
   ubuntu16Image: "ubuntu-16-04-cloud-amd64"
-  openStackPrivateNetId: "d4ab89ff-c735-4ce4-93f6-cff445157b98"
-  openStackPrivateSubnetId: "46c2391c-ed98-4fb0-8ab7-88678bc55b9f"
+  openStackPrivateNetId: "c7824f00-bef7-4864-81b9-f6c3afabd313"
+  openStackPrivateSubnetId: "2a0e8888-f93e-4615-8d28-fc3d4d087fc3"
   openStackPrivateNetCidr: "10.0.0.0/16"
-  openStackSecurityGroup: "3914301b-2996-414f-ba0a-da4b2275a753"
+  openStackSecurityGroup: "3a7a1e7e-6d15-4264-835d-fab1ae81e8b0"
   openStackOamNetworkCidrPrefix: "10.0"
-  dcaeCollectorIp: "10.12.5.46"
+  dcaeCollectorIp: "10.12.6.88"
   vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh"
-  demoArtifactsVersion: "1.3.0"
+  demoArtifactsVersion: "1.4.0-SNAPSHOT"
   demoArtifactsRepoUrl: "https://nexus.onap.org/content/repositories/releases"
-  scriptVersion: "1.3.0"
-  rancherIpAddress: "10.12.6.38"
+  scriptVersion: "1.4.0-SNAPSHOT"
+  rancherIpAddress: "10.12.5.127"
   config:
-    openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HEREXXXXXXXXXXXXXXXX"
+    # openStackEncryptedPasswordHere should match the encrypted string used in SO and APPC and overridden per environment
+    openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
 so:
-  enabled: true
+  # so server configuration
   so-catalog-db-adapter:
     config:
       openStackUserName: "OPENSTACK_USERNAME_HERE"
       openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
-      openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HEREXXXXXXXXXXXXXXXX"
-
-
+      openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
\ No newline at end of file
index db95e4f..036ad03 100644 (file)
@@ -1,31 +1,42 @@
 NAME                   CHART VERSION   APP VERSION     DESCRIPTION                                 
-local/onap             4.0.0           Dublin          Open Network Automation Platform (ONAP)     
-local/aaf              4.0.0                           ONAP Application Authorization Framework    
-local/aai              4.0.0                           ONAP Active and Available Inventory         
-local/cassandra        4.0.0                           ONAP cassandra                              
-local/cds              4.0.0                           ONAP Common Design Studio                   
-local/clamp            4.0.0                           ONAP Clamp                                  
-local/cli              4.0.0                           ONAP Command Line Interface                 
-local/consul           4.0.0                           ONAP Consul Agent                           
-local/contrib          4.0.0                           ONAP optional tools                         
-local/dcaegen2         4.0.0                           ONAP DCAE Gen2                              
-local/dmaap            4.0.1                           ONAP DMaaP components                       
-local/esr              4.0.0                           ONAP External System Register               
-local/log              4.0.0                           ONAP Logging ElasticStack                   
-local/msb              4.0.0                           ONAP MicroServices Bus                      
-local/multicloud       4.0.0                           ONAP multicloud broker                      
-local/nbi              4.0.0                           ONAP Northbound Interface                   
-local/oof              4.0.0                           ONAP Optimization Framework                 
-local/pnda             4.0.0                           ONAP DCAE PNDA                              
-local/policy           4.0.0                           ONAP Policy Administration Point            
-local/pomba            4.0.0                           ONAP Post Orchestration Model Based Audit   
-local/portal           4.0.0                           ONAP Web Portal                             
-local/postgres         4.0.0                           ONAP Postgres Server                        
-local/robot            4.0.0                           A helm Chart for kubernetes-ONAP Robot      
-local/sdnc-prom        4.0.0                           ONAP SDNC Policy Driven Ownership Management
-local/sniro-emulator   4.0.0                           ONAP Mock Sniro Emulator                    
-local/so               4.0.0                           ONAP Service Orchestrator                   
-local/uui              4.0.0                           ONAP uui                                    
-local/vfc              4.0.0                           ONAP Virtual Function Controller (VF-C)     
-local/vid              4.0.0                           ONAP Virtual Infrastructure Deployment      
-local/vnfsdk           4.0.0                           ONAP VNF SDK 
+local/onap                     4.0.0           Dublin  Open Network Automation Platform (ONAP)
+local/aaf                      4.0.0                   ONAP Application Authorization Framework
+local/aai                      4.0.0                   ONAP Active and Available Inventory
+local/appc                     4.0.0                   Application Controller
+local/cassandra                4.0.0                   ONAP cassandra
+local/cds                      4.0.0                   ONAP Controller Design Studio (CDS)
+local/clamp                    4.0.0                   ONAP Clamp
+local/cli                      4.0.0                   ONAP Command Line Interface
+local/common                   4.0.0                   Common templates for inclusion in other charts
+local/consul                   4.0.0                   ONAP Consul Agent
+local/contrib                  4.0.0                   ONAP optional tools
+local/dcaegen2                 4.0.0                   ONAP DCAE Gen2
+local/dgbuilder                4.0.0                   D.G. Builder application
+local/dmaap                    4.0.1                   ONAP DMaaP components
+local/esr                      4.0.0                   ONAP External System Register
+local/log                      4.0.0                   ONAP Logging ElasticStack
+local/mariadb-galera           4.0.0                   Chart for MariaDB Galera cluster
+local/mongo                    4.0.0                   MongoDB Server
+local/msb                      4.0.0                   ONAP MicroServices Bus
+local/multicloud               4.0.0                   ONAP multicloud broker
+local/music                    4.0.0                   MUSIC - Multi-site State Coordination Service
+local/mysql                    4.0.0                   MySQL Server
+local/nbi                      4.0.0                   ONAP Northbound Interface
+local/network-name-gen         4.0.0                   Name Generation Micro Service
+local/nfs-provisioner          4.0.0                   NFS provisioner
+local/oof                      4.0.0                   ONAP Optimization Framework
+local/pnda                     4.0.0                   ONAP DCAE PNDA
+local/policy                   4.0.0                   ONAP Policy Administration Point
+local/pomba                    4.0.0                   ONAP Post Orchestration Model Based Audit
+local/portal                   4.0.0                   ONAP Web Portal
+local/postgres                 4.0.0                   ONAP Postgres Server
+local/robot                    4.0.0                   A helm Chart for kubernetes-ONAP Robot
+local/sdc                      4.0.0                   Service Design and Creation Umbrella Helm charts
+local/sdnc                     4.0.0                   SDN Controller
+local/sdnc-prom                4.0.0                   ONAP SDNC Policy Driven Ownership Management
+local/sniro-emulator           4.0.0                   ONAP Mock Sniro Emulator
+local/so                       4.0.0                   ONAP Service Orchestrator
+local/uui                      4.0.0                   ONAP uui
+local/vfc                      4.0.0                   ONAP Virtual Function Controller (VF-C)
+local/vid                      4.0.0                   ONAP Virtual Infrastructure Deployment
+local/vnfsdk                   4.0.0                   ONAP VNF SDK
\ No newline at end of file
index 20eb8fa..501deda 100644 (file)
@@ -25,21 +25,25 @@ available), follow the following instructions to deploy ONAP.
   > sudo cp -R ~/oom/kubernetes/helm/plugins/ ~/.helm
 
 
-**Step 3.** Customize the helm charts like onap.values.yaml or an override.yaml
-like integration-override.yaml file to suit your deployment with items like the
+**Step 3.** Customize the helm charts like oom/kubernetes/onap/values.yaml or an override
+file like onap-all.yaml, onap-vfw.yaml or openstack.yaml file to suit your deployment with items like the
 OpenStack tenant information.
 
+.. note::
+  Standard and example override files (e.g. onap-all.yaml, openstack.yaml) can be found in 
+  the oom/kubernetes/onap/resources/overrides/ directory.
+
 
  a. You may want to selectively enable or disable ONAP components by changing
     the `enabled: true/false` flags.
 
 
  b. Encyrpt the OpenStack password using the shell tool for robot and put it in
-    the robot helm charts or robot section of integration-override.yaml
+    the robot helm charts or robot section of openstack.yaml
 
 
  c. Encrypt the OpenStack password using the java based script for SO helm charts
-    or SO section of integration-override.yaml.
+    or SO section of openstack.yaml.
 
 
  d. Update the OpenStack parameters that will be used by robot, SO and APPC helm
@@ -63,9 +67,9 @@ openssl algorithm that works with the python based Robot Framework.
 .. note::
   To generate ROBOT openStackEncryptedPasswordHere :
 
-  ``root@olc-rancher:~# cd so/resources/config/mso/``
+  ``cd so/resources/config/mso/``
 
-  ``root@olc-rancher:~/oom/kubernetes/so/resources/config/mso# echo -n "<openstack tenant password>" | openssl aes-128-ecb -e -K `cat encryption.key` -nosalt | xxd -c 256 -p``
+  ``/oom/kubernetes/so/resources/config/mso# echo -n "<openstack tenant password>" | openssl aes-128-ecb -e -K `cat encryption.key` -nosalt | xxd -c 256 -p``
 
 c. Generating SO Encrypted Password:
 The SO Encrypted Password uses a java based encryption utility since the
@@ -120,20 +124,24 @@ follows::
 **Step 8.** Once the repo is setup, installation of ONAP can be done with a
 single command
 
- a. If you updated the values directly use this command::
+.. note::
+  The --timeout 900 is currently required in Dublin to address long running initialization tasks
+  for DMaaP and SO. Without this timeout value both applications may fail to deploy.
 
-    > helm deploy dev local/onap --namespace onap
+ a. To deploy all ONAP applications use this command::
 
+    > cd oom/kubernetes
+    > helm deploy dev local/onap --namespace onap -f onap/resources/overrides/onap-all.yaml -f onap/resources/overrides/openstack.yaml --timeout 900
 
- b. If you are using an integration-override.yaml file use this command::
+ b. If you are using a custom override (e.g. integration-override.yaml) use this command::
 
-    > helm deploy dev local/onap -f /root/integration-override.yaml --namespace onap
+    > helm deploy dev local/onap -f /root/integration-override.yaml --namespace onap --timeout 900
 
 
  c. If you have a slower cloud environment you may want to use the public-cloud.yaml
     which has longer delay intervals on database updates.::
 
-    > helm deploy dev local/onap -f /root/oom/kubernetes/onap/resources/environments/public-cloud.yaml -f /root/integration-override.yaml --namespace onap
+    > helm deploy dev local/onap -f /root/oom/kubernetes/onap/resources/environments/public-cloud.yaml -f /root/integration-override.yaml --namespace onap --timeout 900
 
 
 **Step 9.** Commands to interact with the OOM installation
@@ -141,7 +149,7 @@ single command
 Use the following to monitor your deployment and determine when ONAP is
 ready for use::
 
-  > kubectl get pods --all-namespaces -o=wide
+  > kubectl get pods -n onap -o=wide
 
 Undeploying onap can be done using the following command::
 
index ebc44e6..3ccde8d 100644 (file)
@@ -23,6 +23,11 @@ This guide provides instructions on how to setup a Highly-Available Kubernetes C
 For this, we are hosting our cluster on OpenStack VMs and using the Rancher Kubernetes Engine (RKE)
 to deploy and manage our Kubernetes Cluster.
 
+.. contents::
+   :depth: 1
+   :local:
+..
+
 The result at the end of this tutorial will be:
 
 *1.* Creation of a Key Pair to use with Open Stack and RKE
@@ -42,11 +47,6 @@ The result at the end of this tutorial will be:
 There are many ways one can execute the above steps. Including automation through the use of HEAT to setup the OpenStack VMs.
 To better illustrate the steps involved, we have captured the manual creation of such an environment using the ONAP Wind River Open Lab.
 
-.. contents::
-   :depth: 1
-   :local:
-..
-
 Create Key Pair
 ===============
 A Key Pair is required to access the created OpenStack VMs and will be used by
@@ -63,9 +63,9 @@ For the purpose of this guide, we will assume a new local key called "onap-key"
 has been downloaded and is copied into **~/.ssh/**, from which it can be referenced.
 
 Example:
-  $ mv onap-key ~/.ssh
+  > mv onap-key ~/.ssh
 
-  $ chmod 600 ~/.ssh/onap-key
+  > chmod 600 ~/.ssh/onap-key
 
 
 Create Kubernetes Control Plane VMs
@@ -252,11 +252,12 @@ Run RKE
 -------
 From within the same directory as the cluster.yml file, simply execute:
 
-  $ rke up
+  > rke up
 
 The output will look something like:
 
 .. code-block::
+
   INFO[0000] Initiating Kubernetes cluster
   INFO[0000] [certificates] Generating admin certificates and kubeconfig
   INFO[0000] Successfully Deployed state file at [./cluster.rkestate]
@@ -306,15 +307,16 @@ https://storage.googleapis.com/kubernetes-release/release/v1.13.5/bin/darwin/amd
 
 Validate deployment
 -------------------
-  $ cp kube_config_cluster.yml ~/.kube/config.onap
+  > cp kube_config_cluster.yml ~/.kube/config.onap
 
-  $ export KUBECONFIG=~/.kube/config.onap
+  > export KUBECONFIG=~/.kube/config.onap
 
-  $ kubectl config use-context onap
+  > kubectl config use-context onap
 
-  $ kubectl get nodes -o=wide
+  > kubectl get nodes -o=wide
 
 .. code-block::
+
   NAME             STATUS   ROLES               AGE     VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE           KERNEL-VERSION      CONTAINER-RUNTIME
   onap-control-1   Ready    controlplane,etcd   3h53m   v1.13.5   10.0.0.8      <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
   onap-control-2   Ready    controlplane,etcd   3h53m   v1.13.5   10.0.0.11     <none>        Ubuntu 18.04 LTS   4.15.0-22-generic   docker://18.9.5
@@ -336,13 +338,22 @@ Validate deployment
 Install Helm
 ============
 
-  $ kubectl -n kube-system create serviceaccount tiller
+Example Helm client install on Linux:
+  > wget http://storage.googleapis.com/kubernetes-helm/helm-v2.12.3-linux-amd64.tar.gz
+
+  > tar -zxvf helm-v2.12.3-linux-amd64.tar.gz
+
+  > sudo mv linux-amd64/helm /usr/local/bin/helm
+
+Initialize Kubernetes Cluster for use by Helm
+---------------------------------------------
+  > kubectl -n kube-system create serviceaccount tiller
 
-  $ kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
+  > kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
 
-  $ helm init --service-account tiller
+  > helm init --service-account tiller
 
-  $ kubectl -n kube-system  rollout status deploy/tiller-deploy
+  > kubectl -n kube-system  rollout status deploy/tiller-deploy
 
 
 
@@ -438,12 +449,12 @@ Click :download:`slave_nfs_node.sh <slave_nfs_node.sh>` to download the script.
 The master_nfs_node.sh script runs in the NFS Master node and needs the list of
 NFS Slave nodes as input, e.g.::
 
-    $ sudo ./master_nfs_node.sh node1_ip node2_ip ... nodeN_ip
+    > sudo ./master_nfs_node.sh node1_ip node2_ip ... nodeN_ip
 
 The slave_nfs_node.sh script runs in each NFS Slave node and needs the IP of
 the NFS Master node as input, e.g.::
 
-    $ sudo ./slave_nfs_node.sh master_node_ip
+    > sudo ./slave_nfs_node.sh master_node_ip
 
 
 ONAP Deployment via OOM
index 1b28e45..e67a94e 160000 (submodule)
@@ -1 +1 @@
-Subproject commit 1b28e45136d5096ef4c07f4142c76b45224b3cf4
+Subproject commit e67a94e6be333271c8237d6ebd5fb0f489401350
index 4534d6b..a806e77 100644 (file)
@@ -56,8 +56,9 @@ config:
         "clamp.config.files.sdcController": "file:/opt/clamp/sdc-controllers-config.json",
         "clamp.config.dcae.inventory.url": "http://inventory.{{ include "common.namespace" . }}:8080",
         "clamp.config.dcae.dispatcher.url": "https4://deployment-handler.{{ include "common.namespace" . }}:8443",
-        "clamp.config.dcae.dispatcher.userName":"test",
-        "clamp.config.dcae.dispatcher.password":"test",
+        "clamp.config.dcae.deployment.url": "https4://deployment-handler.{{ include "common.namespace" . }}:8443",
+        "clamp.config.dcae.deployment.userName": "none",
+        "clamp.config.dcae.deployment.password": "none",
         "clamp.config.policy.api.url": "http4://policy-api.{{ include "common.namespace" . }}:6969",
         "clamp.config.policy.api.userName": "healthcheck",
         "clamp.config.policy.api.password": "zb!XztG34",
index 0dc9919..6c5bb9a 100644 (file)
@@ -90,7 +90,7 @@ postgres:
 
 # application image
 repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.4.15
+image: onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.4.16
 default_k8s_location: central
 
 # DCAE component images to be deployed via Cloudify Manager
index 74abbb0..1db11ad 100644 (file)
       "interval": 600
     },
     "policy_engine": {
-      "url": "https://{{ .Values.config.address.policy_pdp }}.{{include "common.namespace" . }}:8081",
-      "path_decision": "/decision/v1",
+      "url": "https://{{ .Values.config.address.policy_xacml_pdp }}:6969",
+      "path_decision": "/policy/pdpx/v1/decision",
       "path_notifications": "/pdp/notifications",
       "path_api": "/pdp/api/",
       "headers": {
         "Accept": "application/json",
         "Content-Type": "application/json",
         "ClientAuth": "cHl0aG9uOnRlc3Q=",
-        "Authorization": "Basic dGVzdHBkcDphbHBoYTEyMw==",
+        "Authorization": "Basic aGVhbHRoY2hlY2s6emIhWHp0RzM0",
         "Environment": "TEST"
       },
       "target_entity": "policy_engine",
index fa52f6f..3b15c55 100644 (file)
@@ -40,6 +40,7 @@ config:
     consul:
       host: consul-server
       port: 8500
+    policy_xacml_pdp: policy-xacml-pdp
 
 #################################################################
 # Application configuration defaults.
index 5406ade..6b97414 100644 (file)
@@ -30,7 +30,7 @@ pullPolicy: Always
 
 # application images
 repository: nexus3.onap.org:10001
-image: onap/dmaap/dmaap-bc:1.1.4-STAGING-latest
+image: onap/dmaap/dmaap-bc:1.1.5
 
 
 # application configuration
index 1c18bb2..aa5165d 100644 (file)
@@ -22,7 +22,7 @@ global:
   readinessImage: readiness-check:2.0.0
   loggingRepository: docker.elastic.co
   loggingImage: beats/filebeat:5.5.0
-  clientImage: onap/dmaap/dbc-client:1.0.8-STAGING-latest
+  clientImage: onap/dmaap/dbc-client:1.0.9
 # application configuration
 config:
   logstashServiceName: log-ls
index a3c5867..a8294d2 100644 (file)
@@ -21,17 +21,17 @@ appc:
     openStackType: "OpenStackProvider"
     openStackName: "OpenStack"
     openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
-    openStackServiceTenantName: "Integration-OOM-Staging-Daily"
+    openStackServiceTenantName: "OPENSTACK_TENANTNAME_HERE"
     openStackDomain: "Default"
-    openStackUserName: "demo"
-    openStackEncryptedPassword: "onapdemo"
+    openStackUserName: "OPENSTACK_USERNAME_HERE"
+    openStackEncryptedPassword: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
 robot:
   appcUsername: "appc@appc.onap.org"
   appcPassword: "demo123456!"
   openStackKeyStoneUrl: "http://10.12.25.2:5000"
   openStackPublicNetId: "971040b2-7059-49dc-b220-4fab50cb2ad4"
   openStackTenantId: "09d8566ea45e43aa974cf447ed591d77"
-  openStackUserName: "demo"
+  openStackUserName: "OPENSTACK_USERNAME_HERE"
   ubuntu14Image: "ubuntu-14-04-cloud-amd64"
   ubuntu16Image: "ubuntu-16-04-cloud-amd64"
   openStackPrivateNetId: "c7824f00-bef7-4864-81b9-f6c3afabd313"
@@ -47,18 +47,14 @@ robot:
   rancherIpAddress: "10.12.5.127"
   config:
     # openStackEncryptedPasswordHere should match the encrypted string used in SO and APPC and overridden per environment
-    openStackEncryptedPasswordHere: "bbaef6cd76625ab9eb60deedeae7dbb9"
+    openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
 so:
   # so server configuration
   so-catalog-db-adapter:
     config:
-      openStackUserName: "demo"
+      openStackUserName: "OPENSTACK_USERNAME_HERE"
       openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
-      openStackEncryptedPasswordHere: "1E82B3AF7ACF458C3A6058DF4DD5FD5E526FDAFAF163589C5F85F80CD7AEC09E034F375B"
-  # configure embedded mariadb
-  mariadb:
-    config:
-      mariadbRootPassword: password
+      openStackEncryptedPasswordHere: "XXXXXXXXXXXXXXXXXXXXXXXX_OPENSTACK_ENCRYPTED_PASSWORD_HERE_XXXXXXXXXXXXXXXX"
 nbi:
   config:
     # openstack configuration
index 0e4ee0e..e7c6928 100644 (file)
@@ -70,15 +70,15 @@ POLICY_PDP_PAP_API_SECRET=
 
 # PAP
 
-PAP_HOST={{.Values.global.pap.nameOverride}}
-PAP_USERNAME=testpap
-PAP_PASSWORD=alpha123
+PAP_HOST=policy-pap
+PAP_USERNAME=healthcheck
+PAP_PASSWORD=zb!XztG34
 
 # PDP-X
 
-PDP_HOST={{.Values.global.pdp.nameOverride}}
-PDP_USERNAME=testpdp
-PDP_PASSWORD=alpha123
+PDP_HOST=policy-xacml-pdp
+PDP_USERNAME=healthcheck
+PDP_PASSWORD=zb!XztG34
 PDP_CLIENT_USERNAME=python
 PDP_CLIENT_PASSWORD=test
 PDP_ENVIRONMENT=TEST
diff --git a/kubernetes/policy/charts/policy-xacml-pdp/resources/config/xacml.properties b/kubernetes/policy/charts/policy-xacml-pdp/resources/config/xacml.properties
new file mode 100644 (file)
index 0000000..f4b4f93
--- /dev/null
@@ -0,0 +1,53 @@
+#
+# Properties that the embedded PDP engine uses to configure and load
+#
+# Standard API Factories
+#
+xacml.dataTypeFactory=com.att.research.xacml.std.StdDataTypeFactory
+xacml.pdpEngineFactory=com.att.research.xacmlatt.pdp.ATTPDPEngineFactory
+xacml.pepEngineFactory=com.att.research.xacml.std.pep.StdEngineFactory
+xacml.pipFinderFactory=com.att.research.xacml.std.pip.StdPIPFinderFactory
+xacml.traceEngineFactory=com.att.research.xacml.std.trace.LoggingTraceEngineFactory
+#
+# AT&T PDP Implementation Factories
+#
+xacml.att.evaluationContextFactory=com.att.research.xacmlatt.pdp.std.StdEvaluationContextFactory
+xacml.att.combiningAlgorithmFactory=com.att.research.xacmlatt.pdp.std.StdCombiningAlgorithmFactory
+xacml.att.functionDefinitionFactory=com.att.research.xacmlatt.pdp.std.StdFunctionDefinitionFactory
+#
+# ONAP PDP Implementation Factories
+#
+xacml.att.policyFinderFactory=org.onap.policy.pdp.xacml.application.common.OnapPolicyFinderFactory
+
+#
+# Use a root combining algorithm
+#
+xacml.att.policyFinderFactory.combineRootPolicies=urn:oasis:names:tc:xacml:3.0:policy-combining-algorithm:deny-overrides
+
+#
+# PIP Engine Definitions
+#
+count-recent-operations.classname=org.onap.policy.pdp.xacml.application.common.operationshistory.CountRecentOperationsPip
+count-recent-operations.issuer=urn:org:onap:xacml:guard:count-recent-operations
+count-recent-operations.name=CountRecentOperations
+count-recent-operations.description=Returns operation counts based on time window
+count-recent-operations.persistenceunit=OperationsHistoryPU
+
+get-operation-outcome.classname=org.onap.policy.pdp.xacml.application.common.operationshistory.GetOperationOutcomePip
+get-operation-outcome.issuer=urn:org:onap:xacml:guard:get-operation-outcome
+get-operation-outcome.name=GetOperationOutcome
+get-operation-outcome.description=Returns operation outcome
+get-operation-outcome.persistenceunit=OperationsHistoryPU
+
+#
+# Make pips available to finder
+#
+xacml.pip.engines=count-recent-operations,get-operation-outcome
+
+#
+# JPA Properties
+#
+javax.persistence.jdbc.driver=org.mariadb.jdbc.Driver
+javax.persistence.jdbc.url=jdbc:mariadb://{{ .Values.global.mariadb.nameOverride }}:3306/operationshistory
+javax.persistence.jdbc.user=policy_user
+javax.persistence.jdbc.password=cG9saWN5X3VzZXI=
\ No newline at end of file
index 1cd9290..85fda35 100644 (file)
@@ -135,8 +135,8 @@ onap_application_name=
 
 #-----------------------ONAP-PORTAL-Properties----------------------
 
-ONAP_REDIRECT_URL=https://portal-app.{{.Release.Namespace}}:30225/ONAPPORTAL/login.htm
-ONAP_REST_URL=https://portal-app:8443/ONAPPORTAL/auxapi
+ONAP_REDIRECT_URL=https://portal.api.simpledemo.onap.org:30225/ONAPPORTAL/login.htm
+ONAP_REST_URL=https://portal-app:30225/ONAPPORTAL/auxapi
 ONAP_UEB_URL_LIST=
 ONAP_PORTAL_INBOX_NAME=
 ONAP_UEB_APP_KEY=ueb_key_5
index a59e3b6..5d42f04 100755 (executable)
 #!/bin/bash
 
 #
-# Run the testsuite for the passed tag. Valid tags are ete, health, closedloop, instantiate
+# Run the testsuite for the passed tag. Valid tags are listed in usage help
 # Please clean up logs when you are done...
-# Note: Do not run multiple concurrent ete.sh as the --display is not parameterized and tests will collide
 #
 if [ "$1" == "" ] || [ "$2" == "" ]; then
-   echo "Usage: ete-k8s.sh [namespace] [ health | healthdist | distribute | instantiate | instantiateVFWCL | instantiateDemoVFWCL |  | portal ]"
+   echo "Usage: ete-k8s.sh [namespace] [tag]"
+   echo ""
+   echo "  List of test case tags (filename for intent: tag)"
+   echo ""
+   echo "  cds.robot: cds"
+   echo ""
+   echo "  clamp.robot: clamp"
+   echo ""
+   echo "  demo.robot: InitDemo, InitCustomer, APPCCDTPreloadDemo, APPCMountPointDemo, DistributeDemoVFWDT, DistributeVFWNG,"
+   echo "              InitDistribution, PreloadDemo, deleteVNF, heatbridge, instantiateDemoVFWCL, instantiateVFW, instantiateVFWCL, instantiateVFWDT"
+   echo ""
+   echo "  health-check.robot: health, core, small, medium, 3rdparty, api, datarouter, externalapi, health-aaf, health-aai, health-appc,"
+   echo "                      health-clamp, health-cli, health-dcae, health-dmaap, health-log, health-modeling, health-msb,"
+   echo "                      health-multicloud, health-oof, health-policy, health-pomba, health-portal, health-sdc, health-sdnc,"
+   echo "                      health-so, health-uui, health-vfc, health-vid, health-vnfsdk, healthdist, healthlogin, healthmr,"
+   echo "                      healthportalapp, multicloud, oom"
+   echo ""
+   echo " hvves.robot: HVVES, ete"
+   echo ""
+   echo " model-distribution-vcpe.robot: distributevCPEResCust"
+   echo ""
+   echo " model-distribution.robot: distribute, distributeVFWDT, distributeVLB"
+   echo ""
+   echo " oof-*.robot: cmso, has, homing"
+   echo ""
+   echo " pnf-registration.robot: ete, pnf_registrate"
    echo ""
-   echo "  List of test case tags (filename for intent: tag) "
-   echo " "
-   echo "  cds.robot: cds "
-   echo " "
-   echo "  clamp.robot: clamp "
-   echo " "
-   echo "  demo.robot: InitDemo, InitCustomer , APPCCDTPreloadDemo, APPCMountPointDemo, DistributeDemoVFWDT, DistributeVFWNG,  "
-   echo "              InitDistribution, PreloadDemo, deleteVNF, heatbridge, instantiateDemoVFWCL, instantiateVFW, instantiateVFWCL, instantiateVFWDT "
-   echo " "
-   echo "  health-check.robot: health , core, small, medium, 3rdparty, api,  datarouter, externalapi, health-aaf, health-aai, health-appc, "
-   echo "                      health-clamp, health-cli, health-dcae, health-dmaap, health-log, health-modeling, health-msb, "
-   echo "                      health-multicloud, health-oof, health-policy, health-pomba, health-portal, health-sdc, health-sdnc, "
-   echo "                      health-so, health-uui, health-vfc, health-vid, health-vnfsdk, healthdist, healthlogin, healthmr, "
-   echo "                      healthportalapp, multicloud, oom "
-   echo " "
-   echo " hvves.robot: :HVVES, ete "
-   echo " "
-   echo " model-distribution-vcpe.robot: distributevCPEResCust "
-   echo " "
-   echo " model-distribution.robot: distribute, distributeVFWDT, distributeVLB "
-   echo " "
-   echo " oof-*.robot: cmso , has, homing "
-   echo " "
-   echo " pnf-registration.robot: ete, pnf_registrate "
-   echo " "
    echo " post-install-tests.robot dmaapacl, postinstall"
-   echo " "
-   echo " update_onap_page.robot: UpdateWebPage "
-   echo " "
-   echo " vnf-orchestration-direct-so.robot:  instantiateVFWdirectso "
-   echo " "
+   echo ""
+   echo " update_onap_page.robot: UpdateWebPage"
+   echo ""
+   echo " vnf-orchestration-direct-so.robot: instantiateVFWdirectso"
+   echo ""
    echo " vnf-orchestration.robot: instantiate, instantiateNoDelete, stability72hr"
    exit
 fi
@@ -63,7 +62,6 @@ export NAMESPACE="$1"
 
 POD=$(kubectl --namespace $NAMESPACE get pods | sed 's/ .*//'| grep robot)
 
-
 TAGS="-i $2"
 
 ETEHOME=/var/opt/ONAP
index 02b79f3..c58d8a8 100755 (executable)
 #!/bin/bash
 
 #
-# Run the testsuite for the passed tag. Valid tags are ete, health, closedloop, instantiate
+# Run the health-check testsuites for the tags discovered by helm list
 # Please clean up logs when you are done...
-# Note: Do not run multiple concurrent ete.sh as the --display is not parameterized and tests will collide
 #
 if [ "$1" == "" ] ;  then
-   echo "Usage: eteHelm-k8s.sh namespace  "
-   echo " list projects via helm list and runs health-check with those tags except dev and dev-consul "
+   echo "Usage: eteHelm-k8s.sh [namespace]"
+   echo " list projects via helm list and runs health-check with those tags except dev and dev-consul"
    exit
 fi
 
index c284f2d..c1babf3 100644 (file)
@@ -70,7 +70,45 @@ spec:
             value: {{ .Values.config.javaOptions }}
           - name: BACKEND
             value: {{ .Values.config.backendServerURL }}
+          - name: IS_HTTPS
+            value: "{{ .Values.config.isHttpsEnabled}}"
+            {{ if and .Values.config.isHttpsEnabled (eq .Values.security.isDefaultStore false) }}
+          - name: KEYSTORE_PASS
+            {{- if .Values.global.security.keysFromCa }}
+            valueFrom:
+              secretKeyRef:
+                name: mft-sdc
+                key: keystore-password.txt
+            {{ else }}
+            value: "{{ .Values.global.security.keyStorePass }}"
+            {{- end }}
+          - name: TRUSTSTORE_PASS
+            {{- if .Values.global.security.keysFromCa }}
+            valueFrom:
+              secretKeyRef:
+                name: mft-catruststore
+                key: keystore-password.txt
+            {{ else }}
+            value: "{{ .Values.global.security.trustStorePass }}"
+            {{- end }}
+          - name: TRUSTSTORE_PATH
+            value: "{{ .Values.security.storePath }}/{{ .Values.security.truststoreFilename }}"
+          - name: KEYSTORE_PATH
+            value: "{{ .Values.security.storePath }}/{{ .Values.security.keystoreFilename }}"
+          - name: TRUSTSTORE_TYPE
+            value: {{ .Values.security.truststoreType }}
+          - name: KEYSTORE_TYPE
+            value: {{ .Values.security.keystoreType }}
+            {{ end }}
           volumeMounts:
+          {{ if and .Values.config.isHttpsEnabled (eq .Values.security.isDefaultStore false) }}
+          - name: {{ include "common.fullname" . }}-jetty-https-truststore
+            mountPath: /var/lib/jetty/{{ .Values.security.storePath }}/{{ .Values.security.truststoreFilename }}
+            subPath: {{ .Values.security.truststoreFilename }}
+          - name: {{ include "common.fullname" . }}-jetty-https-keystore
+            mountPath: /var/lib/jetty/{{ .Values.security.storePath }}/{{ .Values.security.keystoreFilename }}
+            subPath: {{ .Values.security.keystoreFilename }}
+          {{ end }}
           - name: {{ include "common.fullname" . }}-localtime
             mountPath: /etc/localtime
             readOnly: true
index 2990de3..87ca360 100644 (file)
@@ -40,10 +40,16 @@ spec:
     - port: {{ .Values.service.internalPort }}
       nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
       name: {{ .Values.service.portName | default "http" }}
+    - port: {{ .Values.service.internalPort2 }}
+      nodePort: {{ .Values.global.nodePortPrefixExt | default .Values.nodePortPrefixExt }}{{ .Values.service.nodePort2 }}
+      name: {{ .Values.service.portName2 | default "https" }}
     {{- else -}}
     - port: {{ .Values.service.externalPort }}
       targetPort: {{ .Values.service.internalPort }}
       name: {{ .Values.service.portName | default "http" }}
+    - port: {{ .Values.service.externalPort2 }}
+      targetPort: {{ .Values.service.internalPort2 }}
+      name: {{ .Values.service.portName2 | default "https" }}
     {{- end}}
   selector:
     app: {{ include "common.name" . }}
index d0ff537..a217de5 100644 (file)
@@ -17,6 +17,7 @@
 #################################################################
 global:
   nodePortPrefix: 302
+  nodePortPrefixExt: 304
   readinessRepository: oomk8s
   readinessImage: readiness-check:2.0.2
   loggingRepository: docker.elastic.co
@@ -36,6 +37,16 @@ debugEnabled: false
 config:
   javaOptions: "-Xdebug -agentlib:jdwp=transport=dt_socket,address=7000,server=y,suspend=n -Xmx256m -Xms256m"
   backendServerURL: "http://sdc-wfd-be:8080"
+  isHttpsEnabled: false
+
+# https relevant settings. Change in case you have other trust files then default ones.
+security:
+  isDefaultStore: true
+  truststoreType: "JKS"
+  keystoreType: "JKS"
+  truststoreFilename: "truststore"
+  keystoreFilename: "keystore"
+  storePath: "etc"
 
 # default number of instances
 replicaCount: 1
@@ -62,6 +73,10 @@ service:
   externalPort: 8080
   portName: sdc-wfd-fe
   nodePort: "56"
+  portName2: sdc-wfd-fe2
+  internalPort2: 8443
+  externalPort2: 8443
+  nodePort2: "31"
 
 ingress:
   enabled: false