Merge "Install/uninstall helm chart"
author: Mandeep Khinda <Mandeep.Khinda@amdocs.com>
Thu, 15 Feb 2018 16:17:08 +0000 (16:17 +0000)
committer: Gerrit Code Review <gerrit@onap.org>
Thu, 15 Feb 2018 16:17:08 +0000 (16:17 +0000)
408 files changed:
.gitignore
.idea/vcs.xml [deleted file]
TOSCA/kubernetes-cluster-TOSCA/LICENSE [new file with mode: 0644]
TOSCA/kubernetes-cluster-TOSCA/README.md [new file with mode: 0644]
TOSCA/kubernetes-cluster-TOSCA/imports/cloud-config.yaml [new file with mode: 0644]
TOSCA/kubernetes-cluster-TOSCA/imports/kubernetes.yaml [new file with mode: 0644]
TOSCA/kubernetes-cluster-TOSCA/openstack-blueprint.yaml [new file with mode: 0644]
TOSCA/kubernetes-cluster-TOSCA/policies/scale.clj [new file with mode: 0644]
TOSCA/kubernetes-cluster-TOSCA/scripts/create.py [new file with mode: 0644]
TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/configure.py [new file with mode: 0644]
TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/start.py [new file with mode: 0644]
TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_node/configure.py [new file with mode: 0644]
TOSCA/kubernetes-cluster-TOSCA/scripts/nfs.sh [new file with mode: 0644]
TOSCA/kubernetes-cluster-TOSCA/scripts/tasks.py [new file with mode: 0644]
docs/OOM User Guide/oom_user_guide.rst
docs/release-notes.rst
kubernetes/README_HELM
kubernetes/aaf/resources/config/aaf-cs-data/ecomp.cql [moved from kubernetes/config/docker/init/src/config/aaf/data/ecomp.cql with 100% similarity]
kubernetes/aaf/resources/config/aaf-cs-data/identities.dat [moved from kubernetes/config/docker/init/src/config/aaf/data/identities.dat with 100% similarity]
kubernetes/aaf/resources/config/aaf-cs-data/identities.idx [moved from kubernetes/config/docker/init/src/config/aaf/data/identities.idx with 100% similarity]
kubernetes/aaf/resources/config/aaf-cs-data/init.cql [moved from kubernetes/config/docker/init/src/config/aaf/data/init.cql with 100% similarity]
kubernetes/aaf/resources/config/aaf-data/identities.dat [moved from kubernetes/config/docker/init/src/config/aaf/data2/identities.dat with 100% similarity]
kubernetes/aaf/templates/aaf-configmap.yaml [new file with mode: 0644]
kubernetes/aaf/templates/aaf-cs-deployment.yaml
kubernetes/aaf/templates/aaf-deployment.yaml
kubernetes/aaf/templates/aaf-secret.yaml [new file with mode: 0644]
kubernetes/aai/resources/config/aai-data/chef-config/dev/.knife/solo.rb [moved from kubernetes/config/docker/init/src/config/aai/aai-data/chef-config/dev/.knife/solo.rb with 100% similarity]
kubernetes/aai/resources/config/aai-data/environments/README.md [moved from kubernetes/config/docker/init/src/config/aai/aai-data/environments/README.md with 100% similarity]
kubernetes/aai/resources/config/aai-data/environments/simpledemo.json [moved from kubernetes/config/docker/init/src/config/aai/aai-data/environments/simpledemo.json with 100% similarity]
kubernetes/aai/resources/config/aai-data/environments/solo.json [moved from kubernetes/config/docker/init/src/config/aai/aai-data/environments/solo.json with 100% similarity]
kubernetes/aai/resources/config/data-router/appconfig/auth/client-cert-onap.p12 [moved from kubernetes/config/docker/init/src/config/aai/data-router/appconfig/auth/client-cert-onap.p12 with 100% similarity]
kubernetes/aai/resources/config/data-router/appconfig/auth/data-router_policy.json [moved from kubernetes/config/docker/init/src/config/aai/data-router/appconfig/auth/data-router_policy.json with 100% similarity]
kubernetes/aai/resources/config/data-router/appconfig/auth/tomcat_keystore [moved from kubernetes/config/docker/init/src/config/aai/data-router/appconfig/auth/tomcat_keystore with 100% similarity]
kubernetes/aai/resources/config/data-router/appconfig/data-router.properties [moved from kubernetes/config/docker/init/src/config/aai/data-router/appconfig/data-router.properties with 100% similarity]
kubernetes/aai/resources/config/data-router/appconfig/model/aai_oxm_v10.xml [moved from kubernetes/config/docker/init/src/config/aai/data-router/appconfig/model/aai_oxm_v10.xml with 100% similarity]
kubernetes/aai/resources/config/data-router/appconfig/model/aai_oxm_v11.xml [moved from kubernetes/config/docker/init/src/config/aai/data-router/appconfig/model/aai_oxm_v11.xml with 100% similarity]
kubernetes/aai/resources/config/data-router/appconfig/model/aai_oxm_v8.xml [moved from kubernetes/config/docker/init/src/config/aai/data-router/appconfig/model/aai_oxm_v8.xml with 100% similarity]
kubernetes/aai/resources/config/data-router/appconfig/model/aai_oxm_v9.xml [moved from kubernetes/config/docker/init/src/config/aai/data-router/appconfig/model/aai_oxm_v9.xml with 100% similarity]
kubernetes/aai/resources/config/data-router/dynamic/conf/entity-event-policy.xml [moved from kubernetes/config/docker/init/src/config/aai/data-router/dynamic/conf/entity-event-policy.xml with 100% similarity]
kubernetes/aai/resources/config/data-router/dynamic/routes/entity-event.route [moved from kubernetes/config/docker/init/src/config/aai/data-router/dynamic/routes/entity-event.route with 100% similarity]
kubernetes/aai/resources/config/elasticsearch/config/elasticsearch.yml [moved from kubernetes/config/docker/init/src/config/aai/elasticsearch/config/elasticsearch.yml with 100% similarity]
kubernetes/aai/resources/config/haproxy/haproxy.cfg [moved from kubernetes/config/docker/init/src/config/aai/haproxy/haproxy.cfg with 100% similarity]
kubernetes/aai/resources/config/log/filebeat/filebeat.yml [new file with mode: 0644]
kubernetes/aai/resources/config/log/model-loader/logback.xml [moved from kubernetes/aai/resources/model-loader/conf/logback.xml with 100% similarity]
kubernetes/aai/resources/config/log/resources/logback.xml [moved from kubernetes/aai/resources/resources/conf/logback.xml with 100% similarity]
kubernetes/aai/resources/config/log/search-data-service/logback.xml [moved from kubernetes/aai/resources/search-data-service/conf/logback.xml with 100% similarity]
kubernetes/aai/resources/config/log/sparky-be/logback.xml [moved from kubernetes/aai/resources/sparky-be/conf/logback.xml with 100% similarity]
kubernetes/aai/resources/config/log/traversal/logback.xml [moved from kubernetes/aai/resources/traversal/conf/logback.xml with 100% similarity]
kubernetes/aai/resources/config/model-loader/appconfig/auth/aai-os-cert.p12 [moved from kubernetes/config/docker/init/src/config/aai/model-loader/appconfig/auth/aai-os-cert.p12 with 100% similarity]
kubernetes/aai/resources/config/model-loader/appconfig/model-loader.properties [moved from kubernetes/config/docker/init/src/config/aai/model-loader/appconfig/model-loader.properties with 100% similarity]
kubernetes/aai/resources/config/search-data-service/appconfig/analysis-config.json [moved from kubernetes/config/docker/init/src/config/aai/search-data-service/appconfig/analysis-config.json with 100% similarity]
kubernetes/aai/resources/config/search-data-service/appconfig/auth/search_policy.json [moved from kubernetes/config/docker/init/src/config/aai/search-data-service/appconfig/auth/search_policy.json with 100% similarity]
kubernetes/aai/resources/config/search-data-service/appconfig/auth/tomcat_keystore [moved from kubernetes/config/docker/init/src/config/aai/search-data-service/appconfig/auth/tomcat_keystore with 100% similarity]
kubernetes/aai/resources/config/search-data-service/appconfig/elastic-search.properties [moved from kubernetes/config/docker/init/src/config/aai/search-data-service/appconfig/elastic-search.properties with 100% similarity]
kubernetes/aai/resources/config/search-data-service/appconfig/filter-config.json [moved from kubernetes/config/docker/init/src/config/aai/search-data-service/appconfig/filter-config.json with 100% similarity]
kubernetes/aai/resources/config/sparky-be/appconfig/aai.properties [moved from kubernetes/config/docker/init/src/config/aai/sparky-be/appconfig/aai.properties with 100% similarity]
kubernetes/aai/resources/config/sparky-be/appconfig/auth/aai-os-cert.p12 [moved from kubernetes/config/docker/init/src/config/aai/sparky-be/appconfig/auth/aai-os-cert.p12 with 100% similarity]
kubernetes/aai/resources/config/sparky-be/appconfig/auth/client-cert-onap.p12 [moved from kubernetes/config/docker/init/src/config/aai/sparky-be/appconfig/auth/client-cert-onap.p12 with 100% similarity]
kubernetes/aai/resources/config/sparky-be/appconfig/auth/inventory-ui-keystore [moved from kubernetes/config/docker/init/src/config/aai/sparky-be/appconfig/auth/inventory-ui-keystore with 100% similarity]
kubernetes/aai/resources/config/sparky-be/appconfig/elasticsearch.properties [moved from kubernetes/config/docker/init/src/config/aai/sparky-be/appconfig/elasticsearch.properties with 100% similarity]
kubernetes/aai/resources/config/sparky-be/appconfig/model/aai_oxm_v9.xml [moved from kubernetes/config/docker/init/src/config/aai/sparky-be/appconfig/model/aai_oxm_v9.xml with 100% similarity]
kubernetes/aai/resources/config/sparky-be/appconfig/portal/portal-authentication.properties [moved from kubernetes/config/docker/init/src/config/aai/sparky-be/appconfig/portal/portal-authentication.properties with 100% similarity]
kubernetes/aai/resources/config/sparky-be/appconfig/portal/portal.properties [moved from kubernetes/config/docker/init/src/config/aai/sparky-be/appconfig/portal/portal.properties with 100% similarity]
kubernetes/aai/resources/config/sparky-be/appconfig/roles.config [moved from kubernetes/config/docker/init/src/config/aai/sparky-be/appconfig/roles.config with 100% similarity]
kubernetes/aai/resources/config/sparky-be/appconfig/search-service.properties [moved from kubernetes/config/docker/init/src/config/aai/sparky-be/appconfig/search-service.properties with 100% similarity]
kubernetes/aai/resources/config/sparky-be/appconfig/suggestive-search.properties [moved from kubernetes/config/docker/init/src/config/aai/sparky-be/appconfig/suggestive-search.properties with 100% similarity]
kubernetes/aai/resources/config/sparky-be/appconfig/synchronizer.properties [moved from kubernetes/config/docker/init/src/config/aai/sparky-be/appconfig/synchronizer.properties with 100% similarity]
kubernetes/aai/templates/aai-deployment-configmap.yaml [new file with mode: 0644]
kubernetes/aai/templates/aai-deployment.yaml
kubernetes/aai/templates/aai-filebeat-configmap.yaml [new file with mode: 0644]
kubernetes/aai/templates/aai-resources-deployment.yaml
kubernetes/aai/templates/aai-resources-traversal-configmap.yaml [new file with mode: 0644]
kubernetes/aai/templates/aai-traversal-deployment.yaml
kubernetes/aai/templates/all-services.yaml
kubernetes/aai/templates/data-router-configmap.yaml [new file with mode: 0644]
kubernetes/aai/templates/data-router-deployment.yaml
kubernetes/aai/templates/elasticsearch-configmap.yaml [new file with mode: 0644]
kubernetes/aai/templates/elasticsearch-deployment.yaml
kubernetes/aai/templates/hbase-deployment.yaml
kubernetes/aai/templates/modelloader-deployment-configmap.yaml [new file with mode: 0644]
kubernetes/aai/templates/modelloader-deployment.yaml
kubernetes/aai/templates/search-data-service-configmap.yaml [new file with mode: 0644]
kubernetes/aai/templates/search-data-service-deployment.yaml
kubernetes/aai/templates/sparky-be-deployment-configmap.yaml [new file with mode: 0644]
kubernetes/aai/templates/sparky-be-deployment.yaml
kubernetes/aai/values.yaml
kubernetes/appc/resources/config/conf/aaiclient.properties [moved from kubernetes/config/docker/init/src/config/appc/conf/aaiclient.properties with 100% similarity]
kubernetes/appc/resources/config/conf/appc.properties [moved from kubernetes/config/docker/init/src/config/appc/conf/appc.properties with 100% similarity]
kubernetes/appc/resources/config/log/filebeat/log4j/filebeat.yml [new file with mode: 0644]
kubernetes/appc/resources/config/log/org.ops4j.pax.logging.cfg [moved from kubernetes/config/docker/init/src/config/log/appc/org.ops4j.pax.logging.cfg with 100% similarity]
kubernetes/appc/templates/appc-conf-configmap.yaml [new file with mode: 0644]
kubernetes/appc/templates/appc-deployment.yaml
kubernetes/appc/templates/appc-log-configmap.yaml [new file with mode: 0644]
kubernetes/appc/templates/db-deployment.yaml
kubernetes/appc/templates/dgbuilder-deployment.yaml
kubernetes/appc/values.yaml
kubernetes/clamp/resources/config/mariadb/conf.d/conf1/my.cnf [moved from kubernetes/config/docker/init/src/config/clamp/mariadb/conf.d/conf1/my.cnf with 100% similarity]
kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql [moved from kubernetes/config/docker/init/src/config/clamp/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql with 100% similarity]
kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-stored-procedures.sql [moved from kubernetes/config/docker/init/src/config/clamp/mariadb/docker-entrypoint-initdb.d/bulkload/clds-stored-procedures.sql with 100% similarity]
kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/drop/clds-drop-db-objects.sql [moved from kubernetes/config/docker/init/src/config/clamp/mariadb/docker-entrypoint-initdb.d/drop/clds-drop-db-objects.sql with 100% similarity]
kubernetes/clamp/resources/config/mariadb/docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh [moved from kubernetes/config/docker/init/src/config/clamp/mariadb/docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh with 100% similarity]
kubernetes/clamp/templates/clamp-mariadb-configmap.yaml [new file with mode: 0644]
kubernetes/clamp/templates/clamp-mariadb-deployment.yaml
kubernetes/cli/templates/all-service.yaml
kubernetes/cli/templates/cli-deployment.yaml
kubernetes/config/docker/init/config-init.sh
kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnhost.json [new file with mode: 0644]
kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-elastic-search.json [new file with mode: 0644]
kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-kibana.json [new file with mode: 0644]
kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-logstash.json [new file with mode: 0644]
kubernetes/config/docker/init/src/config/consul/consul-agent-config/msb-health.json [new file with mode: 0644]
kubernetes/config/docker/init/src/config/consul/consul-agent-config/multicloud-health-check.json [new file with mode: 0644]
kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnhost.json [new file with mode: 0644]
kubernetes/config/docker/init/src/config/consul/consul-agent-config/vfc-health.json [new file with mode: 0644]
kubernetes/config/docker/init/src/config/log/policy/drools/logback.xml [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/aria_log.00000001 [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/aria_log_control [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/debian-10.0.flag [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/firstrun [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/ib_logfile0 [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/ib_logfile1 [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/ibdata1 [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/log/db.opt [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/multi-master.info [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/column_stats.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/column_stats.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/column_stats.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/columns_priv.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/columns_priv.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/columns_priv.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/db.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/db.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/db.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/event.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/event.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/event.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/func.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/func.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/func.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/general_log.CSM [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/general_log.CSV [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/general_log.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/gtid_slave_pos.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/gtid_slave_pos.ibd [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_category.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_category.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_category.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_keyword.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_keyword.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_keyword.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_relation.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_relation.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_relation.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_topic.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_topic.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_topic.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/host.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/host.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/host.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/index_stats.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/index_stats.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/index_stats.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_index_stats.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_index_stats.ibd [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_table_stats.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_table_stats.ibd [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/plugin.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/plugin.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/plugin.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proc.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proc.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proc.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/procs_priv.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/procs_priv.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/procs_priv.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proxies_priv.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proxies_priv.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proxies_priv.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/roles_mapping.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/roles_mapping.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/roles_mapping.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/servers.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/servers.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/servers.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/slow_log.CSM [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/slow_log.CSV [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/slow_log.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/table_stats.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/table_stats.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/table_stats.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/tables_priv.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/tables_priv.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/tables_priv.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_leap_second.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_leap_second.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_leap_second.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_name.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_name.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition_type.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition_type.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition_type.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/user.MYD [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/user.MYI [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/user.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/accounts.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/cond_instances.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/db.opt [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_current.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_history.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_history_long.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_account_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_host_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_thread_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_user_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_global_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_current.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_history.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_history_long.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_account_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_digest.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_host_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_thread_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_user_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_global_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_current.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_history.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_history_long.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_account_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_host_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_instance.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_thread_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_user_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_global_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/file_instances.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/file_summary_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/file_summary_by_instance.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/host_cache.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/hosts.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/mutex_instances.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/objects_summary_global_by_type.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/performance_timers.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/rwlock_instances.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/session_account_connect_attrs.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/session_connect_attrs.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_actors.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_consumers.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_instruments.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_objects.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_timers.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/socket_instances.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/socket_summary_by_event_name.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/socket_summary_by_instance.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/table_io_waits_summary_by_index_usage.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/table_io_waits_summary_by_table.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/table_lock_waits_summary_by_table.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/threads.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/users.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/support/db.opt [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/support/db_version.frm [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/support/db_version.ibd [deleted file]
kubernetes/config/docker/init/src/config/policy/mariadb/data/xacml/db.opt [deleted file]
kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/push-policies.sh [deleted file]
kubernetes/config/prepull_docker.sh
kubernetes/config/values.yaml
kubernetes/kube2msb/values.yaml
kubernetes/log/templates/all-services.yaml
kubernetes/log/templates/elasticsearch-deployment.yaml
kubernetes/log/templates/logstash-deployment.yaml
kubernetes/log/values.yaml
kubernetes/message-router/templates/message-router-dmaap.yaml
kubernetes/message-router/templates/message-router-kafka.yaml
kubernetes/message-router/templates/message-router-zookeeper.yaml
kubernetes/message-router/values.yaml
kubernetes/msb/templates/msb-discovery-deployment.yaml
kubernetes/msb/templates/msb-eag-deployment.yaml
kubernetes/msb/templates/msb-iag-deployment.yaml
kubernetes/msb/values.yaml
kubernetes/mso/resources/config/docker-files/scripts/start-jboss-server.sh [moved from kubernetes/config/docker/init/src/config/mso/docker-files/scripts/start-jboss-server.sh with 100% similarity]
kubernetes/mso/resources/config/log/filebeat/filebeat.yml [new file with mode: 0644]
kubernetes/mso/resources/config/log/logback.apihandler-infra.xml [moved from kubernetes/config/docker/init/src/config/log/mso/logback.apihandler-infra.xml with 100% similarity]
kubernetes/mso/resources/config/log/logback.appc.xml [moved from kubernetes/config/docker/init/src/config/log/mso/logback.appc.xml with 100% similarity]
kubernetes/mso/resources/config/log/logback.asdc.xml [moved from kubernetes/config/docker/init/src/config/log/mso/logback.asdc.xml with 100% similarity]
kubernetes/mso/resources/config/log/logback.bpmn.xml [moved from kubernetes/config/docker/init/src/config/log/mso/logback.bpmn.xml with 100% similarity]
kubernetes/mso/resources/config/log/logback.msorequestsdbadapter.xml [moved from kubernetes/config/docker/init/src/config/log/mso/logback.msorequestsdbadapter.xml with 100% similarity]
kubernetes/mso/resources/config/log/logback.network.xml [moved from kubernetes/config/docker/init/src/config/log/mso/logback.network.xml with 100% similarity]
kubernetes/mso/resources/config/log/logback.sdnc.xml [moved from kubernetes/config/docker/init/src/config/log/mso/logback.sdnc.xml with 100% similarity]
kubernetes/mso/resources/config/log/logback.tenant.xml [moved from kubernetes/config/docker/init/src/config/log/mso/logback.tenant.xml with 100% similarity]
kubernetes/mso/resources/config/log/logback.vfc.xml [moved from kubernetes/config/docker/init/src/config/log/mso/logback.vfc.xml with 100% similarity]
kubernetes/mso/resources/config/log/logback.vnf.xml [moved from kubernetes/config/docker/init/src/config/log/mso/logback.vnf.xml with 100% similarity]
kubernetes/mso/resources/config/log/logback.workflow-message-adapter.xml [moved from kubernetes/config/docker/init/src/config/log/mso/logback.workflow-message-adapter.xml with 100% similarity]
kubernetes/mso/resources/config/mariadb/conf.d/mariadb1.cnf [moved from kubernetes/config/docker/init/src/config/mso/mariadb/conf.d/mariadb1.cnf with 100% similarity]
kubernetes/mso/resources/config/mariadb/docker-entrypoint-initdb.d/01-load-default-sql-files.sh [moved from kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/01-load-default-sql-files.sh with 100% similarity]
kubernetes/mso/resources/config/mariadb/docker-entrypoint-initdb.d/02-load-additional-changes.sh [moved from kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/02-load-additional-changes.sh with 100% similarity]
kubernetes/mso/resources/config/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/automated-tests/create_mso_db-tests.sql [moved from kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/automated-tests/create_mso_db-tests.sql with 100% similarity]
kubernetes/mso/resources/config/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql [moved from kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql with 100% similarity]
kubernetes/mso/resources/config/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-dns/create_mso_db-demo-dns.sql [moved from kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-dns/create_mso_db-demo-dns.sql with 100% similarity]
kubernetes/mso/resources/config/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-vfw/create_mso_db-demo-vfw.sql [moved from kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-vfw/create_mso_db-demo-vfw.sql with 100% similarity]
kubernetes/mso/resources/config/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mariadb_engine_7.7.3-ee.sql [moved from kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mariadb_engine_7.7.3-ee.sql with 100% similarity]
kubernetes/mso/resources/config/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mysql_create_camunda_admin.sql [moved from kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mysql_create_camunda_admin.sql with 100% similarity]
kubernetes/mso/resources/config/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Catalog-schema.sql [moved from kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Catalog-schema.sql with 100% similarity]
kubernetes/mso/resources/config/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Requests-schema.sql [moved from kubernetes/config/docker/init/src/config/mso/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Requests-schema.sql with 100% similarity]
kubernetes/mso/resources/config/mso/aai.crt [moved from kubernetes/config/docker/init/src/config/mso/mso/aai.crt with 100% similarity]
kubernetes/mso/resources/config/mso/encryption.key [moved from kubernetes/config/docker/init/src/config/mso/mso/encryption.key with 100% similarity]
kubernetes/mso/resources/config/mso/mso-docker.json [moved from kubernetes/config/docker/init/src/config/mso/mso/mso-docker.json with 50% similarity]
kubernetes/mso/templates/db-deployment-configmap.yaml [new file with mode: 0644]
kubernetes/mso/templates/db-deployment.yaml
kubernetes/mso/templates/mso-deployment-configmap.yaml [new file with mode: 0644]
kubernetes/mso/templates/mso-deployment.yaml
kubernetes/mso/templates/mso-log-configmap.yaml [new file with mode: 0644]
kubernetes/mso/values.yaml
kubernetes/oneclick/createAll.bash
kubernetes/oneclick/tools/autoCleanConfig.bash [new file with mode: 0644]
kubernetes/oneclick/tools/autoCreateConfig.bash [new file with mode: 0644]
kubernetes/oneclick/tools/collectInfo.bash [new file with mode: 0644]
kubernetes/policy/resources/config/drools/settings.xml [moved from kubernetes/config/docker/init/src/config/policy/drools/settings.xml with 100% similarity]
kubernetes/policy/resources/config/log/drools/logback.xml [new file with mode: 0644]
kubernetes/policy/resources/config/log/ep_sdk_app/logback.xml [moved from kubernetes/config/docker/init/src/config/log/policy/ep_sdk_app/logback.xml with 100% similarity]
kubernetes/policy/resources/config/log/filebeat/filebeat.yml [new file with mode: 0644]
kubernetes/policy/resources/config/log/pypdpserver/logback.xml [moved from kubernetes/config/docker/init/src/config/log/policy/pypdpserver/logback.xml with 100% similarity]
kubernetes/policy/resources/config/log/xacml-pap-rest/logback.xml [moved from kubernetes/config/docker/init/src/config/log/policy/xacml-pap-rest/logback.xml with 100% similarity]
kubernetes/policy/resources/config/log/xacml-pdp-rest/logback.xml [moved from kubernetes/config/docker/init/src/config/log/policy/xacml-pdp-rest/logback.xml with 100% similarity]
kubernetes/policy/resources/config/opt/policy/config/drools/base.conf [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/drools/base.conf with 83% similarity]
kubernetes/policy/resources/config/opt/policy/config/drools/drools-tweaks.sh [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/drools/drools-tweaks.sh with 100% similarity]
kubernetes/policy/resources/config/opt/policy/config/drools/feature-healthcheck.conf [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/drools/feature-healthcheck.conf with 100% similarity]
kubernetes/policy/resources/config/opt/policy/config/drools/policy-keystore [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/drools/policy-keystore with 100% similarity, mode: 0644]
kubernetes/policy/resources/config/opt/policy/config/drools/policy-management.conf [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/drools/policy-management.conf with 100% similarity]
kubernetes/policy/resources/config/opt/policy/config/pe/base.conf [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/base.conf with 100% similarity]
kubernetes/policy/resources/config/opt/policy/config/pe/brmsgw-tweaks.sh [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/brmsgw-tweaks.sh with 92% similarity]
kubernetes/policy/resources/config/opt/policy/config/pe/brmsgw.conf [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/brmsgw.conf with 87% similarity]
kubernetes/policy/resources/config/opt/policy/config/pe/console.conf [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/console.conf with 98% similarity]
kubernetes/policy/resources/config/opt/policy/config/pe/elk.conf [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/elk.conf with 100% similarity]
kubernetes/policy/resources/config/opt/policy/config/pe/mysql.conf [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/mysql.conf with 100% similarity]
kubernetes/policy/resources/config/opt/policy/config/pe/pap-tweaks.sh [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/pap-tweaks.sh with 100% similarity]
kubernetes/policy/resources/config/opt/policy/config/pe/pap.conf [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/pap.conf with 100% similarity]
kubernetes/policy/resources/config/opt/policy/config/pe/paplp.conf [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/paplp.conf with 100% similarity]
kubernetes/policy/resources/config/opt/policy/config/pe/pdp-tweaks.sh [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/pdp-tweaks.sh with 100% similarity]
kubernetes/policy/resources/config/opt/policy/config/pe/pdp.conf [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/pdp.conf with 94% similarity]
kubernetes/policy/resources/config/opt/policy/config/pe/pdplp.conf [moved from kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/pdplp.conf with 100% similarity]
kubernetes/policy/resources/config/opt/policy/config/pe/push-policies.sh [new file with mode: 0755]
kubernetes/policy/scripts/update-vfw-op-policy.sh [new file with mode: 0755]
kubernetes/policy/templates/all-services.yaml
kubernetes/policy/templates/dep-brmsgw.yaml
kubernetes/policy/templates/dep-drools.yaml
kubernetes/policy/templates/dep-maria.yaml
kubernetes/policy/templates/dep-pap.yaml
kubernetes/policy/templates/dep-pdp.yaml
kubernetes/policy/templates/policy-deployment-configmap.yaml [new file with mode: 0644]
kubernetes/policy/templates/policy-deployment-secret.yaml [new file with mode: 0644]
kubernetes/policy/templates/policy-log-configmap.yaml [new file with mode: 0644]
kubernetes/policy/templates/policy-pv-pvc.yaml [deleted file]
kubernetes/policy/values.yaml
kubernetes/portal/docker/init/ubuntu/Dockerfile [deleted file]
kubernetes/portal/resources/config/log/filebeat/filebeat.yml [new file with mode: 0644]
kubernetes/portal/resources/config/log/portal/onapportal/logback.xml [moved from kubernetes/config/docker/init/src/config/log/portal/onapportal/logback.xml with 100% similarity]
kubernetes/portal/resources/config/log/portal/onapportalsdk/logback.xml [moved from kubernetes/config/docker/init/src/config/log/portal/onapportalsdk/logback.xml with 100% similarity]
kubernetes/portal/resources/config/mariadb/oom_updates.sql [moved from kubernetes/config/docker/init/src/config/portal/mariadb/oom_updates.sql with 75% similarity]
kubernetes/portal/resources/config/portal-fe/webapps/etc/ONAPPORTAL/fusion.properties [moved from kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ONAPPORTAL/fusion.properties with 100% similarity]
kubernetes/portal/resources/config/portal-fe/webapps/etc/ONAPPORTAL/openid-connect.properties [moved from kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ONAPPORTAL/openid-connect.properties with 100% similarity]
kubernetes/portal/resources/config/portal-fe/webapps/etc/ONAPPORTAL/portal.properties [moved from kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ONAPPORTAL/portal.properties with 100% similarity]
kubernetes/portal/resources/config/portal-fe/webapps/etc/ONAPPORTAL/system.properties [moved from kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ONAPPORTAL/system.properties with 100% similarity]
kubernetes/portal/resources/config/portal-fe/webapps/etc/ONAPPORTALSDK/fusion.properties [moved from kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ONAPPORTALSDK/fusion.properties with 100% similarity]
kubernetes/portal/resources/config/portal-fe/webapps/etc/ONAPPORTALSDK/portal.properties [moved from kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ONAPPORTALSDK/portal.properties with 100% similarity]
kubernetes/portal/resources/config/portal-fe/webapps/etc/ONAPPORTALSDK/system.properties [moved from kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ONAPPORTALSDK/system.properties with 100% similarity]
kubernetes/portal/resources/config/portal-fe/webapps/etc/ONAPWIDGETMS/application.properties [moved from kubernetes/config/docker/init/src/config/portal/portal-fe/webapps/etc/ONAPWIDGETMS/application.properties with 100% similarity]
kubernetes/portal/resources/scripts/update_hosts.sh [new file with mode: 0644]
kubernetes/portal/templates/portal-apps-configmap.yaml [new file with mode: 0644]
kubernetes/portal/templates/portal-apps-deployment.yaml
kubernetes/portal/templates/portal-logs-configmap.yaml [new file with mode: 0644]
kubernetes/portal/templates/portal-mariadb-deployment.yaml
kubernetes/portal/templates/portal-vnc-dep.yaml
kubernetes/portal/templates/portal-widgets-deployment.yaml
kubernetes/portal/values.yaml
kubernetes/readiness/docker/init/ready.py
kubernetes/robot/demo-k8s.sh
kubernetes/sdnc/resources/config/conf/aaiclient.properties [moved from kubernetes/config/docker/init/src/config/sdnc/conf/aaiclient.properties with 100% similarity]
kubernetes/sdnc/resources/config/conf/admportal.json [moved from kubernetes/config/docker/init/src/config/sdnc/conf/admportal.json with 100% similarity]
kubernetes/sdnc/resources/config/dmaap/dhcpalert.properties [new file with mode: 0644]
kubernetes/sdnc/resources/config/log/filebeat/log4j/filebeat.yml [new file with mode: 0644]
kubernetes/sdnc/resources/config/log/org.ops4j.pax.logging.cfg [moved from kubernetes/config/docker/init/src/config/log/sdnc/org.ops4j.pax.logging.cfg with 100% similarity]
kubernetes/sdnc/resources/config/ueb/ueb-listener.properties [new file with mode: 0644]
kubernetes/sdnc/templates/dgbuilder-deployment.yaml
kubernetes/sdnc/templates/dmaap-deployment-configmap.yaml [new file with mode: 0644]
kubernetes/sdnc/templates/dmaap-deployment.yaml [new file with mode: 0644]
kubernetes/sdnc/templates/nfs-provisoner-deployment.yaml
kubernetes/sdnc/templates/sdnc-conf-configmap.yaml [new file with mode: 0644]
kubernetes/sdnc/templates/sdnc-log-configmap.yaml [new file with mode: 0644]
kubernetes/sdnc/templates/sdnc-statefulset.yaml
kubernetes/sdnc/templates/ueb-deployment-configmap.yaml [new file with mode: 0644]
kubernetes/sdnc/templates/ueb-deployment.yaml [new file with mode: 0644]
kubernetes/sdnc/templates/web-deployment.yaml
kubernetes/sdnc/values.yaml
kubernetes/vid/resources/config/lf_config/vid-my.cnf [moved from kubernetes/config/docker/init/src/config/vid/vid/lf_config/vid-my.cnf with 100% similarity]
kubernetes/vid/resources/config/lf_config/vid-pre-init.sql [moved from kubernetes/config/docker/init/src/config/vid/vid/lf_config/vid-pre-init.sql with 100% similarity]
kubernetes/vid/resources/config/log/filebeat/filebeat.yml [new file with mode: 0644]
kubernetes/vid/resources/config/log/vid/logback.xml [moved from kubernetes/config/docker/init/src/config/log/vid/logback.xml with 100% similarity]
kubernetes/vid/templates/vid-lfconfig-configmap.yaml [new file with mode: 0644]
kubernetes/vid/templates/vid-log-configmap.yaml [new file with mode: 0644]
kubernetes/vid/templates/vid-mariadb-deployment.yaml
kubernetes/vid/templates/vid-pv-pvc.yaml
kubernetes/vid/templates/vid-server-deployment.yaml
kubernetes/vid/values.yaml

index b671448..028a8ee 100644 (file)
@@ -1,5 +1,2 @@
 kubernetes/config/onap-parameters.yaml
-.idea/vcs.xml
-.idea/modules.xml
-.idea/oom.iml
-.idea/workspace.xml
+.idea/*
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
deleted file mode 100644 (file)
index 35eb1dd..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-  <component name="VcsDirectoryMappings">
-    <mapping directory="" vcs="Git" />
-  </component>
-</project>
\ No newline at end of file
diff --git a/TOSCA/kubernetes-cluster-TOSCA/LICENSE b/TOSCA/kubernetes-cluster-TOSCA/LICENSE
new file mode 100644 (file)
index 0000000..696f3d0
--- /dev/null
@@ -0,0 +1,17 @@
+ ============LICENSE_START==========================================
+ ===================================================================
+ Copyright © 2018 AT&T
+ All rights reserved.
+ ===================================================================
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ============LICENSE_END============================================
\ No newline at end of file
diff --git a/TOSCA/kubernetes-cluster-TOSCA/README.md b/TOSCA/kubernetes-cluster-TOSCA/README.md
new file mode 100644 (file)
index 0000000..8bc097f
--- /dev/null
@@ -0,0 +1,73 @@
+[![Build Status](https://circleci.com/gh/cloudify-examples/simple-kubernetes-blueprint.svg?style=shield&circle-token=:circle-token)](https://circleci.com/gh/cloudify-examples/simple-kubernetes-blueprint)
+
+
+##  Kubernetes Cluster Example
+
+This blueprint creates an example Kubernetes cluster. It is intended as an example. The underlying Kubernetes configuration method used is [Kubeadm](https://kubernetes.io/docs/admin/kubeadm/), which is not considered production-ready.
+
+Regardless of your infrastructure choice, this blueprint installs and configures on each VM:
+- The Kubernetes Yum repo will be installed on your VMs.
+- Docker, version 1.12.6-28.git1398f24.el7.centos
+- kubelet, version 1.8.6-0.
+- kubeadm, version 1.8.6-0.
+- kubernetes-cni, version 0.5.1-1.
+- weave
+
+
+## Prerequisites
+
+You will need a *Cloudify Manager* running in either AWS, Azure, or Openstack. The Cloudify manager should be setup using the [Cloudify environment setup](https://github.com/cloudify-examples/cloudify-environment-setup) - that's how we test this blueprint. The following are therefore assumed:
+* You have uploaded all of the required plugins to your manager in order to use this blueprint. (See the imports section of the blueprint.yaml file to check that you are using the correct plugins and their respective versions.)
+* You have created all of the required secrets on your manager in order to use this blueprint. (See #secrets.)
+* A Centos 7.X image. If you are running in AWS or Openstack, your image must support [Cloud-init](https://cloudinit.readthedocs.io/en/latest/).
+
+
+#### Secrets
+
+* Common Secrets:
+  * agent_key_private
+  * agent_key_public
+
+* Openstack Secrets:
+  * external_network_name: This is the network on your Openstack that represents the internet gateway network.
+  * public_network_name: An openstack network. (Inbound is expected, outbound is required.)
+  * public_subnet_name: A subnet on the public network.
+  * private_network_name: An openstack network. (Inbound is not expected, outbound is required.)
+  * private_subnet_name: A subnet on the network. (Inbound is not expected, outbound is required.)
+  * router_name: This is a router that is attached to your Subnets designated in the secrets public_subnet_name and private_subnet_name.
+  * region: Your Keystone V2 region.
+  * keystone_url: Your Keystone V2 auth URL.
+  * keystone_tenant_name: Your Keystone V2 tenant name.
+  * keystone_password: Your Keystone V2 password.
+  * keystone_username: Your Keystone V2 username.
+
+
+### Step 1: Install the Kubernetes cluster
+
+#### For Openstack run:
+
+Please follow the instructions on the wiki:
+https://wiki.onap.org/display/DW/ONAP+on+Kubernetes+on+Cloudify#ONAPonKubernetesonCloudify-OpenStack
+
+
+### Step 2: Verify the demo installed and started.
+
+Once the workflow execution is complete, verify that these secrets were created:
+
+
+```shell
+(Incubator)UNICORN:Projects trammell$ cfy secrets list
+Listing all secrets...
+
+Secrets:
++------------------------------------------+--------------------------+--------------------------+------------+----------------+------------+
+|                   key                    |        created_at        |        updated_at        | permission |  tenant_name   | created_by |
++------------------------------------------+--------------------------+--------------------------+------------+----------------+------------+
+| kubernetes-admin_client_certificate_data | 2017-08-09 14:58:06.421  | 2017-08-09 14:58:06.421  |            | default_tenant |   admin    |
+|     kubernetes-admin_client_key_data     | 2017-08-09 14:58:06.513  | 2017-08-09 14:58:06.513  |            | default_tenant |   admin    |
+|  kubernetes_certificate_authority_data   | 2017-08-09 14:58:06.327  | 2017-08-09 14:58:06.327  |            | default_tenant |   admin    |
+|           kubernetes_master_ip           | 2017-08-09 14:56:12.359  | 2017-08-09 14:56:12.359  |            | default_tenant |   admin    |
+|          kubernetes_master_port          | 2017-08-09 14:56:12.452  | 2017-08-09 14:56:12.452  |            | default_tenant |   admin    |
++------------------------------------------+--------------------------+--------------------------+------------+----------------+------------+
+```
+
diff --git a/TOSCA/kubernetes-cluster-TOSCA/imports/cloud-config.yaml b/TOSCA/kubernetes-cluster-TOSCA/imports/cloud-config.yaml
new file mode 100644 (file)
index 0000000..1376816
--- /dev/null
@@ -0,0 +1,76 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# This is the cloud-init configuration. It installs the required packages and performs basic configuration on every VM.
+
+node_templates:
+
+  cloudify_host_cloud_config:
+    type: cloudify.nodes.CloudInit.CloudConfig
+    properties:
+      resource_config:
+        groups:
+          - docker
+        users:
+          - name: { get_input: agent_user }
+            primary-group: wheel
+            groups: docker
+            shell: /bin/bash
+            sudo: ['ALL=(ALL) NOPASSWD:ALL']
+            ssh-authorized-keys:
+              - { get_secret: agent_key_public }
+        write_files:
+          - path: /etc/yum.repos.d/kubernetes.repo
+            owner: root:root
+            permissions: '0444'
+            content: |
+              # installed by cloud-init
+              [kubernetes]
+              name=Kubernetes
+              baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+              enabled=1
+              gpgcheck=1
+              repo_gpgcheck=1
+              gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+                     https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+
+          - path: /etc/sysctl.d/k8s.conf
+            owner: root:root
+            permissions: '0444'
+            content: |
+              # installed by cloud-init
+              net.bridge.bridge-nf-call-ip6tables = 1
+              net.bridge.bridge-nf-call-iptables = 1
+
+        packages:
+          - [docker, 1.12.6]
+          - [kubelet, 1.8.6-0]
+          - [kubeadm, 1.8.6-0]
+          - [kubectl, 1.8.6-0]
+          - [kubernetes-cni, 0.5.1-1]
+          - [nfs-utils]
+        runcmd:
+          - [ setenforce, 0 ]
+          - [ sysctl , '--system' ]
+          - [ systemctl, enable, docker ]
+          - [ systemctl, start, docker ]
+          - [ systemctl, enable, kubelet ]
+          - [ systemctl, start, kubelet ]
+          - [ mkdir, '-p', /tmp/data ]
+          - [ chcon, '-Rt', svirt_sandbox_file_t, /tmp/data ]
+          - [ mkdir, '-p', /dockerdata-nfs ]
+          - [ chmod, 777, /dockerdata-nfs ]
\ No newline at end of file
diff --git a/TOSCA/kubernetes-cluster-TOSCA/imports/kubernetes.yaml b/TOSCA/kubernetes-cluster-TOSCA/imports/kubernetes.yaml
new file mode 100644 (file)
index 0000000..4467fc4
--- /dev/null
@@ -0,0 +1,216 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+inputs:
+
+  labels:
+    default: {}
+
+node_types:
+
+  cloudify.nodes.Kubernetes:
+    derived_from: cloudify.nodes.Root
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        create:
+          implementation: scripts/create.py
+
+  cloudify.nodes.Kubernetes.Master:
+    derived_from: cloudify.nodes.Root
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        create:
+          implementation: scripts/create.py
+        configure:
+          implementation: scripts/kubernetes_master/configure.py
+        start:
+          implementation: scripts/kubernetes_master/start.py
+
+  cloudify.nodes.Kubernetes.Node:
+    derived_from: cloudify.nodes.Root
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        create:
+          implementation: scripts/create.py
+        configure:
+          implementation: scripts/kubernetes_node/configure.py
+        start:
+          implementation: fabric.fabric_plugin.tasks.run_task
+          inputs:
+            tasks_file:
+              default: scripts/tasks.py
+            task_name:
+              default: label_node
+            task_properties:
+              default:
+                hostname: { get_attribute: [ SELF, hostname ] }
+                labels: { get_input: labels }
+            fabric_env:
+              default:
+                host_string: { get_attribute: [ kubernetes_master_host, ip ] }
+                user: { get_input: agent_user }
+                key: { get_secret: agent_key_private }
+        stop:
+          implementation: fabric.fabric_plugin.tasks.run_task
+          inputs:
+            tasks_file:
+              default: scripts/tasks.py
+            task_name:
+              default: stop_node
+            task_properties:
+              default:
+                hostname: { get_attribute: [ SELF, hostname ] }
+            fabric_env:
+              default:
+                host_string: { get_attribute: [ kubernetes_master_host, ip ] }
+                user: { get_input: agent_user }
+                key: { get_secret: agent_key_private }
+        delete:
+          implementation: fabric.fabric_plugin.tasks.run_task
+          inputs:
+            tasks_file:
+              default: scripts/tasks.py
+            task_name:
+              default: delete_node
+            task_properties:
+              default:
+                hostname: { get_attribute: [ SELF, hostname ] }
+            fabric_env:
+              default:
+                host_string: { get_attribute: [ kubernetes_master_host, ip ] }
+                user: { get_input: agent_user }
+                key: { get_secret: agent_key_private }
+
+node_templates:
+
+  kubernetes_master:
+    type: cloudify.nodes.Kubernetes.Master
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: kubernetes_master_host
+
+  kubernetes_node:
+    type: cloudify.nodes.Kubernetes.Node
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: kubernetes_node_host
+      - type: cloudify.relationships.depends_on
+        target: kubernetes_master
+
+outputs:
+
+  kubernetes_cluster_bootstrap_token:
+    value: { get_attribute: [ kubernetes_master, bootstrap_token ] }
+
+  kubernetes_cluster_master_ip:
+    value: { get_attribute: [ kubernetes_master, master_ip ] }
+
+  kubernetes-admin_client_certificate_data:
+    value: { get_attribute: [ kubernetes_master, kubernetes-admin_client_certificate_data ] }
+
+  kubernetes-admin_client_key_data:
+    value: { get_attribute: [ kubernetes_master, kubernetes-admin_client_key_data ] }
+
+  kubernetes_certificate_authority_data:
+    value: { get_attribute: [ kubernetes_master, kubernetes_certificate_authority_data ] }
+
+policy_types:
+  scale_policy_type:
+    source: policies/scale.clj
+    properties:
+      policy_operates_on_group:
+        default: true
+      service_selector:
+        description: regular expression that selects the metric to be measured
+        default: ".*"
+      moving_window_size:
+        description: the moving window for individual sources in secs
+        default: 10
+      scale_threshold:
+        description: the value to trigger scaling over aggregrated moving values
+      scale_limit:
+        description: scaling limit
+        default: 10
+      scale_direction:
+        description: scale up ('<') or scale down ('>')
+        default: '<'
+      cooldown_time:
+        description: the time to wait before evaluating again after a scale
+        default: 60
+
+groups: {}
+
+#  scale_up_group:
+#    members: [kubernetes_node_host]
+#    policies:
+#      auto_scale_up:
+#        type: scale_policy_type
+#        properties:
+#          policy_operates_on_group: true
+#          scale_limit: 6
+#          scale_direction: '<'
+#          scale_threshold: 30
+#          service_selector: .*kubernetes_node_host.*cpu.total.user
+#          cooldown_time: 60
+#        triggers:
+#          execute_scale_workflow:
+#            type: cloudify.policies.triggers.execute_workflow
+#            parameters:
+#              workflow: scale
+#              workflow_parameters:
+#                delta: 1
+#                scalable_entity_name: kubernetes_node_host
+
+#  scale_down_group:
+#    members: [kubernetes_node_host]
+#    policies:
+#      auto_scale_down:
+#        type: scale_policy_type
+#        properties:
+#          policy_operates_on_group: true
+#          scale_limit: 6
+#          scale_direction: '<'
+#          scale_threshold: 30
+#          service_selector: .*kubernetes_node_host.*cpu.total.user
+#          cooldown_time: 60
+#        triggers:
+#          execute_scale_workflow:
+#            type: cloudify.policies.triggers.execute_workflow
+#            parameters:
+#              workflow: scale
+#              workflow_parameters:
+#                delta: 1
+#                scalable_entity_name: kubernetes_node_host
+
+#  heal_group:
+#    members: [kubernetes_node_host]
+#    policies:
+#      simple_autoheal_policy:
+#        type: cloudify.policies.types.host_failure
+#        properties:
+#          service:
+#            - .*kubernetes_node_host.*.cpu.total.system
+#            - .*kubernetes_node_host.*.process.hyperkube.cpu.percent
+#          interval_between_workflows: 60
+#        triggers:
+#          auto_heal_trigger:
+#            type: cloudify.policies.triggers.execute_workflow
+#            parameters:
+#              workflow: heal
+#              workflow_parameters:
+#                node_instance_id: { 'get_property': [ SELF, node_id ] }
+#                diagnose_value: { 'get_property': [ SELF, diagnose ] }
diff --git a/TOSCA/kubernetes-cluster-TOSCA/openstack-blueprint.yaml b/TOSCA/kubernetes-cluster-TOSCA/openstack-blueprint.yaml
new file mode 100644 (file)
index 0000000..5c348e9
--- /dev/null
@@ -0,0 +1,307 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+tosca_definitions_version: cloudify_dsl_1_3
+
+description: >
+  This blueprint creates a Kubernetes Cluster.
+  It is based on this documentation: https://kubernetes.io/docs/getting-started-guides/kubeadm/
+
+imports:
+  - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml
+  - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-openstack-plugin/2.2.0/plugin.yaml
+  - https://raw.githubusercontent.com/cloudify-incubator/cloudify-utilities-plugin/1.2.5/plugin.yaml
+  - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-fabric-plugin/1.5/plugin.yaml
+  - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-diamond-plugin/1.3.5/plugin.yaml
+  - imports/cloud-config.yaml
+  - imports/kubernetes.yaml
+
+inputs:
+
+  image:
+    description: Image to be used when launching agent VMs
+    default: { get_secret: centos_core_image }
+
+  flavor:
+    description: Flavor of the agent VMs
+    default: { get_secret: large_image_flavor }
+
+  agent_user:
+    description: >
+      User for connecting to agent VMs
+    default: centos
+
+dsl_definitions:
+
+  openstack_config: &openstack_config
+    username: { get_secret: keystone_username }
+    password: { get_secret: keystone_password }
+    tenant_name: { get_secret: keystone_tenant_name }
+    auth_url: { get_secret: keystone_url }
+    region: { get_secret: region }
+
+node_templates:
+
+  nfs_server:
+    type: cloudify.nodes.SoftwareComponent
+    properties:
+    interfaces:
+      cloudify.interfaces.lifecycle:
+         start:
+          implementation: fabric.fabric_plugin.tasks.run_script
+          inputs:
+            script_path: scripts/nfs.sh
+            use_sudo: true
+            process:
+              args:
+            fabric_env:
+              host_string: { get_attribute: [ kubernetes_master_host, ip ] }
+              user: { get_input: agent_user }
+              key: { get_secret: agent_key_private }
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: kubernetes_master_host
+
+  kubernetes_master_host:
+    type: cloudify.openstack.nodes.Server
+    properties:
+      openstack_config: *openstack_config
+      agent_config:
+          user: { get_input: agent_user }
+          install_method: remote
+          port: 22
+          key: { get_secret: agent_key_private }
+      server:
+        key_name: ''
+        image: ''
+        flavor: ''
+      management_network_name: { get_property: [ public_network, resource_id ] }
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        create:
+          inputs:
+            args:
+              image: { get_input: image }
+              flavor: { get_input: flavor }
+              userdata: { get_attribute: [ cloudify_host_cloud_config, cloud_config ] }
+    relationships:
+      - target: kubernetes_master_port
+        type: cloudify.openstack.server_connected_to_port
+      - type: cloudify.relationships.depends_on
+        target: cloudify_host_cloud_config
+
+  kubernetes_node_host:
+    type: cloudify.openstack.nodes.Server
+    properties:
+      openstack_config: *openstack_config
+      agent_config:
+          user: { get_input: agent_user }
+          install_method: remote
+          port: 22
+          key: { get_secret: agent_key_private }
+      server:
+        key_name: ''
+        image: ''
+        flavor: ''
+      management_network_name: { get_property: [ private_network, resource_id ] }
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: k8s_node_scaling_tier
+      - target: kubernetes_node_port
+        type: cloudify.openstack.server_connected_to_port
+    interfaces:
+      cloudify.interfaces.lifecycle:
+        create:
+          inputs:
+            args:
+              image: { get_input: image }
+              flavor: { get_input: flavor }
+              userdata: { get_attribute: [ cloudify_host_cloud_config, cloud_config ] }
+      cloudify.interfaces.monitoring_agent:
+          install:
+            implementation: diamond.diamond_agent.tasks.install
+            inputs:
+              diamond_config:
+                interval: 1
+          start: diamond.diamond_agent.tasks.start
+          stop: diamond.diamond_agent.tasks.stop
+          uninstall: diamond.diamond_agent.tasks.uninstall
+      cloudify.interfaces.monitoring:
+          start:
+            implementation: diamond.diamond_agent.tasks.add_collectors
+            inputs:
+              collectors_config:
+                CPUCollector: {}
+                MemoryCollector: {}
+                LoadAverageCollector: {}
+                DiskUsageCollector:
+                  config:
+                    devices: x?vd[a-z]+[0-9]*$
+                NetworkCollector: {}
+                ProcessResourcesCollector:
+                  config:
+                    enabled: true
+                    unit: B
+                    measure_collector_time: true
+                    cpu_interval: 0.5
+                    process:
+                      hyperkube:
+                        name: hyperkube
+
+  kubernetes_security_group:
+    type: cloudify.openstack.nodes.SecurityGroup
+    properties:
+      openstack_config: *openstack_config
+      security_group:
+        name: kubernetes_security_group
+        description: kubernetes master security group
+      rules:
+      - remote_ip_prefix: 0.0.0.0/0
+        port_range_min: 1
+        port_range_max: 65535
+        protocol: tcp
+        direction: ingress
+        ethertype: IPv4
+      - remote_ip_prefix: 0.0.0.0/0
+        port_range_min: 1
+        port_range_max: 65535
+        protocol: tcp
+        direction: egress
+        ethertype: IPv4
+      - remote_ip_prefix: 0.0.0.0/0
+        port_range_min: 1
+        port_range_max: 65535
+        protocol: udp
+        direction: ingress
+        ethertype: IPv4
+      - remote_ip_prefix: 0.0.0.0/0
+        port_range_min: 1
+        port_range_max: 65535
+        protocol: udp
+        direction: egress
+        ethertype: IPv4
+
+  kubernetes_master_port:
+    type: cloudify.openstack.nodes.Port
+    properties:
+      openstack_config: *openstack_config
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: public_network
+      - type: cloudify.relationships.depends_on
+        target: public_subnet
+      - type: cloudify.openstack.port_connected_to_security_group
+        target: kubernetes_security_group
+      - type: cloudify.openstack.port_connected_to_floating_ip
+        target: kubernetes_master_ip
+
+  kubernetes_node_port:
+    type: cloudify.openstack.nodes.Port
+    properties:
+      openstack_config: *openstack_config
+    relationships:
+      - type: cloudify.relationships.contained_in
+        target: k8s_node_scaling_tier
+      - type: cloudify.relationships.connected_to
+        target: private_network
+      - type: cloudify.relationships.depends_on
+        target: private_subnet
+      - type: cloudify.openstack.port_connected_to_security_group
+        target: kubernetes_security_group
+
+  private_subnet:
+    type: cloudify.openstack.nodes.Subnet
+    properties:
+      openstack_config: *openstack_config
+      use_external_resource: true
+      resource_id: { get_secret: private_subnet_name }
+    relationships:
+      - target: private_network
+        type: cloudify.relationships.contained_in
+
+  private_network:
+    type: cloudify.openstack.nodes.Network
+    properties:
+      openstack_config: *openstack_config
+      use_external_resource: true
+      resource_id: { get_secret: private_network_name }
+
+  public_subnet:
+    type: cloudify.openstack.nodes.Subnet
+    properties:
+      openstack_config: *openstack_config
+      use_external_resource: true
+      resource_id: { get_secret: public_subnet_name }
+    relationships:
+      - target: public_network
+        type: cloudify.relationships.contained_in
+      - target: router
+        type: cloudify.openstack.subnet_connected_to_router
+
+  public_network:
+    type: cloudify.openstack.nodes.Network
+    properties:
+      openstack_config: *openstack_config
+      use_external_resource: true
+      resource_id: { get_secret: public_network_name }
+
+  router:
+    type: cloudify.openstack.nodes.Router
+    properties:
+      openstack_config: *openstack_config
+      use_external_resource: true
+      resource_id: { get_secret: router_name }
+    relationships:
+      - target: external_network
+        type: cloudify.relationships.connected_to
+
+  external_network:
+    type: cloudify.openstack.nodes.Network
+    properties:
+      openstack_config: *openstack_config
+      use_external_resource: true
+      resource_id: { get_secret: external_network_name }
+
+  k8s_node_scaling_tier:
+    type: cloudify.nodes.Root
+
+  kubernetes_master_ip:
+    type: cloudify.openstack.nodes.FloatingIP
+    properties:
+      openstack_config: *openstack_config
+      floatingip:
+        floating_network_name: { get_property: [ external_network, resource_id ] }
+
+groups:
+
+  k8s_node_group:
+    members:
+      - kubernetes_node_host
+      - kubernetes_node_port
+
+policies:
+
+  kubernetes_node_vms_scaling_policy:
+    type: cloudify.policies.scaling
+    properties:
+      default_instances: 6
+    targets: [k8s_node_group]
+
+outputs:
+
+  kubernetes_master_public_ip:
+    value: { get_attribute: [ kubernetes_master_ip, floating_ip_address ] }
diff --git a/TOSCA/kubernetes-cluster-TOSCA/policies/scale.clj b/TOSCA/kubernetes-cluster-TOSCA/policies/scale.clj
new file mode 100644 (file)
index 0000000..369239a
--- /dev/null
@@ -0,0 +1,66 @@
+;;;; ============LICENSE_START==========================================
+;;;; ===================================================================
+;;;; Copyright © 2017 AT&T
+;;;;
+;;;; Licensed under the Apache License, Version 2.0 (the "License");
+;;;; you may not use this file except in compliance with the License.
+;;;; You may obtain a copy of the License at
+;;;;
+;;;;         http://www.apache.org/licenses/LICENSE-2.0
+;;;;
+;;;; Unless required by applicable law or agreed to in writing, software
+;;;; distributed under the License is distributed on an "AS IS" BASIS,
+;;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;;;; See the License for the specific language governing permissions and
+;;;; limitations under the License.
+;;;;============LICENSE_END============================================
+
+(where (service #"{{service_selector}}")
+  #(info "got event: " %)
+
+  (where (not (expired? event))
+    (moving-time-window {{moving_window_size}}
+      (fn [events]
+        (let [
+               hostmap (atom {})
+               hostcnt (atom {})
+             ]
+          (do
+            (doseq [m events]
+              (if (nil? (@hostmap (m :host)))
+                (do
+                  (swap! hostmap assoc (m :host) (m :metric))
+                  (swap! hostcnt assoc (m :host) 1)
+                )
+                (do
+                  (swap! hostmap assoc (m :host) (+ (m :metric) (@hostmap (m :host))))
+                  (swap! hostcnt assoc (m :host) (inc (@hostcnt (m :host))))
+                )
+              )
+            )
+            (doseq [entry @hostmap]
+              (swap! hostmap assoc (key entry) (/ (val entry) (@hostcnt (key entry))))
+            )
+
+            (let
+              [ hostcnt (count @hostmap)
+                conns (/ (apply + (map (fn [a] (val a)) @hostmap)) hostcnt)
+                cooling (not (nil? (riemann.index/lookup index "scaling" "suspended")))]
+
+              (do
+                (info "cooling=" cooling " scale_direction={{scale_direction}} hostcnt=" hostcnt " scale_threshold={{scale_threshold}} conns=" conns)
+                (if (and (not cooling) ({{scale_direction}} hostcnt {{scale_limit}}) ({{scale_direction}} {{scale_threshold}} conns))
+                  (do
+                    (info "=== SCALE ===" "{{scale_direction}}")
+                    (process-policy-triggers {})
+                    (riemann.index/update index {:host "scaling" :service "suspended" :time (unix-time) :description "cooldown flag" :metric 0 :ttl {{cooldown_time}} :state "ok"})
+                  )
+                )
+              )
+            )
+          )
+        )
+      )
+    )
+  )
+)
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/create.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/create.py
new file mode 100644 (file)
index 0000000..4bb3710
--- /dev/null
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# This task will be triggered after the VM is created. It will check whether Docker is up and running.
+
+import subprocess
+from cloudify import ctx
+from cloudify.exceptions import OperationRetry
+
+
+def check_command(command):
+
+    try:
+        process = subprocess.Popen(
+            command.split()
+        )
+    except OSError:
+        return False
+
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(command))
+    ctx.logger.debug('output: {0} '.format(output))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returns error.'.format(command))
+        return False
+
+    return True
+
+
+def execute_command(_command):
+
+    ctx.logger.debug('_command {0}.'.format(_command))
+
+    subprocess_args = {
+        'args': _command.split(),
+        'stdout': subprocess.PIPE,
+        'stderr': subprocess.PIPE
+    }
+
+    ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+    process = subprocess.Popen(**subprocess_args)
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(_command))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returns error.'.format(_command))
+        return False
+
+    return output
+
+
+if __name__ == '__main__':
+
+    # Check if Docker PS works
+    docker = check_command('docker ps')
+    if not docker:
+            raise OperationRetry(
+                'Docker is not present on the system.')
+    ctx.logger.info('Docker is present on the system.')
+
+    # Next check if Cloud Init is running.
+    finished = False
+    ps = execute_command('ps -ef')
+    for line in ps.split('\n'):
+        if '/usr/bin/python /usr/bin/cloud-init modules' in line:
+            raise OperationRetry(
+                'You provided a Cloud-init Cloud Config to configure instances. '
+                'Waiting for Cloud-init to complete.')
+    ctx.logger.info('Cloud-init finished.')
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/configure.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/configure.py
new file mode 100644 (file)
index 0000000..7d5dffc
--- /dev/null
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# This script will be executed on the Kubernetes master host. It will initialize the master and install a pod network.
+
+import pwd
+import grp
+import os
+import re
+import getpass
+import subprocess
+from cloudify import ctx
+from cloudify.exceptions import OperationRetry
+from cloudify_rest_client.exceptions import CloudifyClientError
+
+JOIN_COMMAND_REGEX = '^kubeadm join[\sA-Za-z0-9\.\:\-\_]*'
+BOOTSTRAP_TOKEN_REGEX = '[a-z0-9]{6}.[a-z0-9]{16}'
+IP_PORT_REGEX = '[0-9]+(?:\.[0-9]+){3}:[0-9]+'
+NOT_SHA_REGEX='^(?!.*sha256)'
+JCRE_COMPILED = re.compile(JOIN_COMMAND_REGEX)
+BTRE_COMPILED = re.compile(BOOTSTRAP_TOKEN_REGEX)
+IPRE_COMPILED = re.compile(IP_PORT_REGEX)
+SHA_COMPILED=re.compile(NOT_SHA_REGEX)
+
+def execute_command(_command):
+
+    ctx.logger.debug('_command {0}.'.format(_command))
+
+    subprocess_args = {
+        'args': _command.split(),
+        'stdout': subprocess.PIPE,
+        'stderr': subprocess.PIPE
+    }
+
+    ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+    process = subprocess.Popen(**subprocess_args)
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(_command))
+    ctx.logger.debug('output: {0} '.format(output))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returns error.'.format(_command))
+        return False
+
+    return output
+
+
+def cleanup_and_retry():
+    reset_cluster_command = 'sudo kubeadm reset'
+    output = execute_command(reset_cluster_command)
+    ctx.logger.info('reset_cluster_command {1}'.format(reset_cluster_command, output))
+    raise OperationRetry('Restarting kubernetes because of a problem.')
+
+
+def configure_admin_conf():
+    # Add the kubeadmin config to environment
+    agent_user = getpass.getuser()
+    uid = pwd.getpwnam(agent_user).pw_uid
+    gid = grp.getgrnam('docker').gr_gid
+    admin_file_dest = os.path.join(os.path.expanduser('~'), 'admin.conf')
+
+    execute_command('sudo cp {0} {1}'.format('/etc/kubernetes/admin.conf', admin_file_dest))
+    execute_command('sudo chown {0}:{1} {2}'.format(uid, gid, admin_file_dest))
+
+    with open(os.path.join(os.path.expanduser('~'), '.bashrc'), 'a') as outfile:
+        outfile.write('export KUBECONFIG=$HOME/admin.conf')
+    os.environ['KUBECONFIG'] = admin_file_dest
+
+
+def setup_secrets(_split_master_port, _bootstrap_token):
+    master_ip = split_master_port[0]
+    master_port = split_master_port[1]
+    ctx.instance.runtime_properties['master_ip'] = _split_master_port[0]
+    ctx.instance.runtime_properties['master_port'] = _split_master_port[1]
+    ctx.instance.runtime_properties['bootstrap_token'] = _bootstrap_token
+    from cloudify import manager
+    cfy_client = manager.get_rest_client()
+
+    _secret_key = 'kubernetes_master_ip'
+    if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+        cfy_client.secrets.create(key=_secret_key, value=master_ip)
+    else:
+        cfy_client.secrets.update(key=_secret_key, value=master_ip)
+    ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+
+    _secret_key = 'kubernetes_master_port'
+    if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+        cfy_client.secrets.create(key=_secret_key, value=master_port)
+    else:
+        cfy_client.secrets.update(key=_secret_key, value=master_port)
+    ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+
+    _secret_key = 'bootstrap_token'
+    if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+        cfy_client.secrets.create(key=_secret_key, value=_bootstrap_token)
+    else:
+        cfy_client.secrets.update(key=_secret_key, value=_bootstrap_token)
+    ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+
+
+if __name__ == '__main__':
+
+    ctx.instance.runtime_properties['KUBERNETES_MASTER'] = True
+    cniCommand1=subprocess.Popen(["sudo", "sysctl", 'net.bridge.bridge-nf-call-iptables=1'], stdout=subprocess.PIPE)
+    # Start Kubernetes Master
+    ctx.logger.info('Attempting to start Kubernetes master.')
+    start_master_command = 'sudo kubeadm init'
+    start_output = execute_command(start_master_command)
+    ctx.logger.debug('start_master_command output: {0}'.format(start_output))
+    # Check if start succeeded.
+    if start_output is False or not isinstance(start_output, basestring):
+        ctx.logger.error('Kubernetes master failed to start.')
+        cleanup_and_retry()
+    ctx.logger.info('Kubernetes master started successfully.')
+
+    # Slice and dice the start_master_command start_output.
+    ctx.logger.info('Attempting to retrieve Kubernetes cluster information.')
+    split_start_output = \
+        [line.strip() for line in start_output.split('\n') if line.strip()]
+    del line
+
+    ctx.logger.debug(
+        'Kubernetes master start output, split and stripped: {0}'.format(
+            split_start_output))
+    split_join_command = ''
+    for li in split_start_output:
+        ctx.logger.debug('li in split_start_output: {0}'.format(li))
+        if re.match(JCRE_COMPILED, li):
+            split_join_command = re.split('\s', li)
+    del li
+    ctx.logger.info('split_join_command: {0}'.format(split_join_command))
+
+    if not split_join_command:
+        ctx.logger.error('No join command in split_start_output: {0}'.format(split_join_command))
+        cleanup_and_retry()
+
+    for li in split_join_command:
+        ctx.logger.info('Sorting bits and pieces: li: {0}'.format(li))
+        if (re.match(BTRE_COMPILED, li) and re.match(SHA_COMPILED, li)):
+            bootstrap_token = li
+        elif re.match(IPRE_COMPILED, li):
+            split_master_port = li.split(':')
+    setup_secrets(split_master_port, bootstrap_token)
+    configure_admin_conf()
+
+    weaveCommand1=subprocess.Popen(["kubectl", "version"], stdout=subprocess.PIPE)
+    weaveCommand2=subprocess.Popen(["base64"],stdin=weaveCommand1.stdout, stdout=subprocess.PIPE)
+    kubever = weaveCommand2.communicate()[0]
+    kubever = kubever.replace('\n', '').replace('\r', '')
+    ctx.logger.info("kubever :"+kubever)
+    weaveURL=('https://cloud.weave.works/k8s/net?k8s-version={0}'.format(kubever))
+    ctx.logger.info("weaveURL:" + weaveURL)
+    weaveCommand4=subprocess.Popen(["kubectl","apply","-f",weaveURL],stdout=subprocess.PIPE)
+    weaveResult= weaveCommand4.communicate()[0]
+    ctx.logger.info("weaveResult :"+weaveResult)
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/start.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/start.py
new file mode 100644 (file)
index 0000000..bbc166b
--- /dev/null
@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# This script will be executed on the master host. It will check whether Kube-DNS is running, and set secrets in Cloudify.
+
+import os
+import subprocess
+import pip
+try:
+    import yaml
+except ImportError:
+    pip.main(['install', 'pyyaml'])
+    import yaml
+
+from cloudify import ctx
+from cloudify.exceptions import RecoverableError
+from cloudify import manager
+
+
+def execute_command(_command):
+
+    ctx.logger.debug('_command {0}.'.format(_command))
+
+    subprocess_args = {
+        'args': _command.split(),
+        'stdout': subprocess.PIPE,
+        'stderr': subprocess.PIPE
+    }
+
+    ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+    process = subprocess.Popen(**subprocess_args)
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(_command))
+    ctx.logger.debug('output: {0} '.format(output))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returns error.'.format(_command))
+        return False
+
+    return output
+
+
+def check_kubedns_status(_get_pods):
+
+    ctx.logger.debug('get_pods: {0} '.format(_get_pods))
+
+    for pod_line in _get_pods.split('\n'):
+        ctx.logger.debug('pod_line: {0} '.format(pod_line))
+        try:
+            _namespace, _name, _ready, _status, _restarts, _age = pod_line.split()
+        except ValueError:
+            pass
+        else:
+            if 'kube-dns' in _name and 'Running' not in _status:
+                return False
+            elif 'kube-dns' in _name and 'Running' in _status:
+                return True
+    return False
+
+
+if __name__ == '__main__':
+
+    cfy_client = manager.get_rest_client()
+
+    # Checking if the Kubernetes DNS service is running (last step).
+    admin_file_dest = os.path.join(os.path.expanduser('~'), 'admin.conf')
+    os.environ['KUBECONFIG'] = admin_file_dest
+    get_pods = execute_command('kubectl get pods --all-namespaces')
+    if not check_kubedns_status(get_pods):
+        raise RecoverableError('kube-dns not Running')
+
+    # Storing the K master configuration.
+    kubernetes_master_config = {}
+    with open(admin_file_dest, 'r') as outfile:
+        try:
+            kubernetes_master_config = yaml.load(outfile)
+        except yaml.YAMLError as e:
+            RecoverableError(
+                'Unable to read Kubernetes Admin file: {0}: {1}'.format(
+                    admin_file_dest, str(e)))
+    ctx.instance.runtime_properties['configuration_file_content'] = \
+        kubernetes_master_config
+
+    clusters = kubernetes_master_config.get('clusters')
+    _clusters = {}
+    for cluster in clusters:
+        __name = cluster.get('name')
+        _cluster = cluster.get('cluster', {})
+        _secret_key = '%s_certificate_authority_data' % __name
+        if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+            cfy_client.secrets.create(key=_secret_key, value=_cluster.get('certificate-authority-data'))
+            ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+        else:
+            cfy_client.secrets.update(key=_secret_key, value=_cluster.get('certificate-authority-data'))
+        ctx.instance.runtime_properties['%s_certificate_authority_data' % __name] = _cluster.get('certificate-authority-data')
+        _clusters[__name] = _cluster
+    del __name
+
+    contexts = kubernetes_master_config.get('contexts')
+    _contexts = {}
+    for context in contexts:
+        __name = context.get('name')
+        _context = context.get('context', {})
+        _contexts[__name] = _context
+    del __name
+
+    users = kubernetes_master_config.get('users')
+    _users = {}
+    for user in users:
+        __name = user.get('name')
+        _user = user.get('user', {})
+        _secret_key = '%s_client_certificate_data' % __name
+        if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+            cfy_client.secrets.create(key=_secret_key, value=_user.get('client-certificate-data'))
+            ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+        else:
+            cfy_client.secrets.update(key=_secret_key, value=_user.get('client-certificate-data'))
+        _secret_key = '%s_client_key_data' % __name
+        if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+            cfy_client.secrets.create(key=_secret_key, value=_user.get('client-key-data'))
+            ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+        else:
+            cfy_client.secrets.update(key=_secret_key, value=_user.get('client-key-data'))
+        ctx.instance.runtime_properties['%s_client_certificate_data' % __name] = _user.get('client-certificate-data')
+        ctx.instance.runtime_properties['%s_client_key_data' % __name] = _user.get('client-key-data')
+        _users[__name] = _user
+    del __name
+
+    ctx.instance.runtime_properties['kubernetes'] = {
+        'clusters': _clusters,
+        'contexts': _contexts,
+        'users': _users
+    }
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_node/configure.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_node/configure.py
new file mode 100644 (file)
index 0000000..69faaa8
--- /dev/null
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# After the K8s master is up and running, this script will be triggered on each worker node. It joins the node to the cluster and mounts the NFS directory.
+
+import subprocess
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError
+
+START_COMMAND = 'sudo kubeadm join --token {0} {1}:{2}'
+
+
+def execute_command(_command):
+
+    ctx.logger.debug('_command {0}.'.format(_command))
+
+    subprocess_args = {
+        'args': _command.split(),
+        'stdout': subprocess.PIPE,
+        'stderr': subprocess.PIPE
+    }
+
+    ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+    process = subprocess.Popen(**subprocess_args)
+    output, error = process.communicate()
+
+    ctx.logger.debug('command: {0} '.format(_command))
+    ctx.logger.debug('output: {0} '.format(output))
+    ctx.logger.debug('error: {0} '.format(error))
+    ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+    if process.returncode:
+        ctx.logger.error('Running `{0}` returns error.'.format(_command))
+        return False
+
+    return output
+
+
+if __name__ == '__main__':
+
+    hostname = execute_command('hostname')
+    ctx.instance.runtime_properties['hostname'] = hostname.rstrip('\n')
+
+    # Get the master cluster info.
+    masters = \
+        [x for x in ctx.instance.relationships if
+         x.target.instance.runtime_properties.get(
+             'KUBERNETES_MASTER', False)]
+    if len(masters) != 1:
+        raise NonRecoverableError(
+            'Currently, a Kubernetes node must have a '
+            'dependency on one Kubernetes master.')
+    master = masters[0]
+    bootstrap_token = \
+        master.target.instance.runtime_properties['bootstrap_token']
+    master_ip = \
+        master.target.instance.runtime_properties['master_ip']
+    master_port = \
+        master.target.instance.runtime_properties['master_port']
+
+    # Join the cluster.
+    cniCommand1=subprocess.Popen(["sudo", "sysctl", 'net.bridge.bridge-nf-call-iptables=1'], stdout=subprocess.PIPE)
+    join_command = \
+        'sudo kubeadm join --token {0} {1}:{2}'.format(
+            bootstrap_token, master_ip, master_port)
+    execute_command(join_command)
+
+    #mount
+    mount_command=\
+        'sudo mount -t nfs -o proto=tcp,port=2049 {0}:/dockerdata-nfs /dockerdata-nfs'.format(master_ip)
+    execute_command(mount_command)
\ No newline at end of file
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/nfs.sh b/TOSCA/kubernetes-cluster-TOSCA/scripts/nfs.sh
new file mode 100644 (file)
index 0000000..2d59acd
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# this script will set the NFS server on k8s master.
+
+mkdir -p /dockerdata-nfs
+chmod 777 /dockerdata-nfs
+yum -y install nfs-utils
+systemctl enable nfs-server.service
+systemctl start nfs-server.service
+echo "/dockerdata-nfs *(rw,no_root_squash,no_subtree_check)" |sudo tee --append /etc/exports
+echo "/home/centos/dockerdata-nfs /dockerdata-nfs    none    bind  0  0" |sudo tee --append /etc/fstab
+exportfs -a
\ No newline at end of file
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/tasks.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/tasks.py
new file mode 100644 (file)
index 0000000..7680fac
--- /dev/null
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# here we define some tasks
+
+from fabric.api import run
+
+
+def label_node(labels, hostname):
+    if labels:
+        label_list = []
+        for key, value in labels.items():
+            label_pair_string = '%s=%s' % (key, value)
+            label_list.append(label_pair_string)
+        label_string = ' '.join(label_list)
+        command = 'kubectl label nodes %s %s' % (hostname, label_string)
+        run(command)
+
+
+def stop_node(hostname):
+    command = 'kubectl drain %s' % (hostname)
+    run(command)
+
+
+def delete_node(hostname):
+    command = 'kubectl delete no %s' % (hostname)
+    run(command)
index ee3b2f1..00ebccb 100644 (file)
@@ -10,10 +10,10 @@ Introduction
 ============
 
 The ONAP Operations Manager (OOM) is responsible for life-cycle
-management of the ONAP platform itself; components such as MSO, SDNC,
+management of the ONAP platform itself; components such as SO, SDNC,
 etc. It is not responsible for the management of services, VNFs or
 infrastructure instantiated by ONAP or used by ONAP to host such
-services or VNFs. OOM uses the open-source Kubernetes container
+services or VNFs. OOM uses the open source Kubernetes container
 management system as a means to manage the Docker containers that
 compose ONAP where the containers are hosted either directly on
 bare-metal servers or on VMs hosted by a 3rd party management system.
@@ -21,10 +21,10 @@ OOM ensures that ONAP is easily deployable and maintainable throughout
 its life cycle while using hardware resources efficiently. There are two
 deployment options for OOM:
 
--  A minimal deployment where single instances of the ONAP components
+-  *A minimal deployment* where single instances of the ONAP components
    are instantiated with no resource reservations, and
 
--  | A production deployment where ONAP components are deployed with
+-  | *A production deployment* where ONAP components are deployed with
      redundancy and anti-affinity rules such that single faults do not
      interrupt ONAP operation.
    | When deployed as containers directly on bare-metal, the minimal
@@ -34,14 +34,14 @@ deployment options for OOM:
      resources as determined by anti-affinity and geo-redundancy
      requirements.
 
-OOM deployments of ONAP provide many benefits:
+**OOM deployments of ONAP provide many benefits:**
 
--  Life-cycle Management Kubernetes is a comprehensive system for
+-  *Life-cycle management*. Kubernetes is a comprehensive system for
    managing the life-cycle of containerized applications. Its use as a
    platform manager will ease the deployment of ONAP, provide fault
    tolerance and horizontal scalability, and enable seamless upgrades.
 
--  Hardware Efficiency ONAP can be deployed on a single host using less
+-  *Hardware Efficiency*. ONAP can be deployed on a single host using less
    than 32GB of memory. As opposed to VMs that require a guest operating
    system be deployed along with the application, containers provide
    similar application encapsulation with neither the computing, memory
@@ -50,42 +50,42 @@ OOM deployments of ONAP provide many benefits:
    be able to create a development deployment of ONAP that can be hosted
    on a laptop.
 
--  Rapid Deployment With locally cached images ONAP can be deployed from
+-  *Rapid deployment*. With locally cached images, ONAP can be deployed from
    scratch in 7 minutes. Eliminating the guest operating system results
    in containers coming into service much faster than a VM equivalent.
    This advantage can be particularly useful for ONAP where rapid
    reaction to inevitable failures will be critical in production
    environments.
 
--  Portability OOM takes advantage of Kubernetes' ability to be hosted
+-  *Portability*. OOM takes advantage of Kubernetes' ability to be hosted
    on multiple hosted cloud solutions like Google Compute Engine, AWS
    EC2, Microsoft Azure, CenturyLink Cloud, IBM Bluemix and more.
 
--  Minimal Impact As ONAP is already deployed with Docker containers
+-  *Minimal impact*. As ONAP is already deployed with Docker containers
    minimal changes are required to the components themselves when
    deployed with OOM.
 
-Features of OOM:
+**Features of OOM:**
 
--  Platform Deployment Automated deployment/un-deployment of ONAP
-   instance(s) / Automated deployment/un-deployment of individual
-   platform components using docker containers & kubernetes
+-  *Platform Deployment*. Automated deployment/un-deployment of ONAP
+   instance(s) / automated deployment/un-deployment of individual
+   platform components using Docker containers & Kubernetes.
 
--  Platform Monitoring & healing Monitor platform state, Platform health
-   checks, fault tolerance and self-healing using docker containers &
-   kubernetes
+-  *Platform Monitoring & Healing*. Monitor platform state, platform health
+   checks, fault tolerance and self-healing using Docker containers &
+   Kubernetes.
 
--  Platform Scaling Platform horizontal scalability through using docker
-   containers & kubernetes
+-  *Platform Scaling*. Platform horizontal scalability through using Docker
+   containers & Kubernetes.
 
--  Platform Upgrades Platform upgrades using docker containers &
-   kubernetes
+-  *Platform Upgrades*. Platform upgrades using Docker containers &
+   Kubernetes.
 
--  Platform Configurations Manage overall platform components
-   configurations using docker containers & kubernetes
+-  *Platform Configurations*. Manage overall platform components
+   configurations using Docker containers & Kubernetes.
 
--  | Platform migrations Manage migration of platform components using
-     docker containers & kubernetes
+-  | *Platform migrations*. Manage migration of platform components using
+     Docker containers & Kubernetes.
    | Please note that the ONAP Operations Manager does not provide
      support for containerization of services or VNFs that are managed
      by ONAP; the OOM orchestrates the life-cycle of the ONAP platform
@@ -125,7 +125,7 @@ stopped and deleted. These life-cycle operations are managed by
 the \ `Kubernetes <https://kubernetes.io/>`__ container management
 system which maintains the desired state of the container system as
 described by one or more deployment descriptors - similar in concept to
-OpenStack HEAT Orchestration Templates. The following sections describe
+OpenStack Heat Orchestration Templates. The following sections describe
 the fundamental objects managed by Kubernetes, the network these
 components use to communicate with each other and other entities outside
 of ONAP and the templates that describe the configuration and desired
@@ -137,7 +137,7 @@ ONAP Components to Kubernetes Object Relationships
 Kubernetes deployments consist of multiple objects:
 
 -  nodes - a worker machine - either physical or virtual - that hosts
-   multiple containers managed by kubernetes.
+   multiple containers managed by Kubernetes.
 
 -  services - an abstraction of a logical set of pods that provide a
    micro-service.
@@ -152,7 +152,7 @@ The relationship between these objects is shown in the following figure:
 
 .. figure:: ../kubernetes_objects.png
 
-OOM uses these kubernetes objects as described in the following
+OOM uses these Kubernetes objects as described in the following
 sections.
 
 Nodes
@@ -181,15 +181,15 @@ OOM works with both physical and virtual worker machines. Â 
    to use
    `Rancher <http://rancher.com/docs/rancher/v1.6/en/quick-start-guide/>`__
    along with `Helm <https://github.com/kubernetes/helm/releases>`__ to
-   associate hosts with a kubernetes cluster.
+   associate hosts with a Kubernetes cluster.
 
 Pods
 ~~~~
 
 A group of containers with shared storage and networking can be grouped
-together into a kubernetes pod. Â All of the containers within a pod are
+together into a Kubernetes pod. Â All of the containers within a pod are
 co-located and co-scheduled so they operate as a single unit. Â Within
-ONAP Amsterdam release, pods are mapped one-to-one to docker containers
+ONAP Amsterdam release, pods are mapped one-to-one to Docker containers
 although this may change in the future. Â As explained in the Services
 section below the use of Pods within each ONAP component is abstracted
 from other ONAP components.
@@ -197,13 +197,13 @@ from other ONAP components.
 Services
 ~~~~~~~~
 
-OOM uses the kubernetes service abstraction to provide a consistent
+OOM uses the Kubernetes service abstraction to provide a consistent
 access point for each of the ONAP components independent of the pod or
-container architecture of that component. Â For example, the SDNC
+container architecture of that component. For example, the SDNC
 component may introduce OpenDaylight clustering as some point and change
 the number of pods in this component to three or more but this change
 will be isolated from the other ONAP components by the service
-abstraction. Â A service can include a load balancer on its ingress to
+abstraction. A service can include a load balancer on its ingress to
 distribute traffic between the pods and even react to dynamic changes in
 the number of pods if they are part of a replica set (see the MSO
 example below for a brief explanation of replica sets).
@@ -213,7 +213,7 @@ Persistent Volumes
 
 As pods and containers are ephemeral, any data that must be persisted
 across pod restart events needs to be stored outside of the pod in a
-persistent volume(s). Â Kubernetes supports a wide variety of types of
+persistent volume(s). Kubernetes supports a wide variety of types of
 persistent volumes such as: Fibre Channel, NFS, iSCSI, CephFS, and
 GlusterFS (for a full list look
 `here <https://kubernetes.io/docs/concepts/storage/persistent-volumes/#types-of-persistent-volumes>`__)
@@ -231,7 +231,7 @@ OOM Networking with Kubernetes
 Name Spaces
 ~~~~~~~~~~~
 
-Within the namespaces are kubernete's services that provide external
+Within the namespaces are Kubernetes services that provide external
 connectivity to pods that host Docker containers. The following is a
 list of the namespaces and the services within:
 
@@ -378,8 +378,8 @@ Kubernetes Deployment Specifications for ONAP
 
 Each of the ONAP components are deployed as described in a deployment
 specification. Â This specification documents key parameters and
-dependencies between the pods of an ONAP components such that kubernetes
-is able to repeatably startup the component. Â The components artifacts
+dependencies between the pods of an ONAP components such that Kubernetes
+is able to repeatably startup the component. The components artifacts
 are stored here in the oom/kubernetes repo in \ `ONAP
 gerrit <https://gerrit.onap.org/r/gitweb?p=oom.git;a=tree;f=kubernetes;h=4597d09dbce86d7543174924322435c30cb5b0ee;hb=refs/heads/master>`__.
 The mso project is a relatively simple example, so let's start there.
@@ -473,20 +473,20 @@ specification for a mariadb pod.
 
 The spec section starts off with 'replicas: 1' which states that only 1
 'replica' will be use here. Â If one was to change the number of replicas
-to 3 for example, kubernetes would attempt to ensure that three replicas
-of this pod are operational at all times. Â One can see that in a
+to 3 for example, Kubernetes would attempt to ensure that three replicas
+of this pod are operational at all times. One can see that in a
 clustered environment the number of replicas should probably be more
 than 1 but for simple deployments 1 is sufficient.
 
-The selector label is a grouping primitive of kubernetes but this simple
+The selector label is a grouping primitive of Kubernetes but this simple
 example doesn't exercise it's full capabilities.
 
 The template/spec section is where the key information required to start
 this pod is found.
 
--  image: is a reference to the location of the docker image in nexus3
+-  image: is a reference to the location of the Docker image in nexus3
 
--  name: is the name of the docker image
+-  name: is the name of the Docker image
 
 -  env is a section supports the creation of operating system
    environment variables within the container and are specified as a set
@@ -501,14 +501,14 @@ this pod is found.
    space by creating a 'nodePort' - a mechanism used to resolve port
    duplication.
 
--  readinessProbe: is the mechanism kubernetes uses to determine the
+-  readinessProbe: is the mechanism Kubernetes uses to determine the
    state of the container. 
 
 -  volumes: a location to define volumes required by the container, in
    this case configuration and initialization information.
 
 -  imagePullSecrets: an key to access the nexus3 repo when pulling
-   docker containers.
+   Docker containers.
 
 As one might image, the mso-deployment.yaml file describes the
 deployment artifacts of the mso application. Â Here are the contents:
@@ -614,7 +614,7 @@ relationships need to be specified.
 In this particular init-container, the command '/root/ready.py' will be
 executed to determine when mariadb is ready, but this could be a simple
 bash script. The image/name section describes where and how to get the
-docker image from the init-container.
+Docker image from the init-container.
 
 To ensure that data isn't lost when an ephemeral container undergoes
 life-cycle events (like being restarted), non-volatile or persistent
@@ -656,7 +656,7 @@ mso namespace.
         name: "{{ .Values.nsPrefix }}-mso-db"
 
 The last of the four files is the all-services.yaml file which defines
-the kubernetes service(s) that will be exposed in this name space. Here
+the Kubernetes service(s) that will be exposed in this name space. Here
 is the contents of the file:
 
 **all-services.yaml**::
@@ -725,16 +725,16 @@ is the contents of the file:
     type: NodePort
 
 First of all, note that this file is really two service specification in
-a single file: the mariadb service and the mso service. Â In some
+a single file: the mariadb service and the mso service. In some
 circumstances it may be possible to hide some of the complexity of the
 containers/pods by hiding them behind a single service.
 
 The mariadb service specification is quite simple; other than the name
-the only section of interest is the nodePort specification. Â When
-containers require exposing ports to the world outside of a kubernetes
+the only section of interest is the nodePort specification. When
+containers require exposing ports to the world outside of a Kubernetes
 namespace, there is a potential for port conflict. To resolve this
-potential port conflict kubernetes uses the concept of a nodePort that
-is mapped one-to-one with a port within the namespace. Â In this case the
+potential port conflict Kubernetes uses the concept of a nodePort that
+is mapped one-to-one with a port within the namespace. In this case the
 port 3306 (which was defined in the db-deployment.yaml file) is mapped
 to 30252 externally thus avoiding the conflict that would have arisen
 from deployment multiple mariadb containers.
@@ -768,10 +768,10 @@ Development Deployments
 
 For the Amsterdam release, the deployment specifications represent a
 simple simplex deployment of ONAP that may not have the robustness
-typically required of a full operational deployment. Â Follow on releases
+typically required of a full operational deployment. Follow on releases
 will enhance these deployment specifications as follows:
 
--  Load Balancers - kubernets has built in support for user defined or
+-  Load Balancers - Kubernetes has built in support for user defined or
    simple 'ingress' load balances at the service layer to hide the
    complexity of multi-pod deployments from other components.
 
@@ -786,20 +786,20 @@ will enhance these deployment specifications as follows:
 Kubernetes Under-Cloud Deployments
 ==================================
 
-The automated ONAP deployment depends on a fully functional kubernetes
+The automated ONAP deployment depends on a fully functional Kubernetes
 environment being available prior to ONAP installation. Fortunately,
-kubenetes is supported on a wide variety of systems such as Google
+Kubernetes is supported on a wide variety of systems such as Google
 Compute Engine, `AWS
 EC2 <https://wiki.onap.org/display/DW/ONAP+on+AWS#ONAPonAWS-Option0:DeployOOMKubernetestoaspotVM>`__,
 Microsoft Azure, CenturyLink Cloud, IBM Bluemix and more. Â If you're
-setting up your own kubernetes environment, please refer to \ `ONAP on
+setting up your own Kubernetes environment, please refer to \ `ONAP on
 Kubernetes <file:///C:\display\DW\ONAP+on+Kubernetes>`__ for a walk
 through of how to set this environment up on several platforms.
 
 ONAP 'OneClick' Deployment Walk-though
 ======================================
 
-Once a kubernetes environment is available and the deployment artifacts
+Once a Kubernetes environment is available and the deployment artifacts
 have been customized for your location, ONAP is ready to be installed. 
 
 The first step is to setup
@@ -807,44 +807,29 @@ the \ `/oom/kubernetes/config/onap-parameters.yaml <https://gerrit.onap.org/r/g
 with key-value pairs specific to your OpenStack environment. Â There is
 a \ `sample  <https://gerrit.onap.org/r/gitweb?p=oom.git;a=blob;f=kubernetes/config/onap-parameters-sample.yaml;h=3a74beddbbf7f9f9ec8e5a6abaecb7cb238bd519;hb=refs/heads/master>`__\ that
 may help you out or even be usable directly if you don't intend to
-actually use OpenStack resources. Â Here is the contents of this file:
-
-**onap-parameters-sample.yaml**
-
-  .. literalinclude:: https://gerrit.onap.org/r/gitweb?p=oom.git;a=blob_plain;f=kubernetes/config/onap-parameters-sample.yaml;hb=refs/heads/master
-
-OPENSTACK\_UBUNTU\_14\_IMAGE: "Ubuntu\_14.04.5\_LTS"
-
-OPENSTACK\_PUBLIC\_NET\_ID: "e8f51956-00dd-4425-af36-045716781ffc"
-
-OPENSTACK\_OAM\_NETWORK\_ID: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6"
-
-OPENSTACK\_OAM\_SUBNET\_ID: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e"
-
-OPENSTACK\_OAM\_NETWORK\_CIDR: "192.168.30.0/24"
-
-OPENSTACK\_USERNAME: "vnf\_user"
-
-OPENSTACK\_API\_KEY: "vnf\_password"
-
-OPENSTACK\_TENANT\_NAME: "vnfs"
-
-OPENSTACK\_REGION: "RegionOne"
-
-OPENSTACK\_KEYSTONE\_URL: "http://1.2.3.4:5000"
-
-OPENSTACK\_FLAVOUR\_MEDIUM: "m1.medium"
-
-OPENSTACK\_SERVICE\_TENANT\_NAME: "services"
-
-DMAAP\_TOPIC: "AUTO"
-
-DEMO\_ARTIFACTS\_VERSION: "1.1.0-SNAPSHOT"
+actually use OpenStack resources. Here is the contents of this file:
+
+**onap-parameters-sample.yaml**::
+
+  OPENSTACK_UBUNTU_14_IMAGE: "Ubuntu_14.04.5_LTS"
+  OPENSTACK_PUBLIC_NET_ID: "e8f51956-00dd-4425-af36-045716781ffc"
+  OPENSTACK_OAM_NETWORK_ID: "d4769dfb-c9e4-4f72-b3d6-1d18f4ac4ee6"
+  OPENSTACK_OAM_SUBNET_ID: "191f7580-acf6-4c2b-8ec0-ba7d99b3bc4e"
+  OPENSTACK_OAM_NETWORK_CIDR: "192.168.30.0/24"
+  OPENSTACK_USERNAME: "vnf_user"
+  OPENSTACK_API_KEY: "vnf_password"
+  OPENSTACK_TENANT_NAME: "vnfs"
+  OPENSTACK_REGION: "RegionOne"
+  OPENSTACK_KEYSTONE_URL: "http://1.2.3.4:5000"
+  OPENSTACK_FLAVOUR_MEDIUM: "m1.medium"
+  OPENSTACK_SERVICE_TENANT_NAME: "services"
+  DMAAP_TOPIC: "AUTO"
+  DEMO_ARTIFACTS_VERSION: "1.1.0-SNAPSHOT"
 
 Note that these values are required or the following steps will fail.
 
 In-order to be able to support multiple ONAP instances within a single
-kubernetes environment a configuration set is required. Â The
+Kubernetes environment a configuration set is required. The
 `createConfig.sh <https://gerrit.onap.org/r/gitweb?p=oom.git;a=blob;f=kubernetes/config/createConfig.sh;h=f226ccae47ca6de15c1da49be4b8b6de974895ed;hb=refs/heads/master>`__
 script is used to do this.
 
@@ -854,7 +839,7 @@ script is used to do this.
 
 The bash
 script \ `createAll.bash <https://gerrit.onap.org/r/gitweb?p=oom.git;a=blob;f=kubernetes/oneclick/createAll.bash;h=5e5f2dc76ea7739452e757282e750638b4e3e1de;hb=refs/heads/master>`__ is
-used to create an ONAP deployment with kubernetes. It has two primary
+used to create an ONAP deployment with Kubernetes. It has two primary
 functions:
 
 -  Creating the namespaces used to encapsulate the ONAP components, and
@@ -873,7 +858,7 @@ that can be used to separate multiple deployments of onap. The result
 will be set of 10 namespaces (e.g. onapTrial-sdc, onapTrial-aai,
 onapTrial-mso, onapTrial-message-router, onapTrial-robot, onapTrial-vid,
 onapTrial-sdnc, onapTrial-portal, onapTrial-policy, onapTrial-appc)
-being created within the kubernetes environment. Â A prerequisite pod
+being created within the Kubernetes environment. A prerequisite pod
 config-init (\ `pod-config-init.yaml <https://gerrit.onap.org/r/gitweb?p=oom.git;a=blob;f=kubernetes/config/pod-config-init.yaml;h=b1285ce21d61815c082f6d6aa3c43d00561811c7;hb=refs/heads/master>`__)
 may editing to match you environment and deployment into the default
 namespace before running createAll.bash.
@@ -891,16 +876,16 @@ integration will be done:
 A registrator to push the service endpoint info to MSB service
 discovery. 
 
--  The needed service endpoint info is put into the kubernetes yaml file
+-  The needed service endpoint info is put into the Kubernetes YAML file
    as annotation, including service name, Protocol,version, visual
    range,LB method, IP, Port,etc.
 
 -  OOM deploy/start/restart/scale in/scale out/upgrade ONAP components
 
--  Registrator watch the kubernetes event
+-  Registrator watch the Kubernetes event
 
 -  When an ONAP component instance has been started/destroyed by OOM,
-   Registrator get the notification from kubernetes
+   Registrator get the notification from Kubernetes
 
 -  Registrator parse the service endpoint info from annotation and
    register/update/unregister it to MSB service discovery
@@ -925,9 +910,9 @@ A message sequence chart of the registration process:
 
 MSB Usage Instructions
 ----------------------
-MSB provides kubernetes charts in OOM, so it can be spun up by oom oneclick command. 
+MSB provides Kubernetes charts in OOM, so it can be spun up by oom oneclick command.
 
-Please note that kubernetes authentication token must be set at *kubernetes/kube2msb/values.yaml* so the kube2msb registrator can get the access to watch the kubernetes events and get service annotation by kubernetes APIs. The token can be found in the kubectl configuration file *~/.kube/config*
+Please note that Kubernetes authentication token must be set at *kubernetes/kube2msb/values.yaml* so the kube2msb registrator can get the access to watch the Kubernetes events and get service annotation by Kubernetes APIs. The token can be found in the kubectl configuration file *~/.kube/config*
 
 MSB and kube2msb can be spun up with all the ONAP components together, or separately using the following commands.
 
index 928d5dc..f3e3d8f 100644 (file)
@@ -24,16 +24,16 @@ The main goal of the Amsterdam release was to:
 
 **Bug Fixes**
 
-       - The full list of implemented user stories and epics is available on `JIRA <https://jira.onap.org/secure/RapidBoard.jspa?rapidView=41&view=planning.nodetail&epics=visible>`_
-         This is the first release of OOM, the defects fixed in this release were raised during the course of the release.
-         Anything not closed is captured below under Known Issues. If you want to review the defects fixed in the Amsterdam release, refer to Jira link above.
+    The full list of implemented user stories and epics is available on `JIRA <https://jira.onap.org/secure/RapidBoard.jspa?rapidView=41&view=planning.nodetail&epics=visible>`_
+    This is the first release of OOM, the defects fixed in this release were raised during the course of the release.
+    Anything not closed is captured below under Known Issues. If you want to review the defects fixed in the Amsterdam release, refer to Jira link above.
 
 **Known Issues**
-       - `OOM-6 <https://jira.onap.org/browse/OOM-6>`_ Automated platform deployment on Docker/Kubernetes - VFC, AAF, MSB minor issues.
+    `OOM-6 <https://jira.onap.org/browse/OOM-6>`_ Automated platform deployment on Docker/Kubernetes - VFC, AAF, MSB minor issues.
 
         Workaround:
 
-        Manual configuration changes - however the reference vFirewall use case does not currently require these components
+    Manual configuration changes - however the reference vFirewall use case does not currently require these components
 .
     - `OOM-10 <https://jira.onap.org/browse/OOM-10>`_ Platform configuration management.
 
@@ -45,7 +45,7 @@ The main goal of the Amsterdam release was to:
 
 
 **Security Issues**
-       N/A
+    N/A
 
 
 **Upgrade Notes**
index c1d5fdf..0f65120 100644 (file)
@@ -3,7 +3,7 @@ Prerequisites:
 - Helm
 
 In order to use Helm with Rancher, check the tiller version installed
-by runing "helm version" on the ranchr CLI
+by running "helm version" on the Rancher CLI
 and install the appropriate Helm.
 Notice both tiller and helm are installed,
 but you will need to install on your VM.
diff --git a/kubernetes/aaf/templates/aaf-configmap.yaml b/kubernetes/aaf/templates/aaf-configmap.yaml
new file mode 100644 (file)
index 0000000..c8565f2
--- /dev/null
@@ -0,0 +1,9 @@
+#{{ if not .Values.disableAafAaf }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aaf-data-configmap
+  namespace: {{ .Values.nsPrefix }}-aaf
+data:
+{{ (.Files.Glob "resources/config/aaf-data/*").AsConfig | indent 2 }}
+#{{ end }}
index acd6a7a..55619b3 100644 (file)
@@ -31,8 +31,8 @@ spec:
           periodSeconds: 10
       volumes:
         - name: aaf-cs-data
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/aaf/data
+          secret:
+            secretName: aaf-cs-data-secret
       imagePullSecrets:
       - name: {{ .Values.nsPrefix }}-docker-registry-key
-#{{ end }}
\ No newline at end of file
+#{{ end }}
index 6664f18..4f64048 100644 (file)
@@ -48,8 +48,8 @@ spec:
           periodSeconds: 10
       volumes:
         - name: aaf-data
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/aaf/data2
+          configMap:
+            name: aaf-data-configmap
       imagePullSecrets:
       - name: {{ .Values.nsPrefix }}-docker-registry-key
 #{{ end }}
diff --git a/kubernetes/aaf/templates/aaf-secret.yaml b/kubernetes/aaf/templates/aaf-secret.yaml
new file mode 100644 (file)
index 0000000..705d050
--- /dev/null
@@ -0,0 +1,10 @@
+#{{ if not .Values.disableAafAafCs }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: aaf-cs-data-secret
+  namespace: {{ .Values.nsPrefix }}-aaf
+type: Opaque
+data:
+{{ (.Files.Glob "resources/config/aaf-cs-data/*").AsSecrets | indent 2 }}
+#{{ end }}
diff --git a/kubernetes/aai/resources/config/log/filebeat/filebeat.yml b/kubernetes/aai/resources/config/log/filebeat/filebeat.yml
new file mode 100644 (file)
index 0000000..f316b86
--- /dev/null
@@ -0,0 +1,41 @@
+filebeat.prospectors:
+#it is mandatory, in our case it's log
+- input_type: log
+  #This is the canonical path as mentioned in logback.xml, *.* means it will monitor all files in the directory.
+  paths:
+    - /var/log/onap/*/*/*/*.log
+    - /var/log/onap/*/*/*.log
+    - /var/log/onap/*/*.log
+  #Files older than this should be ignored. In our case it will be 48 hours, i.e. 2 days. It is a helping flag for clean_inactive
+  ignore_older: 48h
+  # Remove the registry entry for a file that is more than the specified time. In our case it will be 96 hours, i.e. 4 days. It will help to keep registry records within limits
+  clean_inactive: 96h
+
+
+# Name of the registry file. If a relative path is used, it is considered relative to the
+# data path. Else full qualified file name.
+#filebeat.registry_file: ${path.data}/registry
+
+
+output.logstash:
+  #List of logstash server ip addresses with port number.
+  #But, in our case, this will be the loadbalancer IP address.
+  #For the below property to work, the loadbalancer or logstash should expose port 5044 to listen for the filebeat events, or the port in the property should be changed appropriately.
+  hosts: ["logstash.onap-log:5044"]
+  #If enabled, will do load balancing among available Logstash instances, automatically.
+  loadbalance: true
+
+  #The list of root certificates for server verifications.
+  #If certificate_authorities is empty or not set, the trusted
+  #certificate authorities of the host system are used.
+  #ssl.certificate_authorities: $ssl.certificate_authorities
+
+  #The path to the certificate for SSL client authentication. If the certificate is not specified,
+  #client authentication is not available.
+  #ssl.certificate: $ssl.certificate
+
+  #The client certificate key used for client authentication.
+  #ssl.key: $ssl.key
+
+  #The passphrase used to decrypt an encrypted key stored in the configured key file
+  #ssl.key_passphrase: $ssl.key_passphrase
diff --git a/kubernetes/aai/templates/aai-deployment-configmap.yaml b/kubernetes/aai/templates/aai-deployment-configmap.yaml
new file mode 100644 (file)
index 0000000..40d4909
--- /dev/null
@@ -0,0 +1,9 @@
+#{{ if not .Values.disableAaiAaiService }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aai-deployment-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/haproxy/*").AsConfig . | indent 2 }}
+#{{ end }}
index a65cf44..4e8d82f 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: aai-service
   namespace: "{{ .Values.nsPrefix }}-aai"
 spec:
+  replicas: {{ .Values.aaiServiceReplicas }}
   selector:
     matchLabels:
       app: aai-service
@@ -42,6 +43,7 @@ spec:
         - mountPath: /dev/log
           name: aai-service-log
         - mountPath: /usr/local/etc/haproxy/haproxy.cfg
+          subPath: haproxy.cfg
           name: haproxy-cfg
         ports:
         - containerPort: 8080
@@ -59,8 +61,8 @@ spec:
           hostPath:
             path: "/dev/log"
         - name: haproxy-cfg
-          hostPath:
-            path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/haproxy/haproxy.cfg"
+          configMap:
+            name: aai-deployment-configmap
       restartPolicy: Always
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
diff --git a/kubernetes/aai/templates/aai-filebeat-configmap.yaml b/kubernetes/aai/templates/aai-filebeat-configmap.yaml
new file mode 100644 (file)
index 0000000..5a123a5
--- /dev/null
@@ -0,0 +1,9 @@
+#{{ if not .Values.disableAaiAaiResources }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aai-filebeat-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/log/filebeat/*").AsConfig . | indent 2 }}
+#{{ end }}
index 7ff5e66..dd7d7cc 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: aai-resources
   namespace: "{{ .Values.nsPrefix }}-aai"
 spec:
+  replicas: {{ .Values.aaiResourceReplicas }}
   selector:
     matchLabels:
       app: aai-resources
@@ -50,7 +51,10 @@ spec:
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
-        - mountPath: /var/chef/aai-data/
+        - mountPath: /var/chef/aai-data/chef-config/dev/.knife/solo.rb
+          subPath: solo.rb
+          name: aai-chef-config
+        - mountPath: /var/chef/aai-data/environments/
           name: aai-data
         - mountPath: /var/log/onap
           name: aai-resources-logs
@@ -69,6 +73,7 @@ spec:
         imagePullPolicy: {{ .Values.pullPolicy }}
         volumeMounts:
         - mountPath: /usr/share/filebeat/filebeat.yml
+          subPath: filebeat.yml
           name: filebeat-conf
         - mountPath: /var/log/onap
           name: aai-resources-logs
@@ -78,28 +83,23 @@ spec:
       - name: localtime
         hostPath:
           path: /etc/localtime
+      - name: aai-chef-config
+        configMap:
+          name: aai-chef-config-configmap
       - name: aai-data
-        hostPath:
-          path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/aai-data/"
+        configMap:
+          name: aai-resources-environments-configmap
       - name: filebeat-conf
-        hostPath:
-          path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/logback/filebeat.yml
+        configMap:
+          name: aai-filebeat-configmap
       - name: aai-resources-logs
         emptyDir: {}
       - name: aai-resources-filebeat
         emptyDir: {}
       - name: aai-resources-log-conf
         configMap:
-         name: aai-resources-configmap
+         name: aai-resources-log-configmap
       restartPolicy: Always
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: aai-resources-configmap
-  namespace: {{ .Values.nsPrefix }}-aai
-data:
-{{ (.Files.Glob "resources/resources/conf/logback.xml").AsConfig | indent 2 }}
 #{{ end }}
diff --git a/kubernetes/aai/templates/aai-resources-traversal-configmap.yaml b/kubernetes/aai/templates/aai-resources-traversal-configmap.yaml
new file mode 100644 (file)
index 0000000..ebd6913
--- /dev/null
@@ -0,0 +1,33 @@
+#{{ if not .Values.disableAaiAaiResources }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aai-chef-config-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/aai-data/chef-config/dev/.knife/solo.rb").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aai-resources-environments-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/aai-data/environments/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aai-resources-log-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/log/resources/logback.xml").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aai-traversal-log-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/log/traversal/logback.xml").AsConfig . | indent 2 }}
+#{{ end }}
index f659392..38e7fb0 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: aai-traversal
   namespace: "{{ .Values.nsPrefix }}-aai"
 spec:
+  replicas: {{ .Values.aaiTraversalReplicas }}
   selector:
     matchLabels:
       app: aai-traversal
@@ -52,7 +53,10 @@ spec:
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
-        - mountPath: /var/chef/aai-data/
+        - mountPath: /var/chef/aai-data/chef-config/dev/.knife/solo.rb
+          subPath: solo.rb
+          name: aai-chef-config
+        - mountPath: /var/chef/aai-data/environments/
           name: aai-data
         - mountPath: /var/log/onap
           name: aai-traversal-logs
@@ -71,6 +75,7 @@ spec:
         imagePullPolicy: {{ .Values.pullPolicy }}
         volumeMounts:
         - mountPath: /usr/share/filebeat/filebeat.yml
+          subPath: filebeat.yml
           name: filebeat-conf
         - mountPath: /var/log/onap
           name: aai-traversal-logs
@@ -80,28 +85,23 @@ spec:
       - name: localtime
         hostPath:
           path: /etc/localtime
+      - name: aai-chef-config
+        configMap:
+          name: aai-chef-config-configmap
       - name: aai-data
-        hostPath:
-          path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/aai-data/"
+        configMap:
+          name: aai-resources-environments-configmap
       - name: filebeat-conf
-        hostPath:
-          path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/logback/filebeat.yml
+        configMap:
+          name: aai-filebeat-configmap
       - name: aai-traversal-logs
         emptyDir: {}
       - name: aai-traversal-filebeat
         emptyDir: {}
       - name: aai-traversal-log-conf
         configMap:
-         name: aai-traversal-configmap
+         name: aai-traversal-log-configmap
       restartPolicy: Always
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: aai-traversal-configmap
-  namespace: {{ .Values.nsPrefix }}-aai
-data:
-{{ (.Files.Glob "resources/traversal/conf/logback.xml").AsConfig | indent 2 }}
 #{{ end }}
index 0cf62d0..77f7dc1 100644 (file)
@@ -184,6 +184,7 @@ spec:
   type: NodePort
   selector:
     app: aai-service
+  clusterIP: {{ .Values.aaiServiceClusterIp }}
 #{{ end }}
 #{{ if not .Values.disableAaiModelLoaderService }}
 ---
diff --git a/kubernetes/aai/templates/data-router-configmap.yaml b/kubernetes/aai/templates/data-router-configmap.yaml
new file mode 100644 (file)
index 0000000..5782213
--- /dev/null
@@ -0,0 +1,59 @@
+#{{ if not .Values.disableAaiDataRouter }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: data-router-prop-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/data-router/appconfig/data-router.properties").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: data-router-model-v8-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/data-router/appconfig/model/aai_oxm_v8.xml").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: data-router-model-v9-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/data-router/appconfig/model/aai_oxm_v9.xml").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: data-router-model-v10-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/data-router/appconfig/model/aai_oxm_v10.xml").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: data-router-model-v11-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/data-router/appconfig/model/aai_oxm_v11.xml").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: data-router-secret
+  namespace: {{ .Values.nsPrefix }}-aai
+type: Opaque
+data:
+{{ tpl (.Files.Glob "resources/config/data-router/appconfig/auth/*").AsSecrets . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: data-router-dynamic-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/data-router/dynamic/routes/entity-event.route").AsConfig . | indent 2 }}
+{{ tpl (.Files.Glob "resources/config/data-router/dynamic/conf/entity-event-policy.xml").AsConfig . | indent 2 }}
+#{{ end }}
index 6b3c024..5a7f9e9 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: data-router
   namespace: "{{ .Values.nsPrefix }}-aai"
 spec:
+  replicas: {{ .Values.dataRouterReplicas }}
   selector:
     matchLabels:
       app: data-router
@@ -14,6 +15,28 @@ spec:
         app: data-router
       name: data-router
     spec:
+      initContainers:
+      - command:
+        - /bin/sh
+        - -c
+        - |
+          mkdir -p /logroot/data-router/logs
+          chmod -R 777 /logroot/data-router/logs
+          chown -R root:root /logroot
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        securityContext:
+          privileged: true
+        image: {{ .Values.image.es_bb }}
+        imagePullPolicy: {{ .Values.pullPolicy }}
+        name: init-sysctl
+        volumeMounts:
+        - name: data-router-logs
+          mountPath: /logroot/
       containers:
       - name: data-router
         image: "{{ .Values.image.dataRouterImage }}:{{ .Values.image.dataRouterVersion }}"
@@ -37,10 +60,29 @@ spec:
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
-        - mountPath: /opt/app/data-router/config/
-          name: data-router-config
-        - mountPath: /opt/app/data-router/dynamic/
-          name: data-router-dynamic
+        - mountPath: /opt/app/data-router/config/model/aai_oxm_v8.xml
+          subPath: aai_oxm_v8.xml
+          name: data-router-model-v8
+        - mountPath: /opt/app/data-router/config/model/aai_oxm_v9.xml
+          subPath: aai_oxm_v9.xml
+          name: data-router-model-v9
+        - mountPath: /opt/app/data-router/config/model/aai_oxm_v10.xml
+          subPath: aai_oxm_v10.xml
+          name: data-router-model-v10
+        - mountPath: /opt/app/data-router/config/model/aai_oxm_v11.xml
+          subPath: aai_oxm_v11.xml
+          name: data-router-model-v11
+        - mountPath: /opt/app/data-router/config/auth
+          name: data-router-auth
+        - mountPath: /opt/app/data-router/config/data-router.properties
+          name: data-router-properties
+          subPath: data-router.properties
+        - mountPath: /opt/app/data-router/dynamic/routes/entity-event.route
+          subPath: entity-event.route
+          name: data-router-dynamic-route
+        - mountPath: /opt/app/data-router/dynamic/conf/entity-event-policy.xml
+          subPath: entity-event-policy.xml
+          name: data-router-dynamic-policy
         - mountPath: /logs/
           name: data-router-logs
         ports:
@@ -54,16 +96,34 @@ spec:
       - name: localtime
         hostPath:
           path: /etc/localtime
-      - name: data-router-config
-        hostPath:
-          path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/data-router/appconfig/"
-      - name: data-router-dynamic
-        hostPath:
-          path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/data-router/dynamic/"
+      - name: data-router-model-v8
+        configMap:
+          name: data-router-model-v8-configmap
+      - name: data-router-model-v9
+        configMap:
+          name: data-router-model-v9-configmap
+      - name: data-router-model-v10
+        configMap:
+          name: data-router-model-v10-configmap
+      - name: data-router-model-v11
+        configMap:
+          name: data-router-model-v11-configmap
+      - name: data-router-auth
+        secret:
+          secretName: data-router-secret
+      - name: data-router-properties
+        configMap:
+          name: data-router-prop-configmap
+      - name: data-router-dynamic-route
+        configMap:
+          name: data-router-dynamic-configmap
+      - name: data-router-dynamic-policy
+        configMap:
+          name: data-router-dynamic-configmap
       - name: data-router-logs
         hostPath:
-          path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/data-router/logs/"
+          path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/"
       restartPolicy: Always
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }}
\ No newline at end of file
+#{{ end }}
diff --git a/kubernetes/aai/templates/elasticsearch-configmap.yaml b/kubernetes/aai/templates/elasticsearch-configmap.yaml
new file mode 100644 (file)
index 0000000..4a02346
--- /dev/null
@@ -0,0 +1,9 @@
+#{{ if not .Values.disableAaiElasticsearch }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aai-elasticsearch-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/elasticsearch/config/elasticsearch.yml").AsConfig . | indent 2 }}
+#{{ end }}
index 680f942..d3535d0 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: elasticsearch
   namespace: "{{ .Values.nsPrefix }}-aai"
 spec:
+  replicas: {{ .Values.elasticsearchReplicas }}
   selector:
     matchLabels:
       app: elasticsearch
@@ -14,6 +15,28 @@ spec:
         app: elasticsearch
       name: elasticsearch
     spec:
+      initContainers:
+      - command:
+        - /bin/sh
+        - -c
+        - |
+          mkdir -p /logroot/elasticsearch/es-data
+          chmod -R 777 /logroot/elasticsearch/es-data
+          chown -R root:root /logroot
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        securityContext:
+          privileged: true
+        image: {{ .Values.image.es_bb }}
+        imagePullPolicy: {{ .Values.pullPolicy }}
+        name: init-sysctl
+        volumeMounts:
+        - name: elasticsearch-data
+          mountPath: /logroot/
       hostname: elasticsearch
       containers:
       - name: elasticsearch
@@ -31,6 +54,7 @@ spec:
           mountPath: /etc/localtime
           readOnly: true
         - name: elasticsearch-config
+          subPath: elasticsearch.yml
           mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
         - name: elasticsearch-data
           mountPath: /usr/share/elasticsearch/data
@@ -39,11 +63,11 @@ spec:
         hostPath:
           path: /etc/localtime
       - name: elasticsearch-config
-        hostPath:
-          path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/elasticsearch/config/elasticsearch.yml"
+        configMap:
+          name: aai-elasticsearch-configmap
       - name: elasticsearch-data
         hostPath:
-          path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/elasticsearch/es-data"
+          path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/"
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }}
\ No newline at end of file
+#{{ end }}
index fd8e10c..5b2a98c 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: hbase
   namespace: "{{ .Values.nsPrefix }}-aai"
 spec:
+  replicas: {{ .Values.hbaseReplicas }}
   selector:
     matchLabels:
       app: hbase
@@ -33,13 +34,18 @@ spec:
           initialDelaySeconds: 5
           periodSeconds: 10
         volumeMounts:
+        - name: hbase-data
+          mountPath: /tmp
         - name: localtime
           mountPath: /etc/localtime
           readOnly: true
       volumes:
+      - name: hbase-data
+        hostPath:
+          path: /dockerdata-nfs/{{ .Values.nsPrefix }}/aai/hbase
       - name: localtime
         hostPath:
           path: /etc/localtime
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }}
\ No newline at end of file
+#{{ end }}
diff --git a/kubernetes/aai/templates/modelloader-deployment-configmap.yaml b/kubernetes/aai/templates/modelloader-deployment-configmap.yaml
new file mode 100644 (file)
index 0000000..e5a59a1
--- /dev/null
@@ -0,0 +1,26 @@
+#{{ if not .Values.disableAaiModelLoaderService }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: model-loader-prop-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/model-loader/appconfig/model-loader.properties").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: model-loader-secret
+  namespace: {{ .Values.nsPrefix }}-aai
+type: Opaque
+data:
+{{ tpl (.Files.Glob "resources/config/model-loader/appconfig/auth/*").AsSecrets . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aai-model-loader-log-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/log/model-loader/logback.xml").AsConfig . | indent 2 }}
+#{{ end }}
index 7c2fddb..c5a788c 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: model-loader-service
   namespace: "{{ .Values.nsPrefix }}-aai"
 spec:
+  replicas: {{ .Values.modelLoaderReplicas }}
   selector:
     matchLabels:
       app: model-loader-service
@@ -25,8 +26,11 @@ spec:
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
-        - mountPath: /opt/app/model-loader/config/
-          name: aai-model-loader-config
+        - mountPath: /opt/app/model-loader/config/model-loader.properties
+          subPath: model-loader.properties
+          name: aai-model-loader-prop-config
+        - mountPath: /opt/app/model-loader/config/auth/
+          name: aai-model-loader-auth-config
         - mountPath: /var/log/onap
           name: aai-model-loader-logs
         - mountPath: /opt/app/model-loader/bundleconfig/etc/logback.xml
@@ -40,6 +44,7 @@ spec:
         imagePullPolicy: {{ .Values.pullPolicy }}
         volumeMounts:
         - mountPath: /usr/share/filebeat/filebeat.yml
+          subPath: filebeat.yml
           name: filebeat-conf
         - mountPath: /var/log/onap
           name: aai-model-loader-logs
@@ -49,28 +54,23 @@ spec:
       - name: localtime
         hostPath:
           path: /etc/localtime
-      - name: aai-model-loader-config
-        hostPath:
-          path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/model-loader/appconfig/"
+      - name: aai-model-loader-prop-config
+        configMap:
+          name: model-loader-prop-configmap
+      - name: aai-model-loader-auth-config
+        secret:
+          secretName: model-loader-secret
       - name: filebeat-conf
-        hostPath:
-          path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/logback/filebeat.yml
+        configMap:
+          name: aai-filebeat-configmap
       - name: aai-model-loader-logs
         emptyDir: {}
       - name: aai-model-loader-filebeat
         emptyDir: {}
       - name: aai-model-loader-log-conf
         configMap:
-         name: aai-model-loader-configmap
+         name: aai-model-loader-log-configmap
       restartPolicy: Always
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: aai-model-loader-configmap
-  namespace: {{ .Values.nsPrefix }}-aai
-data:
-{{ (.Files.Glob "resources/model-loader/conf/logback.xml").AsConfig | indent 2 }}
-#{{ end }}
\ No newline at end of file
+#{{ end }}
diff --git a/kubernetes/aai/templates/search-data-service-configmap.yaml b/kubernetes/aai/templates/search-data-service-configmap.yaml
new file mode 100644 (file)
index 0000000..8f707d4
--- /dev/null
@@ -0,0 +1,34 @@
+#{{ if not .Values.disableAaiSearchDataService }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: search-data-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/search-data-service/appconfig/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: search-data-keystone-secret
+  namespace: {{ .Values.nsPrefix }}-aai
+type: Opaque
+data:
+{{ tpl (.Files.Glob "resources/config/search-data-service/appconfig/auth/tomcat_keystore").AsSecrets . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aai-search-policy-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/search-data-service/appconfig/auth/search_policy.json").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aai-search-data-service-log-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/log/search-data-service/logback.xml").AsConfig . | indent 2 }}
+#{{ end }}
index e166845..7202e0d 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: search-data-service
   namespace: "{{ .Values.nsPrefix }}-aai"
 spec:
+  replicas: {{ .Values.searchDataServiceReplicas }}
   selector:
     matchLabels:
       app: search-data-service
@@ -29,8 +30,21 @@ spec:
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
-        - mountPath: /opt/app/search-data-service/config/
+        - mountPath: /opt/app/search-data-service/config/filter-config.json
+          subPath: filter-config.json
           name: aai-search-data-service-config
+        - mountPath: /opt/app/search-data-service/config/elastic-search.properties
+          subPath: elastic-search.properties
+          name: aai-search-data-service-config
+        - mountPath: /opt/app/search-data-service/config/analysis-config.json
+          subPath: analysis-config.json
+          name: aai-search-data-service-config
+        - mountPath: /opt/app/search-data-service/config/auth/tomcat_keystore
+          subPath: tomcat_keystore
+          name: aai-search-data-service-auth-config
+        - mountPath: /opt/app/search-data-service/config/auth/search_policy.json
+          subPath: search_policy.json
+          name: aai-search-data-search-policy-config
         - mountPath: /var/log/onap
           name: aai-search-data-service-logs
         - mountPath: /opt/app/search-data-service/bundleconfig/etc/logback.xml
@@ -48,6 +62,7 @@ spec:
         imagePullPolicy: {{ .Values.pullPolicy }}
         volumeMounts:
         - mountPath: /usr/share/filebeat/filebeat.yml
+          subPath: filebeat.yml
           name: filebeat-conf
         - mountPath: /var/log/onap
           name: aai-search-data-service-logs
@@ -58,27 +73,25 @@ spec:
         hostPath:
           path: /etc/localtime
       - name: aai-search-data-service-config
-        hostPath:
-          path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/search-data-service/appconfig/"
+        configMap:
+          name: search-data-configmap
+      - name: aai-search-data-service-auth-config
+        secret:
+          secretName: search-data-keystone-secret
+      - name: aai-search-data-search-policy-config
+        configMap:
+          name: aai-search-policy-configmap
       - name: filebeat-conf
-        hostPath:
-          path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/logback/filebeat.yml
+        configMap:
+          name: aai-filebeat-configmap
       - name: aai-search-data-service-logs
         emptyDir: {}
       - name: aai-search-data-service-filebeat
         emptyDir: {}
       - name: aai-search-data-service-log-conf
         configMap:
-         name: aai-search-data-service-configmap
+         name: aai-search-data-service-log-configmap
       restartPolicy: Always
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: aai-search-data-service-configmap
-  namespace: {{ .Values.nsPrefix }}-aai
-data:
-{{ (.Files.Glob "resources/search-data-service/conf/logback.xml").AsConfig | indent 2 }}
 #{{ end }}
diff --git a/kubernetes/aai/templates/sparky-be-deployment-configmap.yaml b/kubernetes/aai/templates/sparky-be-deployment-configmap.yaml
new file mode 100644 (file)
index 0000000..590608b
--- /dev/null
@@ -0,0 +1,42 @@
+#{{ if not .Values.disableAaiSparkyBe }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sparky-be-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/sparky-be/appconfig/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sparky-be-model-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/sparky-be/appconfig/model/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sparky-be-portal-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/sparky-be/appconfig/portal/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: sparky-be-secret
+  namespace: {{ .Values.nsPrefix }}-aai
+type: Opaque
+data:
+{{ tpl (.Files.Glob "resources/config/sparky-be/appconfig/auth/*").AsSecrets . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aai-sparky-be-log-configmap
+  namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/log/sparky-be/logback.xml").AsConfig . | indent 2 }}
+#{{ end }}
index 484a87f..e533928 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: sparky-be
   namespace: "{{ .Values.nsPrefix }}-aai"
 spec:
+  replicas: {{ .Values.sparkyReplicas }}
   selector:
     matchLabels:
       app: sparky-be
@@ -29,8 +30,30 @@ spec:
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
-        - mountPath: /opt/app/sparky/config/
+        - mountPath: /opt/app/sparky/config/auth/
+          name: aai-sparky-be-auth-config
+        - mountPath: /opt/app/sparky/config/synchronizer.properties
+          subPath: synchronizer.properties
           name: aai-sparky-be-config
+        - mountPath: /opt/app/sparky/config/suggestive-search.properties
+          subPath: suggestive-search.properties
+          name: aai-sparky-be-config
+        - mountPath: /opt/app/sparky/config/search-service.properties
+          subPath: search-service.properties
+          name: aai-sparky-be-config
+        - mountPath: /opt/app/sparky/config/roles.config
+          subPath: roles.config
+          name: aai-sparky-be-config
+        - mountPath: /opt/app/sparky/config/elasticsearch.properties
+          subPath: elasticsearch.properties
+          name: aai-sparky-be-config
+        - mountPath: /opt/app/sparky/config/aai.properties
+          subPath: aai.properties
+          name: aai-sparky-be-config
+        - mountPath: /opt/app/sparky/config/model/
+          name: aai-sparky-be-model-config
+        - mountPath: /opt/app/sparky/config/portal/
+          name: aai-sparky-be-portal-config
         - mountPath: /var/log/onap
           name: aai-sparky-be-logs
         - mountPath: /opt/app/sparky/bundleconfig/etc/logback.xml
@@ -48,6 +71,7 @@ spec:
         imagePullPolicy: {{ .Values.pullPolicy }}
         volumeMounts:
         - mountPath: /usr/share/filebeat/filebeat.yml
+          subPath: filebeat.yml
           name: filebeat-conf
         - mountPath: /var/log/onap
           name: aai-sparky-be-logs
@@ -58,27 +82,28 @@ spec:
         hostPath:
           path: /etc/localtime
       - name: aai-sparky-be-config
-        hostPath:
-          path: "/dockerdata-nfs/{{ .Values.nsPrefix }}/aai/sparky-be/appconfig/"
+        configMap:
+          name: sparky-be-configmap
+      - name: aai-sparky-be-model-config
+        configMap:
+          name: sparky-be-model-configmap
+      - name: aai-sparky-be-portal-config
+        configMap:
+          name: sparky-be-portal-configmap
+      - name: aai-sparky-be-auth-config
+        secret:
+          secretName: sparky-be-secret
       - name: filebeat-conf
-        hostPath:
-          path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/logback/filebeat.yml
+        configMap:
+          name: aai-filebeat-configmap
       - name: aai-sparky-be-logs
         emptyDir: {}
       - name: aai-sparky-filebeat
         emptyDir: {}
       - name: aai-sparky-be-log-conf
         configMap:
-         name: aai-sparky-be-configmap
+         name: aai-sparky-be-log-configmap
       restartPolicy: Always
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: aai-sparky-be-configmap
-  namespace: {{ .Values.nsPrefix }}-aai
-data:
-{{ (.Files.Glob "resources/sparky-be/conf/logback.xml").AsConfig | indent 2 }}
 #{{ end }}
index ce625b7..b058597 100644 (file)
@@ -1,6 +1,19 @@
 nsPrefix: onap
 pullPolicy: Always
 nodePortPrefix: 302
+
+# POLICY hotfix - Note this must be temporary
+# See https://jira.onap.org/browse/POLICY-510
+aaiServiceClusterIp: 10.43.255.254
+aaiServiceReplicas: 1
+aaiResourceReplicas: 1
+aaiTraversalReplicas: 1
+dataRouterReplicas: 1
+elasticsearchReplicas: 1
+hbaseReplicas: 1
+modelLoaderReplicas: 1
+searchDataServiceReplicas: 1
+sparkyReplicas: 1
 image:
   readiness: oomk8s/readiness-check:1.0.0
   aaiProxy: aaionap/haproxy
@@ -23,3 +36,4 @@ image:
   sparkyBeVersion: v1.1.0
   gremlinServerImage: aaionap/gremlin-server
   filebeat: docker.elastic.co/beats/filebeat:5.5.0
+  es_bb: busybox
diff --git a/kubernetes/appc/resources/config/log/filebeat/log4j/filebeat.yml b/kubernetes/appc/resources/config/log/filebeat/log4j/filebeat.yml
new file mode 100644 (file)
index 0000000..79c9a08
--- /dev/null
@@ -0,0 +1,49 @@
+filebeat.prospectors:
+#it is mandatory, in our case it's log
+- input_type: log
+  #This is the canonical path as mentioned in logback.xml, *.* means it will monitor all files in the directory.
+  paths:
+    - /var/log/onap/*/*/*/*.log
+    - /var/log/onap/*/*/*.log
+    - /var/log/onap/*/*.log
+
+  # The below commented properties are for time-based rolling policy. But as the log4j 1.2x does not support time-based rolling these properties are not set
+  #Files older than this should be ignored.In our case it will be 48 hours i.e. 2 days. It is a helping flag for clean_inactive
+  #ignore_older: 48h
+  # Remove the registry entry for a file that is more than the specified time. In our case it will be 96 hours, i.e. 4 days. It will help to keep registry records within the limit
+  #clean_inactive: 96h
+
+  #Multiline properties for log4j xml log events
+  multiline.pattern: '</log4j:event>'
+  multiline.negate: true
+  multiline.match: before
+  #multiline.max_lines: 500
+  #multiline.timeout: 5s
+
+# Name of the registry file. If a relative path is used, it is considered relative to the
+# data path. Else full qualified file name.
+#filebeat.registry_file: ${path.data}/registry
+
+
+output.logstash:
+  #List of logstash server ip addresses with port number.
+  #But, in our case, this will be the loadbalancer IP address.
+  #For the below property to work the loadbalancer or logstash should expose 5044 port to listen for filebeat events, or the port in the property should be changed appropriately.
+  hosts: ["logstash.onap-log:5044"]
+  #If enabled, will do load balancing among available Logstash instances automatically.
+  loadbalance: true
+
+  #The list of root certificates for server verifications.
+  #If certificate_authorities is empty or not set, the trusted
+  #certificate authorities of the host system are used.
+  #ssl.certificate_authorities: $ssl.certificate_authorities
+
+  #The path to the certificate for SSL client authentication. If the certificate is not specified,
+  #client authentication is not available.
+  #ssl.certificate: $ssl.certificate
+
+  #The client certificate key used for client authentication.
+  #ssl.key: $ssl.key
+
+  #The passphrase used to decrypt an encrypted key stored in the configured key file
+  #ssl.key_passphrase: $ssl.key_passphrase
diff --git a/kubernetes/appc/templates/appc-conf-configmap.yaml b/kubernetes/appc/templates/appc-conf-configmap.yaml
new file mode 100644 (file)
index 0000000..0e418d1
--- /dev/null
@@ -0,0 +1,9 @@
+#{{ if not .Values.disableAppcAppc }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: appc-conf-configmap
+  namespace: {{ .Values.nsPrefix }}-appc
+data:
+{{ tpl (.Files.Glob "resources/config/conf/*").AsConfig . | indent 2 }}
+#{{ end }}
index f4593b0..35e9470 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: appc
   namespace: "{{ .Values.nsPrefix }}-appc"
 spec:
+  replicas: {{ .Values.appcReplicas }}
   selector:
     matchLabels:
       app: appc
@@ -49,15 +50,19 @@ spec:
           name: localtime
           readOnly: true
         - mountPath: /opt/openecomp/appc/data/properties/appc.properties
-          name: appc-properties
+          name: appc-conf
+          subPath: appc.properties
         - mountPath: /opt/openecomp/appc/data/properties/aaiclient.properties
-          name: appc-aaiclient-properties
+          name: appc-conf
+          subPath: aaiclient.properties
         - mountPath: /opt/onap/sdnc/data/properties/aaiclient.properties
-          name: sdnc-aaiclient-properties
+          name: appc-conf
+          subPath: aaiclient.properties
         - mountPath: /var/log/onap
           name: appc-logs
         - mountPath: /opt/opendaylight/current/etc/org.ops4j.pax.logging.cfg
           name: appc-log-config
+          subPath: org.ops4j.pax.logging.cfg
         ports:
         - containerPort: 8181
         - containerPort: 1830
@@ -72,6 +77,7 @@ spec:
         volumeMounts:
         - mountPath: /usr/share/filebeat/filebeat.yml
           name: filebeat-conf
+          subPath: filebeat.yml
         - mountPath: /var/log/onap
           name: appc-logs
         - mountPath: /usr/share/filebeat/data
@@ -81,24 +87,18 @@ spec:
           hostPath:
             path: /etc/localtime
         - name: filebeat-conf
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/log4j/filebeat.yml
+          configMap:
+            name: appc-filebeat-configmap
         - name: appc-log-config
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/appc/org.ops4j.pax.logging.cfg
+          configMap:
+            name: appc-logging-cfg-configmap
         - name: appc-logs
           emptyDir: {}
         - name: appc-data-filebeat
           emptyDir: {}
-        - name: appc-properties
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/appc/conf/appc.properties
-        - name: appc-aaiclient-properties
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/appc/conf/aaiclient.properties
-        - name: sdnc-aaiclient-properties
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdnc/conf/aaiclient.properties
+        - name: appc-conf
+          configMap:
+            name: appc-conf-configmap
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
 #{{ end }}
diff --git a/kubernetes/appc/templates/appc-log-configmap.yaml b/kubernetes/appc/templates/appc-log-configmap.yaml
new file mode 100644 (file)
index 0000000..323f852
--- /dev/null
@@ -0,0 +1,17 @@
+#{{ if not .Values.disableAppcAppc }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: appc-filebeat-configmap
+  namespace: {{ .Values.nsPrefix }}-appc
+data:
+{{ tpl (.Files.Glob "resources/config/log/filebeat/log4j/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: appc-logging-cfg-configmap
+  namespace: {{ .Values.nsPrefix }}-appc
+data:
+{{ tpl (.Files.Glob "resources/config/log/*").AsConfig . | indent 2 }}
+#{{ end }}
index 607a0df..0b29393 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: appc-dbhost
   namespace: "{{ .Values.nsPrefix }}-appc"
 spec:
+  replicas: {{ .Values.dbReplicas }}
   selector:
     matchLabels:
       app: appc-dbhost
@@ -45,4 +46,4 @@ spec:
           claimName: appc-db
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }}
\ No newline at end of file
+#{{ end }}
index ed69c05..3bbc671 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: appc-dgbuilder
   namespace: "{{ .Values.nsPrefix }}-appc"
 spec:
+  replicas: {{ .Values.dgbuilderReplicas }}
   selector:
     matchLabels:
       app: appc-dgbuilder
index 882267c..0bf4da9 100644 (file)
@@ -1,9 +1,12 @@
 nsPrefix: onap
 pullPolicy: Always
 nodePortPrefix: 302
+appcReplicas: 1
+dbReplicas: 1
+dgbuilderReplicas: 1
 image:
   readiness: oomk8s/readiness-check:1.0.0
   appc: nexus3.onap.org:10001/openecomp/appc-image:v1.2.0
   mysqlServer: mysql/mysql-server:5.6
   dgbuilderSdnc: nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:v0.1.0
-  filebeat: docker.elastic.co/beats/filebeat:5.5.0
\ No newline at end of file
+  filebeat: docker.elastic.co/beats/filebeat:5.5.0
diff --git a/kubernetes/clamp/templates/clamp-mariadb-configmap.yaml b/kubernetes/clamp/templates/clamp-mariadb-configmap.yaml
new file mode 100644 (file)
index 0000000..1fdc155
--- /dev/null
@@ -0,0 +1,33 @@
+#{{ if not .Values.disableClampClampMariadb }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: clamp-entrypoint-initdb-configmap
+  namespace: {{ .Values.nsPrefix }}-clamp
+data:
+{{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: clamp-entrypoint-drop-configmap
+  namespace: {{ .Values.nsPrefix }}-clamp
+data:
+{{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/drop/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: clamp-entrypoint-bulkload-configmap
+  namespace: {{ .Values.nsPrefix }}-clamp
+data:
+{{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: clamp-mariadb-conf-configmap
+  namespace: {{ .Values.nsPrefix }}-clamp
+data:
+{{ tpl (.Files.Glob "resources/config/mariadb/conf.d/conf1/*").AsConfig . | indent 2 }}
+#{{ end }}
index 46a6433..0566e50 100644 (file)
@@ -25,9 +25,14 @@ spec:
           - name: MYSQL_ROOT_PASSWORD
             value: {{ .Values.mysqlPassword }}
         volumeMounts:
-        - mountPath: /docker-entrypoint-initdb.d
+        - mountPath: /docker-entrypoint-initdb.d/load-sql-files-tests-automation.sh
           name: docker-entrypoint-initdb
-        - mountPath: /etc/mysql/conf.d
+          subPath: load-sql-files-tests-automation.sh
+        - mountPath: /docker-entrypoint-initdb.d/drop/
+          name: docker-entrypoint-clds-drop
+        - mountPath: /docker-entrypoint-initdb.d/bulkload/
+          name: docker-entrypoint-bulkload
+        - mountPath: /etc/mysql/conf.d/conf1/
           name:  clamp-mariadb-conf
         - mountPath: /var/lib/mysql
           name: clamp-mariadb-data
@@ -38,14 +43,20 @@ spec:
           periodSeconds: 10
       volumes:
         - name: docker-entrypoint-initdb
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/clamp/mariadb/docker-entrypoint-initdb.d
+          configMap:
+            name: clamp-entrypoint-initdb-configmap
+        - name: docker-entrypoint-clds-drop
+          configMap:
+            name: clamp-entrypoint-drop-configmap
+        - name: docker-entrypoint-bulkload
+          configMap:
+            name: clamp-entrypoint-bulkload-configmap
         - name: clamp-mariadb-conf
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/clamp/mariadb/conf.d
+          configMap:
+            name: clamp-mariadb-conf-configmap
         - name: clamp-mariadb-data
           persistentVolumeClaim:
             claimName: clamp-db
       imagePullSecrets:
       - name: {{ .Values.nsPrefix }}-docker-registry-key
-#{{ end }}
\ No newline at end of file
+#{{ end }}
index e848bfd..24e1979 100644 (file)
@@ -8,11 +8,15 @@ metadata:
     app: cli
 spec:
   ports:
-  - name: 80-port
-    port: 80
+  - name: 8080-port
+    port: 8080
     targetPort: 80
     nodePort: {{ .Values.nodePortPrefix }}60
+  - name: 9090-port
+    port: 9090
+    targetPort: 8080
+    nodePort: {{ .Values.nodePortPrefix }}61
   type: NodePort
   selector:
     app: cli
-#{{ end }}
\ No newline at end of file
+#{{ end }}
index 3a702b9..128c28a 100644 (file)
@@ -25,6 +25,7 @@ spec:
           value: daemon
         ports:
         - containerPort: 80
+        - containerPort: 8080
           name: cli
         readinessProbe:
           tcpSocket:
@@ -33,4 +34,4 @@ spec:
           periodSeconds: 10
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }}
\ No newline at end of file
+#{{ end }}
index 65acb46..a0202f8 100755 (executable)
@@ -79,7 +79,6 @@ chmod -R 777 /config-init/$NAMESPACE/aai/sparky-be/logs/
 chmod -R 777 /config-init/$NAMESPACE/aai/elasticsearch/es-data/
 chmod -R 777 /config-init/$NAMESPACE/aai/search-data-service/logs/
 chmod -R 777 /config-init/$NAMESPACE/aai/data-router/logs/
-chmod -R 777 /config-init/$NAMESPACE/policy/mariadb/
 chmod -R 777 /config-init/$NAMESPACE/log/elasticsearch
 chown -R root:root /config-init/$NAMESPACE/log
 
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnhost.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/appc-sdnhost.json
new file mode 100644 (file)
index 0000000..c96a35a
--- /dev/null
@@ -0,0 +1,22 @@
+{
+  "service": {
+    "name": "Health Check: APPC - SDN Host",
+    "checks": [
+      {
+        "id": "appc-sdnhost",
+        "name": "APPC SDN Host Health Check",
+        "http": "http://sdnhost.onap-appc:8282/apidoc/explorer/index.html",
+        "method": "HEAD",
+        "header": {
+          "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      }
+    ]
+  }
+}
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-elastic-search.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-elastic-search.json
new file mode 100644 (file)
index 0000000..7785502
--- /dev/null
@@ -0,0 +1,23 @@
+{
+  "service": {
+    "name": "Health Check: Log - Elastic Search",
+    "checks": [
+      {
+        "id": "log-elasticsearch-server",
+        "name": "Log Elastic Search Health Check",
+        "http": "http://elasticsearch.onap-log:9200/_cluster/health?pretty",
+        "method": "GET",
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "log-elasticsearch-tcp",
+        "name": "Log Elastic Search TCP Health Check",
+        "tcp": "elasticsearchtcp.onap-log:9300",
+        "interval": "15s",
+        "timeout": "1s"
+      }
+    ]
+  }
+}
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-kibana.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-kibana.json
new file mode 100644 (file)
index 0000000..794fb4b
--- /dev/null
@@ -0,0 +1,16 @@
+{
+  "service": {
+    "name": "Health Check: Log - Kibana",
+    "checks": [
+      {
+        "id": "log-kibana-server",
+        "name": "Log kibana Health Check",
+        "http": "http://kibana.onap-log:5601/status",
+        "method": "HEAD",
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      }
+    ]
+  }
+}
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-logstash.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/log-logstash.json
new file mode 100644 (file)
index 0000000..3c0f450
--- /dev/null
@@ -0,0 +1,95 @@
+{
+  "service": {
+    "name": "Health Check: Log - Log Stash",
+    "checks": [
+      {
+        "id": "log-logstash-internal-server-gi",
+        "name": "Log Stash Health Check - General Information",
+        "http": "http://logstashinternal.onap-log:9600/?pretty",
+        "method": "GET",
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "log-logstash-internal-server-node-info",
+        "name": "Log Stash Health Check - Node Information",
+        "http": "http://logstashinternal.onap-log:9600/_node/?pretty",
+        "method": "GET",
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "log-logstash-internal-server-os-info",
+        "name": "Log Stash Health Check - OS Information",
+        "http": "http://logstashinternal.onap-log:9600/_node/os?pretty",
+        "method": "GET",
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "log-logstash-internal-server-jvm-info",
+        "name": "Log Stash Health Check - JVM Information",
+        "http": "http://logstashinternal.onap-log:9600/_node/jvm?pretty",
+        "method": "GET",
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "log-logstash-internal-server-plugin-info",
+        "name": "Log Stash Health Check - Plugin Information",
+        "http": "http://logstashinternal.onap-log:9600/_node/plugins?pretty",
+        "method": "GET",
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "log-logstash-internal-server-node-stat",
+        "name": "Log Stash Health Check - Node Stats",
+        "http": "http://logstashinternal.onap-log:9600/_node/stats?pretty",
+        "method": "GET",
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "log-logstash-internal-server-jvm-stat",
+        "name": "Log Stash Health Check - JVM Stats",
+        "http": "http://logstashinternal.onap-log:9600/_node/stats/jvm?pretty",
+        "method": "GET",
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "log-logstash-internal-server-process-stat",
+        "name": "Log Stash Health Check - Process Stats",
+        "http": "http://logstashinternal.onap-log:9600/_node/stats/process?pretty",
+        "method": "GET",
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "log-logstash-internal-server-os-stat",
+        "name": "Log Stash Health Check - OS Stats",
+        "http": "http://logstashinternal.onap-log:9600/_node/stats/os?pretty",
+        "method": "GET",
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "log-logstash-tcp",
+        "name": "Log Stash File Beat TCP Health Check",
+        "tcp": "logstash.onap-log:5044",
+        "interval": "15s",
+        "timeout": "1s"
+      }
+    ]
+  }
+}
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/msb-health.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/msb-health.json
new file mode 100644 (file)
index 0000000..a5738b3
--- /dev/null
@@ -0,0 +1,39 @@
+{
+  "service": {
+    "name": "Health Check: MSB",
+    "checks": [
+      {
+        "id": "msb-eag.onap-msb",
+        "name": "MSB eag Health Check",
+        "http": "http://msb-eag.onap-msb:80/iui/microservices/default.html",
+        "method": "HEAD",
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "msb-iag.onap-msb",
+        "name": "MSB iag Health Check",
+        "http": "http://msb-iag.onap-msb:80/iui/microservices/default.html",
+        "method": "HEAD",
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "msb-consul.onap-msb",
+        "name": "MSB consul Health Check",
+        "tcp": "msb-consul.onap-msb:8500",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "msb-discovery.onap-msb",
+        "name": "MSB discovery Health Check",
+        "tcp": "msb-discovery.onap-msb:10081",
+        "interval": "15s",
+        "timeout": "1s"
+      }
+    ]
+  }
+}
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/multicloud-health-check.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/multicloud-health-check.json
new file mode 100644 (file)
index 0000000..843afa3
--- /dev/null
@@ -0,0 +1,63 @@
+{
+  "service": {
+    "name": "Health Check: MULTICLOUD",
+    "checks": [
+      {
+        "id": "framework",
+        "name": "Framework Health Check",
+        "http": "http://framework.onap-multicloud:9001/api/multicloud/v0/swagger.json",
+        "method": "HEAD",
+        "header": {
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "multicloud-ocata",
+        "name": "Multicloud Ocata Health Check",
+        "http": "http://multicloud-ocata.onap-multicloud:9006/api/multicloud-ocata/v0/swagger.json",
+        "method": "HEAD",
+        "header": {
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "multicloud-vio",
+        "name": "Multicloud Vio Health Check",
+        "http": "http://multicloud-vio.onap-multicloud:9004/api/multicloud-vio/v0/swagger.json",
+        "method": "HEAD",
+        "header": {
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "multicloud-windriver",
+        "name": "Multicloud Windriver Health Check",
+        "http": "http://multicloud-windriver.onap-multicloud:9005/api/multicloud-titanium_cloud/v0/swagger.json",
+        "method": "HEAD",
+        "header": {
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      }
+    ]
+  }
+}
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnhost.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/sdnc-sdnhost.json
new file mode 100644 (file)
index 0000000..93d5b1a
--- /dev/null
@@ -0,0 +1,22 @@
+{
+  "service": {
+    "name": "Health Check: SDNC - SDN Host",
+    "checks": [
+      {
+        "id": "sdnc-sdnhost",
+        "name": "SDNC SDN Host Health Check",
+        "http": "http://sdnhost.onap-sdnc:8282/apidoc/explorer/index.html",
+        "method": "HEAD",
+        "header": {
+          "Authorization": ["Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="],
+          "Cache-Control": ["no-cache"],
+          "Content-Type": ["application/json"],
+          "Accept": ["application/json"]
+        },
+        "tls_skip_verify": true,
+        "interval": "15s",
+        "timeout": "1s"
+      }
+    ]
+  }
+}
diff --git a/kubernetes/config/docker/init/src/config/consul/consul-agent-config/vfc-health.json b/kubernetes/config/docker/init/src/config/consul/consul-agent-config/vfc-health.json
new file mode 100644 (file)
index 0000000..c7f83a8
--- /dev/null
@@ -0,0 +1,112 @@
+{
+  "service": {
+    "name": "Health Check: VFC",
+    "checks": [
+      {
+        "id": "vfc-catalog.onap-vfc",
+        "name": "VFC catalog Health Check",
+        "tcp": "vfc-catalog.onap-vfc:8806",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "vfc-emsdriver.onap-vfc",
+        "name": "VFC emsdriver Health Check",
+        "tcp": "vfc-emsdriver.onap-vfc:8206",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "vfc-gvnfmdriver.onap-vfc",
+        "name": "VFC gvnfmdriver Health Check",
+        "tcp": "vfc-gvnfmdriver.onap-vfc:8484",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "vfc-hwvnfmdriver.onap-vfc",
+        "name": "VFC hwvnfmdriver Health Check",
+        "tcp": "vfc-hwvnfmdriver.onap-vfc:8482",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "vfc-jujudriver.onap-vfc",
+        "name": "VFC jujudriver Health Check",
+        "tcp": "vfc-jujudriver.onap-vfc:8483",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "vfc-nokiavnfmdriver.onap-vfc",
+        "name": "VFC nokiavnfmdriver Health Check",
+        "tcp": "vfc-nokiavnfmdriver.onap-vfc:8486",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "vfc-nslcm.onap-vfc",
+        "name": "VFC nslcm Health Check",
+        "tcp": "vfc-nslcm.onap-vfc:8403",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "vfc-resmgr.onap-vfc",
+        "name": "VFC resmgr Health Check",
+        "tcp": "vfc-resmgr.onap-vfc:8480",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "vfc-vnflcm.onap-vfc",
+        "name": "VFC vnflcm Health Check",
+        "tcp": "vfc-vnflcm.onap-vfc:8801",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "vfc-vnfmgr.onap-vfc",
+        "name": "VFC vnfmgr Health Check",
+        "tcp": "vfc-vnfmgr.onap-vfc:8803",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "vfc-vnfres.onap-vfc",
+        "name": "VFC vnfres Health Check",
+        "tcp": "vfc-vnfres.onap-vfc:8802",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "vfc-workflow.onap-vfc",
+        "name": "VFC workflow Health Check",
+        "tcp": "vfc-workflow.onap-vfc:10550",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "vfc-workflowengineactiviti.onap-vfc",
+        "name": "VFC workflow-engine Health Check",
+        "tcp": "vfc-workflowengineactiviti.onap-vfc:8080",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "vfc-ztesdncdriver.onap-vfc",
+        "name": "VFC ztesdncdriver Health Check",
+        "tcp": "vfc-ztesdncdriver.onap-vfc:8411",
+        "interval": "15s",
+        "timeout": "1s"
+      },
+      {
+        "id": "vfc-ztevnfmdriver.onap-vfc",
+        "name": "VFC ztevnfmdriver Health Check",
+        "tcp": "vfc-ztevnfmdriver.onap-vfc:8410",
+        "interval": "15s",
+        "timeout": "1s"
+      }
+    ]
+  }
+}
diff --git a/kubernetes/config/docker/init/src/config/log/policy/drools/logback.xml b/kubernetes/config/docker/init/src/config/log/policy/drools/logback.xml
deleted file mode 100644 (file)
index 2b6d11e..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>\r
-<!--\r
-  ============LICENSE_START=======================================================\r
-  policy-management\r
-  ================================================================================\r
-  Copyright (C) 2017 AT&amp;T Intellectual Property. All rights reserved.\r
-  ================================================================================\r
-  Licensed under the Apache License, Version 2.0 (the "License");\r
-  you may not use this file except in compliance with the License.\r
-  You may obtain a copy of the License at\r
-  \r
-       http://www.apache.org/licenses/LICENSE-2.0\r
-  \r
-  Unless required by applicable law or agreed to in writing, software\r
-  distributed under the License is distributed on an "AS IS" BASIS,\r
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
-  See the License for the specific language governing permissions and\r
-  limitations under the License.\r
-  ============LICENSE_END=========================================================\r
-  -->\r
-<configuration debug="true" scan="true" scanPeriod="3 seconds">\r
-   <!--<jmxConfigurator /> -->\r
-   <!--  specify the base path of the log directory --> \r
-   <property name="logDir" value="/var/log/onap" />\r
-   <!--  specify the component name -->\r
-   <property name="componentName" value="policy" />\r
-   <!-- specify the sub component name -->\r
-   <property name="subComponentName" value="drools" />\r
-   <!-- The directories where logs are written --> \r
-   <property name="logDirectory" value="${logDir}/${componentName}/${subComponentName}" />\r
-   <property name="pattern" value="%d{&amp;quot;yyyy-MM-dd'T'HH:mm:ss.SSSXXX&amp;quot;, UTC}\t[%thread]\t%-5level\t%logger{36}\t%replace(%replace(%replace(%mdc){'\t','\\\\t'}){', ','\t'}){'\n', '\\\\n'}\t%replace(%replace(%msg){'\n', '\\\\n'}){'\t','\\\\t'}%n" />\r
-   <!--  log file names -->\r
-   <property name="errorLogName" value="error" />\r
-   <property name="metricsLogName" value="metrics" />\r
-   <property name="auditLogName" value="audit" />\r
-   <property name="debugLogName" value="debug" />\r
-   <property name="queueSize" value="256" />\r
-   <property name="maxFileSize" value="50MB" />\r
-   <property name="maxHistory" value="30" />\r
-   <property name="totalSizeCap" value="10GB" />\r
-   <!-- Example evaluator filter applied against console appender -->\r
-   <appender class="ch.qos.logback.core.ConsoleAppender" name="STDOUT">\r
-      <encoder>\r
-         <pattern>${pattern}</pattern>\r
-      </encoder>\r
-   </appender>\r
-   <!-- ============================================================================ -->\r
-   <!-- EELF Appenders -->\r
-   <!-- ============================================================================ -->\r
-   <!-- The EELFAppender is used to record events to the general application \r
-    log -->\r
-   <!-- EELF Audit Appender. This appender is used to record audit engine \r
-    related logging events. The audit logger and appender are specializations \r
-    of the EELF application root logger and appender. This can be used to segregate \r
-    Policy engine events from other components, or it can be eliminated to record \r
-    these events as part of the application root log. -->\r
-   <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFAudit">\r
-      <file>${logDirectory}/${auditLogName}.log</file>\r
-      <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">\r
-         <fileNamePattern>${logDirectory}/${auditLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>\r
-         <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">\r
-            <maxFileSize>${maxFileSize}</maxFileSize>\r
-         </timeBasedFileNamingAndTriggeringPolicy>\r
-         <maxHistory>${maxHistory}</maxHistory>\r
-         <totalSizeCap>${totalSizeCap}</totalSizeCap>\r
-      </rollingPolicy>\r
-      <encoder>\r
-         <pattern>${pattern}</pattern>\r
-      </encoder>\r
-   </appender>\r
-   <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFAudit">\r
-      <queueSize>${queueSize}</queueSize>\r
-      <appender-ref ref="EELFAudit" />\r
-   </appender>\r
-   <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFMetrics">\r
-      <file>${logDirectory}/${metricsLogName}.log</file>\r
-      <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">\r
-         <fileNamePattern>${logDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>\r
-         <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">\r
-            <maxFileSize>${maxFileSize}</maxFileSize>\r
-         </timeBasedFileNamingAndTriggeringPolicy>\r
-         <maxHistory>${maxHistory}</maxHistory>\r
-         <totalSizeCap>${totalSizeCap}</totalSizeCap>\r
-      </rollingPolicy>\r
-      <encoder>\r
-         <pattern>${pattern}</pattern>\r
-      </encoder>\r
-   </appender>\r
-   <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFMetrics">\r
-      <queueSize>${queueSize}</queueSize>\r
-      <appender-ref ref="EELFMetrics" />\r
-   </appender>\r
-   <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFError">\r
-      <file>${logDirectory}/${errorLogName}.log</file>\r
-      <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">\r
-         <fileNamePattern>${logDirectory}/${errorLogName}.%d{yyyy-MM-dd}.%i.log</fileNamePattern>\r
-         <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">\r
-            <maxFileSize>${maxFileSize}</maxFileSize>\r
-         </timeBasedFileNamingAndTriggeringPolicy>\r
-         <maxHistory>${maxHistory}</maxHistory>\r
-         <totalSizeCap>${totalSizeCap}</totalSizeCap>\r
-      </rollingPolicy>\r
-      <encoder>\r
-         <pattern>${pattern}</pattern>\r
-      </encoder>\r
-      <filter class="ch.qos.logback.classic.filter.ThresholdFilter">\r
-         <level>INFO</level>\r
-      </filter>\r
-   </appender>\r
-   <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFError">\r
-      <queueSize>${queueSize}</queueSize>\r
-      <appender-ref ref="EELFError" />\r
-   </appender>\r
-   <appender class="ch.qos.logback.core.rolling.RollingFileAppender" name="EELFDebug">\r
-      <file>${logDirectory}/${debugLogName}.log</file>\r
-      <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">\r
-         <fileNamePattern>${logDirectory}/${debugLogName}.%i.log.zip</fileNamePattern>\r
-         <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">\r
-            <maxFileSize>${maxFileSize}</maxFileSize>\r
-         </timeBasedFileNamingAndTriggeringPolicy>\r
-         <maxHistory>${maxHistory}</maxHistory>\r
-         <totalSizeCap>${totalSizeCap}</totalSizeCap>\r
-      </rollingPolicy>\r
-      <encoder>\r
-         <pattern>${pattern}</pattern>\r
-      </encoder>\r
-   </appender>\r
-   <appender class="ch.qos.logback.classic.AsyncAppender" name="asyncEELFDebug">\r
-      <queueSize>${queueSize}</queueSize>\r
-      <appender-ref ref="EELFDebug" />\r
-      <includeCallerData>true</includeCallerData>\r
-   </appender>\r
-   <!-- ============================================================================ -->\r
-   <!--  EELF loggers -->\r
-   <!-- ============================================================================ -->\r
-   <logger additivity="false" level="info" name="com.att.eelf.audit">\r
-      <appender-ref ref="asyncEELFAudit" />\r
-   </logger>\r
-   <logger additivity="false" level="info" name="com.att.eelf.metrics">\r
-      <appender-ref ref="asyncEELFMetrics" />\r
-   </logger>\r
-   <logger additivity="false" level="info" name="com.att.eelf.error">\r
-      <appender-ref ref="asyncEELFError" />\r
-   </logger>\r
-   <logger additivity="false" level="debug" name="com.att.eelf.debug">\r
-      <appender-ref ref="asyncEELFDebug" />\r
-   </logger>\r
-   <root level="INFO">\r
-      <appender-ref ref="asyncEELFDebug" />\r
-      <appender-ref ref="asyncEELFError" />\r
-   </root>\r
-</configuration>
\ No newline at end of file
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/aria_log.00000001 b/kubernetes/config/docker/init/src/config/policy/mariadb/data/aria_log.00000001
deleted file mode 100644 (file)
index 8608ff7..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/aria_log.00000001 and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/aria_log_control b/kubernetes/config/docker/init/src/config/policy/mariadb/data/aria_log_control
deleted file mode 100644 (file)
index 9ae850a..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/aria_log_control and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/debian-10.0.flag b/kubernetes/config/docker/init/src/config/policy/mariadb/data/debian-10.0.flag
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/firstrun b/kubernetes/config/docker/init/src/config/policy/mariadb/data/firstrun
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/ib_logfile0 b/kubernetes/config/docker/init/src/config/policy/mariadb/data/ib_logfile0
deleted file mode 100644 (file)
index cc8b741..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/ib_logfile0 and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/ib_logfile1 b/kubernetes/config/docker/init/src/config/policy/mariadb/data/ib_logfile1
deleted file mode 100644 (file)
index 274bba0..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/ib_logfile1 and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/ibdata1 b/kubernetes/config/docker/init/src/config/policy/mariadb/data/ibdata1
deleted file mode 100644 (file)
index 3920f04..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/ibdata1 and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/log/db.opt b/kubernetes/config/docker/init/src/config/policy/mariadb/data/log/db.opt
deleted file mode 100644 (file)
index d8429c4..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-default-character-set=latin1
-default-collation=latin1_swedish_ci
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/multi-master.info b/kubernetes/config/docker/init/src/config/policy/mariadb/data/multi-master.info
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/column_stats.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/column_stats.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/column_stats.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/column_stats.MYI
deleted file mode 100644 (file)
index 9ff5ed6..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/column_stats.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/column_stats.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/column_stats.frm
deleted file mode 100644 (file)
index fefc7eb..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/column_stats.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/columns_priv.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/columns_priv.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/columns_priv.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/columns_priv.MYI
deleted file mode 100644 (file)
index f261e28..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/columns_priv.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/columns_priv.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/columns_priv.frm
deleted file mode 100644 (file)
index faa4a8a..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/columns_priv.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/db.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/db.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/db.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/db.MYI
deleted file mode 100644 (file)
index 628c578..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/db.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/db.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/db.frm
deleted file mode 100644 (file)
index 1ab1f59..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/db.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/event.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/event.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/event.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/event.MYI
deleted file mode 100644 (file)
index fc4d47f..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/event.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/event.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/event.frm
deleted file mode 100644 (file)
index 9089087..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/event.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/func.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/func.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/func.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/func.MYI
deleted file mode 100644 (file)
index b0ddde1..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/func.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/func.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/func.frm
deleted file mode 100644 (file)
index 42aca49..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/func.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/general_log.CSM b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/general_log.CSM
deleted file mode 100644 (file)
index 8d08b8d..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/general_log.CSM and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/general_log.CSV b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/general_log.CSV
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/general_log.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/general_log.frm
deleted file mode 100644 (file)
index 919bb7f..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/general_log.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/gtid_slave_pos.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/gtid_slave_pos.frm
deleted file mode 100644 (file)
index d09f1d4..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/gtid_slave_pos.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/gtid_slave_pos.ibd b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/gtid_slave_pos.ibd
deleted file mode 100644 (file)
index b74a0af..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/gtid_slave_pos.ibd and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_category.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_category.MYD
deleted file mode 100644 (file)
index 360a41a..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_category.MYD and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_category.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_category.MYI
deleted file mode 100644 (file)
index c381776..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_category.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_category.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_category.frm
deleted file mode 100644 (file)
index e9dc205..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_category.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_keyword.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_keyword.MYD
deleted file mode 100644 (file)
index 570509b..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_keyword.MYD and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_keyword.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_keyword.MYI
deleted file mode 100644 (file)
index 36715cf..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_keyword.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_keyword.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_keyword.frm
deleted file mode 100644 (file)
index 999eec1..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_keyword.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_relation.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_relation.MYD
deleted file mode 100644 (file)
index f963ea5..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_relation.MYD and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_relation.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_relation.MYI
deleted file mode 100644 (file)
index 53190af..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_relation.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_relation.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_relation.frm
deleted file mode 100644 (file)
index 6eef95a..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_relation.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_topic.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_topic.MYD
deleted file mode 100644 (file)
index ad4c19e..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_topic.MYD and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_topic.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_topic.MYI
deleted file mode 100644 (file)
index d8ef966..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_topic.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_topic.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_topic.frm
deleted file mode 100644 (file)
index 3b59b25..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/help_topic.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/host.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/host.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/host.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/host.MYI
deleted file mode 100644 (file)
index 2a1cfcb..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/host.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/host.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/host.frm
deleted file mode 100644 (file)
index 62ae8cd..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/host.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/index_stats.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/index_stats.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/index_stats.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/index_stats.MYI
deleted file mode 100644 (file)
index 05be1c8..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/index_stats.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/index_stats.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/index_stats.frm
deleted file mode 100644 (file)
index e4cf7e0..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/index_stats.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_index_stats.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_index_stats.frm
deleted file mode 100644 (file)
index ed0f019..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_index_stats.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_index_stats.ibd b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_index_stats.ibd
deleted file mode 100644 (file)
index daac102..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_index_stats.ibd and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_table_stats.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_table_stats.frm
deleted file mode 100644 (file)
index 64e3af3..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_table_stats.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_table_stats.ibd b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_table_stats.ibd
deleted file mode 100644 (file)
index 7716fcc..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/innodb_table_stats.ibd and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/plugin.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/plugin.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/plugin.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/plugin.MYI
deleted file mode 100644 (file)
index 5e741be..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/plugin.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/plugin.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/plugin.frm
deleted file mode 100644 (file)
index 7f57bf2..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/plugin.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proc.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proc.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proc.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proc.MYI
deleted file mode 100644 (file)
index 253b7c7..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proc.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proc.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proc.frm
deleted file mode 100644 (file)
index a7c27b0..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proc.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/procs_priv.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/procs_priv.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/procs_priv.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/procs_priv.MYI
deleted file mode 100644 (file)
index 62aca26..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/procs_priv.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/procs_priv.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/procs_priv.frm
deleted file mode 100644 (file)
index 03a6ce6..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/procs_priv.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proxies_priv.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proxies_priv.MYD
deleted file mode 100644 (file)
index 5d8c536..0000000
+++ /dev/null
@@ -1 +0,0 @@
-ÿlocalhost                                                                                                                                                                           root                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                \ 1                                                                                                                                                                                                                                                                                                                                                                                                                                       VUÚXÿae9df72d0f92                                                                                                                                                                        root                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                               
                                                                                                                 \ 1                                                                                                                                                                                                                                                                                                                                                                                                                                       VUÚX
\ No newline at end of file
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proxies_priv.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proxies_priv.MYI
deleted file mode 100644 (file)
index 8ad2f00..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proxies_priv.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proxies_priv.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proxies_priv.frm
deleted file mode 100644 (file)
index 194540f..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/proxies_priv.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/roles_mapping.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/roles_mapping.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/roles_mapping.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/roles_mapping.MYI
deleted file mode 100644 (file)
index adcba59..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/roles_mapping.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/roles_mapping.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/roles_mapping.frm
deleted file mode 100644 (file)
index c3d60e7..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/roles_mapping.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/servers.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/servers.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/servers.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/servers.MYI
deleted file mode 100644 (file)
index c44463f..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/servers.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/servers.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/servers.frm
deleted file mode 100644 (file)
index 8892243..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/servers.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/slow_log.CSM b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/slow_log.CSM
deleted file mode 100644 (file)
index 8d08b8d..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/slow_log.CSM and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/slow_log.CSV b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/slow_log.CSV
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/slow_log.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/slow_log.frm
deleted file mode 100644 (file)
index 3509539..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/slow_log.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/table_stats.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/table_stats.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/table_stats.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/table_stats.MYI
deleted file mode 100644 (file)
index 0d26cd3..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/table_stats.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/table_stats.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/table_stats.frm
deleted file mode 100644 (file)
index 6bac5bd..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/table_stats.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/tables_priv.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/tables_priv.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/tables_priv.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/tables_priv.MYI
deleted file mode 100644 (file)
index 610ffef..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/tables_priv.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/tables_priv.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/tables_priv.frm
deleted file mode 100644 (file)
index 008358b..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/tables_priv.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone.MYI
deleted file mode 100644 (file)
index 99242f2..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone.frm
deleted file mode 100644 (file)
index 5e091a2..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_leap_second.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_leap_second.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_leap_second.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_leap_second.MYI
deleted file mode 100644 (file)
index 8063843..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_leap_second.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_leap_second.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_leap_second.frm
deleted file mode 100644 (file)
index ae89ff5..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_leap_second.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_name.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_name.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_name.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_name.MYI
deleted file mode 100644 (file)
index 46e949c..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_name.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_name.frm
deleted file mode 100644 (file)
index a9e7942..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition.MYI
deleted file mode 100644 (file)
index a98b680..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition.frm
deleted file mode 100644 (file)
index 58743dc..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition_type.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition_type.MYD
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition_type.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition_type.MYI
deleted file mode 100644 (file)
index d4f0bc1..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition_type.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition_type.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition_type.frm
deleted file mode 100644 (file)
index 7d0229c..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/time_zone_transition_type.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/user.MYD b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/user.MYD
deleted file mode 100644 (file)
index 107af55..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/user.MYD and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/user.MYI b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/user.MYI
deleted file mode 100644 (file)
index c6eb47d..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/user.MYI and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/user.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/user.frm
deleted file mode 100644 (file)
index 9e5f937..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/mysql/user.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/accounts.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/accounts.frm
deleted file mode 100644 (file)
index 76257e5..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/accounts.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/cond_instances.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/cond_instances.frm
deleted file mode 100644 (file)
index 746f90d..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/cond_instances.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/db.opt b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/db.opt
deleted file mode 100644 (file)
index 4ed6015..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-default-character-set=utf8
-default-collation=utf8_general_ci
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_current.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_current.frm
deleted file mode 100644 (file)
index 5520614..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_current.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_history.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_history.frm
deleted file mode 100644 (file)
index ff098a6..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_history.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_history_long.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_history_long.frm
deleted file mode 100644 (file)
index 7c80576..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_history_long.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_account_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_account_by_event_name.frm
deleted file mode 100644 (file)
index e550bc8..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_account_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_host_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_host_by_event_name.frm
deleted file mode 100644 (file)
index 07b0159..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_host_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_thread_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_thread_by_event_name.frm
deleted file mode 100644 (file)
index 953423d..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_thread_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_user_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_user_by_event_name.frm
deleted file mode 100644 (file)
index ee203b4..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_by_user_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_global_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_global_by_event_name.frm
deleted file mode 100644 (file)
index 17695df..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_stages_summary_global_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_current.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_current.frm
deleted file mode 100644 (file)
index f1c697d..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_current.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_history.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_history.frm
deleted file mode 100644 (file)
index a22f245..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_history.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_history_long.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_history_long.frm
deleted file mode 100644 (file)
index 90184ae..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_history_long.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_account_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_account_by_event_name.frm
deleted file mode 100644 (file)
index 6d96ec6..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_account_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_digest.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_digest.frm
deleted file mode 100644 (file)
index bd5d1be..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_digest.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_host_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_host_by_event_name.frm
deleted file mode 100644 (file)
index a4cbc46..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_host_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_thread_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_thread_by_event_name.frm
deleted file mode 100644 (file)
index 2463e3f..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_thread_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_user_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_user_by_event_name.frm
deleted file mode 100644 (file)
index 8e1e1b4..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_by_user_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_global_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_global_by_event_name.frm
deleted file mode 100644 (file)
index 0866d50..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_statements_summary_global_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_current.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_current.frm
deleted file mode 100644 (file)
index e511ca1..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_current.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_history.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_history.frm
deleted file mode 100644 (file)
index 0ccd30e..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_history.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_history_long.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_history_long.frm
deleted file mode 100644 (file)
index 6d80113..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_history_long.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_account_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_account_by_event_name.frm
deleted file mode 100644 (file)
index 1e866b6..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_account_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_host_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_host_by_event_name.frm
deleted file mode 100644 (file)
index 2da5615..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_host_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_instance.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_instance.frm
deleted file mode 100644 (file)
index 8830264..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_instance.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_thread_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_thread_by_event_name.frm
deleted file mode 100644 (file)
index d72308c..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_thread_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_user_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_user_by_event_name.frm
deleted file mode 100644 (file)
index 787a3a5..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_by_user_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_global_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_global_by_event_name.frm
deleted file mode 100644 (file)
index f690713..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/events_waits_summary_global_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/file_instances.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/file_instances.frm
deleted file mode 100644 (file)
index 8583666..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/file_instances.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/file_summary_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/file_summary_by_event_name.frm
deleted file mode 100644 (file)
index 591f0e9..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/file_summary_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/file_summary_by_instance.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/file_summary_by_instance.frm
deleted file mode 100644 (file)
index 9f5807e..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/file_summary_by_instance.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/host_cache.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/host_cache.frm
deleted file mode 100644 (file)
index be7423d..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/host_cache.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/hosts.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/hosts.frm
deleted file mode 100644 (file)
index c6aa503..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/hosts.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/mutex_instances.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/mutex_instances.frm
deleted file mode 100644 (file)
index cca0452..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/mutex_instances.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/objects_summary_global_by_type.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/objects_summary_global_by_type.frm
deleted file mode 100644 (file)
index 2fa0064..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/objects_summary_global_by_type.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/performance_timers.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/performance_timers.frm
deleted file mode 100644 (file)
index f7e1660..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/performance_timers.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/rwlock_instances.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/rwlock_instances.frm
deleted file mode 100644 (file)
index 135277b..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/rwlock_instances.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/session_account_connect_attrs.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/session_account_connect_attrs.frm
deleted file mode 100644 (file)
index d73a80e..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/session_account_connect_attrs.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/session_connect_attrs.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/session_connect_attrs.frm
deleted file mode 100644 (file)
index 31c7475..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/session_connect_attrs.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_actors.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_actors.frm
deleted file mode 100644 (file)
index 276443f..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_actors.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_consumers.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_consumers.frm
deleted file mode 100644 (file)
index 80f6dd0..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_consumers.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_instruments.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_instruments.frm
deleted file mode 100644 (file)
index fd83053..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_instruments.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_objects.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_objects.frm
deleted file mode 100644 (file)
index 89ae59b..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_objects.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_timers.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_timers.frm
deleted file mode 100644 (file)
index 7058356..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/setup_timers.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/socket_instances.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/socket_instances.frm
deleted file mode 100644 (file)
index 20963dc..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/socket_instances.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/socket_summary_by_event_name.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/socket_summary_by_event_name.frm
deleted file mode 100644 (file)
index 739b5d5..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/socket_summary_by_event_name.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/socket_summary_by_instance.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/socket_summary_by_instance.frm
deleted file mode 100644 (file)
index 649c26f..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/socket_summary_by_instance.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/table_io_waits_summary_by_index_usage.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/table_io_waits_summary_by_index_usage.frm
deleted file mode 100644 (file)
index 0fb793b..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/table_io_waits_summary_by_index_usage.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/table_io_waits_summary_by_table.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/table_io_waits_summary_by_table.frm
deleted file mode 100644 (file)
index ffe51b3..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/table_io_waits_summary_by_table.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/table_lock_waits_summary_by_table.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/table_lock_waits_summary_by_table.frm
deleted file mode 100644 (file)
index e771595..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/table_lock_waits_summary_by_table.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/threads.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/threads.frm
deleted file mode 100644 (file)
index 121dfd3..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/threads.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/users.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/users.frm
deleted file mode 100644 (file)
index 6bb88f3..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/performance_schema/users.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/support/db.opt b/kubernetes/config/docker/init/src/config/policy/mariadb/data/support/db.opt
deleted file mode 100644 (file)
index d8429c4..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-default-character-set=latin1
-default-collation=latin1_swedish_ci
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/support/db_version.frm b/kubernetes/config/docker/init/src/config/policy/mariadb/data/support/db_version.frm
deleted file mode 100644 (file)
index 29d4a20..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/support/db_version.frm and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/support/db_version.ibd b/kubernetes/config/docker/init/src/config/policy/mariadb/data/support/db_version.ibd
deleted file mode 100644 (file)
index c331218..0000000
Binary files a/kubernetes/config/docker/init/src/config/policy/mariadb/data/support/db_version.ibd and /dev/null differ
diff --git a/kubernetes/config/docker/init/src/config/policy/mariadb/data/xacml/db.opt b/kubernetes/config/docker/init/src/config/policy/mariadb/data/xacml/db.opt
deleted file mode 100644 (file)
index d8429c4..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-default-character-set=latin1
-default-collation=latin1_swedish_ci
diff --git a/kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/push-policies.sh b/kubernetes/config/docker/init/src/config/policy/opt/policy/config/pe/push-policies.sh
deleted file mode 100644 (file)
index 7a71639..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-#! /bin/bash
-
-
-echo "Pushing default policies"
-
-# Sometimes brmsgw gets an error when trying to retrieve the policies on initial push,
-# so for the BRMS policies we will do a push, then delete from the pdp group, then push again.
-# Second push should be successful.
-
-echo "pushPolicy : PUT : com.vFirewall"
-curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-  "pdpGroup": "default",
-  "policyName": "com.vFirewall",
-  "policyType": "MicroService"
-}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy'
-
-sleep 2
-
-echo "pushPolicy : PUT : com.vLoadBalancer"
-curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-  "pdpGroup": "default",
-  "policyName": "com.vLoadBalancer",
-  "policyType": "MicroService"
-}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy' 
-
-sleep 2
-
-echo "pushPolicy : PUT : com.BRMSParamvLBDemoPolicy"
-curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-  "pdpGroup": "default",
-  "policyName": "com.BRMSParamvLBDemoPolicy",
-  "policyType": "BRMS_Param"
-}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy'
-
-sleep 2
-
-echo "pushPolicy : PUT : com.BRMSParamvFWDemoPolicy"
-curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-  "pdpGroup": "default",
-  "policyName": "com.BRMSParamvFWDemoPolicy",
-  "policyType": "BRMS_Param"
-}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy'
-
-sleep 2
-
-echo "deletePolicy : DELETE : com.vFirewall"
-curl -v --silent -X DELETE --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-"pdpGroup": "default",
-"policyComponent": "PDP",
-"policyName": "com.vFirewall",
-"policyType": "MicroService"
-}' 'http://pdp.onap-policy:8081/pdp/api/deletePolicy'
-
-
-sleep 2
-
-echo "deletePolicy : DELETE : com.vLoadBalancer"
-curl -v --silent -X DELETE --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-"pdpGroup": "default",
-"policyComponent": "PDP",
-"policyName": "com.vLoadBalancer",
-"policyType": "MicroService"
-}' 'http://pdp.onap-policy:8081/pdp/api/deletePolicy'
-
-sleep 2
-
-echo "deletePolicy : DELETE : com.BRMSParamvFWDemoPolicy"
-curl -v --silent -X DELETE --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-"pdpGroup": "default",
-"policyComponent": "PDP",
-"policyName": "com.BRMSParamvFWDemoPolicy",
-"policyType": "BRMS_Param"
-}' 'http://pdp.onap-policy:8081/pdp/api/deletePolicy'
-
-
-sleep 2
-
-echo "deletePolicy : DELETE : com.BRMSParamvLBDemoPolicy"
-curl -v --silent -X DELETE --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-"pdpGroup": "default",
-"policyComponent": "PDP",
-"policyName": "com.BRMSParamvLBDemoPolicy",
-"policyType": "BRMS_Param"
-}' 'http://pdp.onap-policy:8081/pdp/api/deletePolicy'
-
-sleep 2
-
-echo "pushPolicy : PUT : com.vFirewall"
-curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-  "pdpGroup": "default",
-  "policyName": "com.vFirewall",
-  "policyType": "MicroService"
-}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy'
-
-sleep 2
-
-echo "pushPolicy : PUT : com.vLoadBalancer"
-curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-  "pdpGroup": "default",
-  "policyName": "com.vLoadBalancer",
-  "policyType": "MicroService"
-}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy' 
-
-sleep 2
-
-echo "pushPolicy : PUT : com.BRMSParamvLBDemoPolicy"
-curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-  "pdpGroup": "default",
-  "policyName": "com.BRMSParamvLBDemoPolicy",
-  "policyType": "BRMS_Param"
-}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy'
-
-sleep 2
-
-echo "pushPolicy : PUT : com.BRMSParamvFWDemoPolicy"
-curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
-  "pdpGroup": "default",
-  "policyName": "com.BRMSParamvFWDemoPolicy",
-  "policyType": "BRMS_Param"
-}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy'
-
index e456c3e..ef46334 100755 (executable)
@@ -101,33 +101,32 @@ do
         #parse yaml files
         for line in  `parse_yaml $filename`
         do
-                #find all image subtag inside converted values.yaml file's lines
-                if echo $line | grep -q $IMAGE_TEXT ; then
-                        #find imageName inside line
-                        imageName=`echo $line | awk -F "=" '{print $2}'`
-                        #remove attional prefix and postfix
-                        imageNameFinal=`echo "$imageName" | sed -e 's/^"//' -e 's/"$//' `
-
-                        #check if line contain Version as a subtag in lines if yes then call docker pull with version
-                        if echo $line | grep -q $IMAGE_VERSION_TEXT ; then
-                                echo docker pull "$imageNameWithVersion":"$imageNameFinal"
-                                docker pull $imageNameWithVersion:$imageNameFinal &
-                                imageNameWithVersion=" "
-                        else
-                                #check Version is not in subtag and old scanned value is present then call docker pull without version
-                                if [ "$imageNameWithVersion" != " " ]; then
-                                        echo docker pull "$imageNameWithVersion"
-                                        docker pull $imageNameWithVersion &
-                                        imageNameWithVersion=$imageNameFinal
+                #skiping commented line
+                if [[ ${line:0:1} != '#' ]]; then
+                        #find all image subtag inside converted values.yaml file's lines
+                        if echo $line | grep -q $IMAGE_TEXT ; then
+                                #find imageName inside line
+                                imageName=`echo $line | awk -F "=" '{print $2}'`
+                                #remove attional prefix and postfix
+                                imageNameFinal=`echo "$imageName" | sed -e 's/^"//' -e 's/"$//' `
+
+                               #check if line contain Version as a subtag in lines if yes then call docker pull with version
+                                if echo $line | grep -q $IMAGE_VERSION_TEXT ; then
+                                        echo docker pull "$imageNameWithVersion":"$imageNameFinal"
+                                        docker pull $imageNameWithVersion:$imageNameFinal &
+                                        imageNameWithVersion=" "
                                 else
-                                        imageNameWithVersion=$imageNameFinal
+                                        #check Version is not in subtag and old scanned value is present then call docker pull without version
+                                        if [ "$imageNameWithVersion" != " " ]; then
+                                                echo docker pull "$imageNameWithVersion"
+                                                docker pull $imageNameWithVersion &
+                                                imageNameWithVersion=$imageNameFinal
+                                        else
+                                                imageNameWithVersion=$imageNameFinal
+                                        fi
                                 fi
                         fi
-
-
                 fi
-
-
         done
 done
 # complete processing
index 7cfa985..6a67572 100644 (file)
@@ -5,4 +5,4 @@ image:
   repository: oomk8s/config-init
   #master => Beijing (major release uprev)
   tag: 2.0.0-SNAPSHOT
-  pullPolicy: Always
\ No newline at end of file
+  pullPolicy: Always
index 65200c3..2daccee 100644 (file)
@@ -4,4 +4,3 @@ image:
   kube2msb: nexus3.onap.org:10001/onap/oom/kube2msb
 kubeMasterUrl: https://kubernetes.default.svc.cluster.local:443
 discoveryUrl: http://msb-discovery.onap-msb:10081
-kubeMasterAuthToken: QmFzaWMgTURrd056VXdSVEk1TVRGRk9UaEVOREJCTWprNlpHTnlaSGt4YzJsSVlsRlVZVU16WTFsUk1XWnZhV0UyY21GRWEwNUhkMDFDVWxORVRIUmlZUT09
index 0885aff..e8b6b81 100644 (file)
@@ -35,6 +35,21 @@ spec:
 ---
 apiVersion: v1
 kind: Service
+metadata:
+  name: logstashinternal
+  namespace: {{ .Values.nsPrefix }}-log
+  labels:
+    app: logstash
+spec:
+  ports:
+  - name: http
+    port: 9600
+    targetPort: 9600
+  selector:
+    app: logstash
+---
+apiVersion: v1
+kind: Service
 metadata:
   name: logstash
   namespace: {{ .Values.nsPrefix }}-log
index 74ab921..bbf7260 100644 (file)
@@ -18,9 +18,14 @@ spec:
     spec:
       initContainers:
       - command:
-        - sysctl
-        - -w
-        - vm.max_map_count=262144
+        - /bin/sh
+        - -c
+        - |
+          sysctl -w vm.max_map_count=262144
+          mkdir -p /logroot/elasticsearch/logs
+          mkdir -p /logroot/elasticsearch/data
+          chmod -R 777 /logroot/elasticsearch
+          chown -R root:root /logroot
         env:
         - name: NAMESPACE
           valueFrom:
@@ -32,6 +37,9 @@ spec:
         image: {{ .Values.image.es_bb }}
         imagePullPolicy: {{ .Values.pullPolicy }}
         name: init-sysctl
+        volumeMounts:
+        - name: elasticsearch-logs
+          mountPath: /logroot/
       containers:
       - name: elasticsearch
         image: {{ .Values.image.elasticsearch}}
@@ -59,7 +67,7 @@ spec:
             claimName: elasticsearch-db
         - name: elasticsearch-logs
           hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/elasticsearch/logs
+            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/
         - name: elasticsearch-conf
           configMap:
             name: elasticsearch-configmap
index 47c72c8..f99657a 100644 (file)
@@ -35,9 +35,12 @@ spec:
       - name: logstash
         image: {{ .Values.image.logstash }}
         ports:
-        - containerPort: 5044 
+        - containerPort: 5044
           name: transport
           protocol: TCP
+        - containerPort: 9600
+          name: http
+          protocol: TCP
         readinessProbe:
           tcpSocket:
             port: 5044
index 0441830..3ee3529 100644 (file)
@@ -1,5 +1,5 @@
 nsPrefix: onap
-pullPolicy: Always 
+pullPolicy: Always
 nodePortPrefix: 302
 image:
   readiness: oomk8s/readiness-check:1.0.0
@@ -7,4 +7,3 @@ image:
   kibana: docker.elastic.co/kibana/kibana:5.5.0
   elasticsearch: docker.elastic.co/elasticsearch/elasticsearch:5.5.0
   es_bb: busybox
-
index 1d9777e..cecf69b 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: dmaap
   namespace: "{{ .Values.nsPrefix }}-message-router"
 spec:
+  replicas: {{ .Values.dmaapReplicas }}
   selector:
     matchLabels:
       app: dmaap
index 509b15f..94e01b3 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: global-kafka
   namespace: "{{ .Values.nsPrefix }}-message-router"
 spec:
+  replicas: {{ .Values.kafkaReplicas }}
   selector:
     matchLabels:
       app: global-kafka
index 47cdb51..5a5bc9f 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: zookeeper
   namespace: "{{ .Values.nsPrefix }}-message-router"
 spec:
+  replicas: {{ .Values.zookeeperReplicas }}
   selector:
     matchLabels:
       app: zookeeper
@@ -41,4 +42,4 @@ spec:
           claimName: message-router-zookeeper
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }}
\ No newline at end of file
+#{{ end }}
index 9206729..6782b71 100644 (file)
@@ -1,6 +1,9 @@
 nsPrefix: onap
 pullPolicy: Always
 nodePortPrefix: 302
+dmaapReplicas: 1
+kafkaReplicas: 1
+zookeeperReplicas: 1
 image:
   readiness: oomk8s/readiness-check:1.0.0
   dmaap: attos/dmaap:latest
index 5f8db61..5c01feb 100644 (file)
@@ -16,6 +16,21 @@ spec:
       name: msb-discovery
     spec:
       hostname: msb-discovery
+      initContainers:
+      - command:
+        - /root/ready.py
+        args:
+        - --container-name
+        - msb-consul
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        image: {{ .Values.image.readiness }}
+        imagePullPolicy: {{ .Values.pullPolicy }}
+        name: msb-discovery-readiness
       containers:
       - args:
         image:  {{ .Values.image.discovery }}
@@ -34,4 +49,4 @@ spec:
         imagePullPolicy: {{ .Values.pullPolicy }}
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }}
\ No newline at end of file
+#{{ end }}
index 714f19b..10cbbe1 100644 (file)
@@ -16,6 +16,21 @@ spec:
       name: msb-eag
     spec:
       hostname: msb-eag
+      initContainers:
+      - command:
+        - /root/ready.py
+        args:
+        - --container-name
+        - msb-discovery
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        image: {{ .Values.image.readiness }}
+        imagePullPolicy: {{ .Values.pullPolicy }}
+        name: msb-eag-readiness
       containers:
       - args:
         image:  {{ .Values.image.apigateway }}
@@ -38,4 +53,4 @@ spec:
         imagePullPolicy: {{ .Values.pullPolicy}}
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }}
\ No newline at end of file
+#{{ end }}
index ca4d5a0..18dbc67 100644 (file)
@@ -16,6 +16,21 @@ spec:
       name: msb-iag
     spec:
       hostname: msb-iag
+      initContainers:
+      - command:
+        - /root/ready.py
+        args:
+        - --container-name
+        - msb-discovery
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        image: {{ .Values.image.readiness }}
+        imagePullPolicy: {{ .Values.pullPolicy }}
+        name: msb-iag-readiness
       containers:
       - args:
         image:  {{ .Values.image.apigateway }}
@@ -38,4 +53,4 @@ spec:
         imagePullPolicy: "{{ .Values.pullPolicy}}"
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }}
\ No newline at end of file
+#{{ end }}
index 7c1b16b..9bbdb92 100644 (file)
@@ -1,6 +1,7 @@
 nsPrefix: onap
 pullPolicy: IfNotPresent
 image:
+  readiness: oomk8s/readiness-check:1.0.0
   consul: consul:0.9.3
   discovery: nexus3.onap.org:10001/onap/msb/msb_discovery:1.0.0
   apigateway: nexus3.onap.org:10001/onap/msb/msb_apigateway:1.0.0
diff --git a/kubernetes/mso/resources/config/log/filebeat/filebeat.yml b/kubernetes/mso/resources/config/log/filebeat/filebeat.yml
new file mode 100644 (file)
index 0000000..3229027
--- /dev/null
@@ -0,0 +1,41 @@
+filebeat.prospectors:
+#it is mandatory, in our case it's log
+- input_type: log
+  #This is the canolical path as mentioned in logback.xml, *.* means it will monitor all files in the directory.
+  paths:
+    - /var/log/onap/*/*/*/*.log
+    - /var/log/onap/*/*/*.log
+    - /var/log/onap/*/*.log
+  #Files older than this should be ignored.In our case it will be 48 hours i.e. 2 days. It is a helping flag for clean_inactive
+  ignore_older: 48h
+  # Remove the registry entry for a file that is more than the specified time. In our case it will be 96 hours, i.e. 4 days. It will help to keep registry records with in limit
+  clean_inactive: 96h
+
+
+# Name of the registry file. If a relative path is used, it is considered relative to the
+# data path. Else full qualified file name.
+#filebeat.registry_file: ${path.data}/registry
+
+
+output.logstash:
+  #List of logstash server ip addresses with port number.
+  #But, in our case, this will be the loadbalancer IP address.
+  #For the below property to work the loadbalancer or logstash should expose 5044 port to listen the filebeat events or port in the property should be changed appropriately.
+  hosts: ["logstash.{{ .Values.nsPrefix }}-log:5044"]
+  #If enable will do load balancing among availabe Logstash, automatically.
+  loadbalance: true
+
+  #The list of root certificates for server verifications.
+  #If certificate_authorities is empty or not set, the trusted
+  #certificate authorities of the host system are used.
+  #ssl.certificate_authorities: $ssl.certificate_authorities
+
+  #The path to the certificate for SSL client authentication. If the certificate is not specified,
+  #client authentication is not available.
+  #ssl.certificate: $ssl.certificate
+
+  #The client certificate key used for client authentication.
+  #ssl.key: $ssl.key
+
+  #The passphrase used to decrypt an encrypted key stored in the configured key file
+  #ssl.key_passphrase: $ssl.key_passphrase
@@ -9,9 +9,9 @@
 
     "mso-api-handler-infra-config":
     {
-      "bpelURL": "http://mso.onap-mso.svc.cluster.local:8080",
+      "bpelURL": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080",
       "bpelAuth": "786864AA53D0DCD881AED1154230C0C3058D58B9339D2EFB6193A0F0D82530E1",
-      "camundaURL": "http://mso.onap-mso.svc.cluster.local:8080",
+      "camundaURL": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080",
       "camundaAuth": "5119D1AF37F671FC01FFAD2151D93EFB2BBB503E879FD07104D024EDDF118FD1"
     },
 
@@ -22,7 +22,7 @@
         "user": "mso",
         "consumerGroup": "sdc-OpenSource-Env1",
         "consumerId": "sdc-COpenSource-Env11",
-        "environmentName": "DMAAP_TOPIC_HERE",
+        "environmentName": "{{ .Values.dmaapTopic }}",
         "asdcAddress": "sdc-be.onap-sdc.svc.cluster.local:8443",
         "password": "613AF3483E695524F9857643B697FA51C7A9A0951094F53791485BF3458F9EADA37DBACCCEBD0CB242B85B4062745247",
         "pollingInterval": 60,
     {
       "sdncurls":
       [
-        "http://sdnhost.onap-sdnc.svc.cluster.local:8282/restconf/operations/L3SDN-API:",
-        "http://sdnhost.onap-sdnc.svc.cluster.local:8282/restconf/config/L3SDN-API:",
-        "http://sdnhost.onap-sdnc.svc.cluster.local:8282/restconf/operations/Firewall-API:",
-        "http://sdnhost.onap-sdnc.svc.cluster.local:8282/restconf/config",
-        "http://sdnhost.onap-sdnc.svc.cluster.local:8282/restconf/operations/VNF-API:",
-        "http://sdnhost.onap-sdnc.svc.cluster.local:8282/restconf/operations/NBNC-API:",
-        "http://sdnhost.onap-sdnc.svc.cluster.local:8282/restconf/operations/NORTHBOUND-API:service-topology-operation",
-        "http://sdnhost.onap-sdnc.svc.cluster.local:8282/restconf/operations/GENERIC-RESOURCE-API:",
-        "http://sdnhost.onap-sdnc.svc.cluster.local:8282/restconf/operations/VNFTOPOLOGYAIC-API:"
+        "http://sdnhost.{{ .Values.nsPrefix }}-sdnc.svc.cluster.local:8282/restconf/operations/L3SDN-API:",
+        "http://sdnhost.{{ .Values.nsPrefix }}-sdnc.svc.cluster.local:8282/restconf/config/L3SDN-API:",
+        "http://sdnhost.{{ .Values.nsPrefix }}-sdnc.svc.cluster.local:8282/restconf/operations/Firewall-API:",
+        "http://sdnhost.{{ .Values.nsPrefix }}-sdnc.svc.cluster.local:8282/restconf/config",
+        "http://sdnhost.{{ .Values.nsPrefix }}-sdnc.svc.cluster.local:8282/restconf/operations/VNF-API:",
+        "http://sdnhost.{{ .Values.nsPrefix }}-sdnc.svc.cluster.local:8282/restconf/operations/NBNC-API:",
+        "http://sdnhost.{{ .Values.nsPrefix }}-sdnc.svc.cluster.local:8282/restconf/operations/NORTHBOUND-API:service-topology-operation",
+        "http://sdnhost.{{ .Values.nsPrefix }}-sdnc.svc.cluster.local:8282/restconf/operations/GENERIC-RESOURCE-API:",
+        "http://sdnhost.{{ .Values.nsPrefix }}-sdnc.svc.cluster.local:8282/restconf/operations/VNFTOPOLOGYAIC-API:"
       ],
 
-      "bpelurl": "http://mso.onap-mso.svc.cluster.local:8080/mso/SDNCAdapterCallbackService",
-      "restbpelurl": "http://mso.onap-mso.svc.cluster.local:8080/mso/WorkflowMessage",
-      "myurl": "http://mso.onap-mso.svc.cluster.local:8080/adapters/rest/SDNCNotify",
+      "bpelurl": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/mso/SDNCAdapterCallbackService",
+      "restbpelurl": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/mso/WorkflowMessage",
+      "myurl": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/adapters/rest/SDNCNotify",
       "sdncauth": "263f7d5f944d4d0c76db74b4148bec67d0bc796a874bc0d2a2a12aae89a866aa69133f700f391f784719a37f6a68d29bf5a2fbae1dab0402db7788c800c5ba73",
       "bpelauth": "5119D1AF37F671FC01FFAD2151D93EFB2BBB503E879FD07104D024EDDF118FD1",
       "sdncconnecttime": "5000"
       [
         {
           "dcp_clli": "DEFAULT_KEYSTONE",
-          "identity_url": "OPENSTACK_KEYSTONE_IP_HERE/v2.0",
-          "mso_id": "OPENSTACK_USERNAME_HERE",
-          "mso_pass": "OPENSTACK_ENCRYPTED_PASSWORD_HERE",
-          "admin_tenant": "OPENSTACK_SERVICE_TENANT_NAME_HERE",
+          "identity_url": "{{ .Values.openStackKeyStoneUrl }}/v2.0",
+          "mso_id": "{{ .Values.openStackUserName }}",
+          "mso_pass": "{{ .Values.openStackEncryptedPasswordHere }}",
+          "admin_tenant":"{{ .Values.openStackServiceTenantName }}",
           "member_role": "admin",
           "tenant_metadata": "true",
           "identity_server_type": "KEYSTONE",
       "cloud_sites":
       [
         {
-          "id": "OPENSTACK_REGION_HERE",
+          "id": "{{ .Values.openStackRegion }}",
           "aic_version": "2.5",
-          "lcp_clli": "OPENSTACK_REGION_HERE",
-          "region_id": "OPENSTACK_REGION_HERE",
+          "lcp_clli": "{{ .Values.openStackRegion }}",
+          "region_id": "{{ .Values.openStackRegion }}",
           "identity_service_id": "DEFAULT_KEYSTONE"
         }
       ],
@@ -92,7 +92,7 @@
 
     "mso-workflow-message-adapter-config":
     {
-      "wmbpelurl": "http://mso.onap-mso.svc.cluster.local:8080/mso/WorkflowMessage",
+      "wmbpelurl": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/mso/WorkflowMessage",
       "wmbpelauth": "5119D1AF37F671FC01FFAD2151D93EFB2BBB503E879FD07104D024EDDF118FD1"
     },
 
       "versionIdL3ToHigherLayerDeleteBonding": "52dbec20-47aa-42e4-936c-331d8e350d44",
       "infraCustomerId": "21014aa2-526b-11e6-beb8-9e71128cae77",
       "sniroAuth": "test:testpwd",
-      "sniroEndpoint": "http://sniro-emulator.onap-mock.svc.cluster.local:8080/sniro/api/v2/placement",
+      "sniroEndpoint": "http://sniro-emulator.{{ .Values.nsPrefix }}-mock.svc.cluster.local:8080/sniro/api/v2/placement",
       "sniroTimeout": "PT30M",
-      "serviceAgnosticSniroHost": "http://sniro-emulator.onap-mock.svc.cluster.local:8080",
+      "serviceAgnosticSniroHost": "http://sniro-emulator.{{ .Values.nsPrefix }}-mock.svc.cluster.local:8080",
       "serviceAgnosticSniroEndpoint": "/sniro/api/v2/placement",
-      "aaiEndpoint": "https://aai-service.onap-aai.svc.cluster.local:8443",
+      "aaiEndpoint": "https://aai-service.{{ .Values.nsPrefix }}-aai.svc.cluster.local:8443",
       "aaiAuth": "2630606608347B7124C244AB0FE34F6F",
       "adaptersNamespace": "http://org.openecomp.mso",
-      "adaptersCompletemsoprocessEndpoint": "http://mso.onap-mso.svc.cluster.local:8080/CompleteMsoProcess",
-      "adaptersDbEndpoint": "http://mso.onap-mso.svc.cluster.local:8080/dbadapters/MsoRequestsDbAdapter",
-      "adaptersOpenecompDbEndpoint": "http://mso.onap-mso.svc.cluster.local:8080/dbadapters/RequestsDbAdapter",
-      "catalogDbEndpoint": "http://mso.onap-mso.svc.cluster.local:8080/ecomp/mso/catalog",
-      "adaptersSdncEndpoint": "http://mso.onap-mso.svc.cluster.local:8080/adapters/SDNCAdapter",
-      "adaptersSdncRestEndpoint": "http://mso.onap-mso.svc.cluster.local:8080/adapters/rest/v1/sdnc",
-      "adaptersTenantEndpoint": "http://mso.onap-mso.svc.cluster.local:8080/tenants/TenantAdapter",
+      "adaptersCompletemsoprocessEndpoint": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/CompleteMsoProcess",
+      "adaptersDbEndpoint": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/dbadapters/MsoRequestsDbAdapter",
+      "adaptersOpenecompDbEndpoint": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/dbadapters/RequestsDbAdapter",
+      "catalogDbEndpoint": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/ecomp/mso/catalog",
+      "adaptersSdncEndpoint": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/adapters/SDNCAdapter",
+      "adaptersSdncRestEndpoint": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/adapters/rest/v1/sdnc",
+      "adaptersTenantEndpoint": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/tenants/TenantAdapter",
       "adaptersDbAuth": "6B0E6863FB8EE010AB6F191B3C0489437601E81DC7C86305CB92DB98AFC53D74",
-      "adaptersWorkflowMessageEndpoint": "http://mso.onap-mso.svc.cluster.local:8080/workflows/messages/message",
-      "workflowMessageEndpoint": "http://mso.onap-mso.svc.cluster.local:8080/mso/WorkflowMessage",
-      "workflowSdncAdapterCallback": "http://mso.onap-mso.svc.cluster.local:8080/mso/SDNCAdapterCallbackService",
+      "adaptersWorkflowMessageEndpoint": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/workflows/messages/message",
+      "workflowMessageEndpoint": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/mso/WorkflowMessage",
+      "workflowSdncAdapterCallback": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/mso/SDNCAdapterCallbackService",
       "workflowSdncReplicationDelay": "PT5S",
       "workflowAaiDistributionDelay": "PT30S",
       "msoKey": "07a7159d3bf51a0e53be7a8f89699be7",
       "adaptersPoAuth": "6B0E6863FB8EE010AB6F191B3C0489437601E81DC7C86305CB92DB98AFC53D74",
       "sdncTimeout": "PT5M",
       "rollback": "true",
-      "adaptersNetworkEndpoint": "http://mso.onap-mso.svc.cluster.local:8080/networks/NetworkAdapter",
-      "adaptersNetworkRestEndpoint": "http://mso.onap-mso.svc.cluster.local:8080/networks/rest/v1/networks",
-      "adaptersVnfAsyncEndpoint": "http://mso.onap-mso.svc.cluster.local:8080/vnfs/VnfAdapterAsync",
-      "workflowVnfAdapterDeleteCallback": "http://mso.onap-mso.svc.cluster.local:8080/mso/vnfAdapterNotify",
-      "workflowVnfAdapterCreateCallback": "http://mso.onap-mso.svc.cluster.local:8080/mso/vnfAdapterNotify",
-      "adaptersVnfRestEndpoint": "http://mso.onap-mso.svc.cluster.local:8080/vnfs/rest/v1/vnfs",
-      "workflowVnfAdapterRestCallback": "http://mso.onap-mso.svc.cluster.local:8080/mso/vnfAdapterRestNotify",
+      "adaptersNetworkEndpoint": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/networks/NetworkAdapter",
+      "adaptersNetworkRestEndpoint": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/networks/rest/v1/networks",
+      "adaptersVnfAsyncEndpoint": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/vnfs/VnfAdapterAsync",
+      "workflowVnfAdapterDeleteCallback": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/mso/vnfAdapterNotify",
+      "workflowVnfAdapterCreateCallback": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/mso/vnfAdapterNotify",
+      "adaptersVnfRestEndpoint": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/vnfs/rest/v1/vnfs",
+      "workflowVnfAdapterRestCallback": "http://mso.{{ .Values.nsPrefix }}-mso.svc.cluster.local:8080/mso/vnfAdapterRestNotify",
       "poTimeout": "PT5M",
       "sdncFirewallYangModel": "http://com/att/svc/mis/firewall-lite-gui",
       "sdncFirewallYangModelVersion": "2015-05-15",
diff --git a/kubernetes/mso/templates/db-deployment-configmap.yaml b/kubernetes/mso/templates/db-deployment-configmap.yaml
new file mode 100644 (file)
index 0000000..ac3c3bf
--- /dev/null
@@ -0,0 +1,65 @@
+#{{ if not .Values.disableMsoMariadb }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: mso-confd-configmap
+  namespace: {{ .Values.nsPrefix }}-mso
+data:
+{{ tpl (.Files.Glob "resources/config/mariadb/conf.d/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: mso-docker-entry-initd-configmap
+  namespace: {{ .Values.nsPrefix }}-mso
+data:
+{{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: mso-automated-tests-configmap
+  namespace: {{ .Values.nsPrefix }}-mso
+data:
+{{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/automated-tests/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: mso-bulkload-default-configmap
+  namespace: {{ .Values.nsPrefix }}-mso
+data:
+{{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/default/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: mso-demo-dns-configmap
+  namespace: {{ .Values.nsPrefix }}-mso
+data:
+{{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-dns/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: mso-demo-vfw-configmap
+  namespace: {{ .Values.nsPrefix }}-mso
+data:
+{{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-vfw/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: mso-scripts-camunda-configmap
+  namespace: {{ .Values.nsPrefix }}-mso
+data:
+{{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: mso-main-schemas-configmap
+  namespace: {{ .Values.nsPrefix }}-mso
+data:
+{{ tpl (.Files.Glob "resources/config/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/*").AsConfig . | indent 2 }}
+#{{ end }}
index 820d7e2..cc4c656 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: mariadb
   namespace: "{{ .Values.nsPrefix }}-mso"
 spec:
+  replicas: {{ .Values.dbReplicas }}
   selector:
     matchLabels:
       app: mariadb
@@ -29,8 +30,24 @@ spec:
           readOnly: true
         - mountPath: /etc/mysql/conf.d
           name: mso-mariadb-conf
-        - mountPath: /docker-entrypoint-initdb.d
+        - mountPath: /docker-entrypoint-initdb.d/02-load-additional-changes.sh
           name: mso-mariadb-docker-entrypoint-initdb
+          subPath: 02-load-additional-changes.sh
+        - mountPath: /docker-entrypoint-initdb.d/01-load-default-sql-files.sh
+          name: mso-mariadb-docker-entrypoint-initdb
+          subPath: 01-load-default-sql-files.sh
+        - mountPath: /docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/automated-tests
+          name: mso-mariadb-docker-entrypoint-automated-tests
+        - mountPath: /docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/default
+          name: mso-mariadb-docker-entrypoint-bulkload-default
+        - mountPath: /docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-dns
+          name: mso-mariadb-docker-entrypoint-demo-dns
+        - mountPath: /docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-vfw
+          name: mso-mariadb-docker-entrypoint-demo-vfw
+        - mountPath: /docker-entrypoint-initdb.d/db-sql-scripts/camunda
+          name: mso-mariadb-docker-entrypoint-camunda
+        - mountPath: /docker-entrypoint-initdb.d/db-sql-scripts/main-schemas
+          name: mso-mariadb-docker-entrypoint-main-schemas
         - mountPath: /var/lib/mysql
           name: mso-mariadb-data
         ports:
@@ -42,18 +59,36 @@ spec:
           initialDelaySeconds: 5
           periodSeconds: 10
       volumes:
-        - name: localtime
-          hostPath:
-            path: /etc/localtime
         - name: mso-mariadb-conf
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/mso/mariadb/conf.d
+          configMap:
+            name: mso-confd-configmap
         - name: mso-mariadb-docker-entrypoint-initdb
+          configMap:
+            name: mso-docker-entry-initd-configmap
+        - name: mso-mariadb-docker-entrypoint-automated-tests
+          configMap:
+            name: mso-automated-tests-configmap
+        - name: mso-mariadb-docker-entrypoint-bulkload-default
+          configMap:
+            name: mso-bulkload-default-configmap
+        - name: mso-mariadb-docker-entrypoint-demo-dns
+          configMap:
+            name: mso-demo-dns-configmap
+        - name: mso-mariadb-docker-entrypoint-demo-vfw
+          configMap:
+            name: mso-demo-vfw-configmap
+        - name: mso-mariadb-docker-entrypoint-camunda
+          configMap:
+            name: mso-scripts-camunda-configmap
+        - name: mso-mariadb-docker-entrypoint-main-schemas
+          configMap:
+            name: mso-main-schemas-configmap
+        - name: localtime
           hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/mso/mariadb/docker-entrypoint-initdb.d
+            path: /etc/localtime
         - name: mso-mariadb-data
           persistentVolumeClaim:
             claimName: mso-db
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }}
\ No newline at end of file
+#{{ end }}
diff --git a/kubernetes/mso/templates/mso-deployment-configmap.yaml b/kubernetes/mso/templates/mso-deployment-configmap.yaml
new file mode 100644 (file)
index 0000000..55b9f81
--- /dev/null
@@ -0,0 +1,17 @@
+#{{ if not .Values.disableMsoMso }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: mso-config-mso-configmap
+  namespace: {{ .Values.nsPrefix }}-mso
+data:
+{{ tpl (.Files.Glob "resources/config/mso/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: mso-docker-file-configmap
+  namespace: {{ .Values.nsPrefix }}-mso
+data:
+{{ tpl (.Files.Glob "resources/config/docker-files/scripts/start-jboss-server.sh").AsConfig . | indent 2 }}
+#{{ end }}
index 2abb290..b414640 100644 (file)
@@ -5,7 +5,7 @@ metadata:
   name: mso
   namespace: "{{ .Values.nsPrefix }}-mso"
 spec:
-  replicas: 1
+  replicas: {{ .Values.msoReplicas }}
   selector:
     matchLabels:
       app: mso
@@ -40,34 +40,46 @@ spec:
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
-        - mountPath: /shared
+        - mountPath: /shared/
           name: mso
         - mountPath: /tmp/start-jboss-server.sh
           name: mso-docker-files
+          subPath: start-jboss-server.sh
         - mountPath: /var/log/onap
           name: mso-logs
-        - mountPath: /var/berks-cookbooks/mso-config/files/default/mso-api-handler-infra-config/logback.apihandler-infra.xml
-          name: logback-apihandler-infra
         - mountPath: /var/berks-cookbooks/mso-config/files/default/mso-po-adapter-config/logback.network.xml
-          name: logback-network
+          name: mso-logback
+          subPath: logback.network.xml
         - mountPath: /var/berks-cookbooks/mso-config/files/default/mso-po-adapter-config/logback.tenant.xml
-          name: logback-tenant
+          name: mso-logback
+          subPath: logback.tenant.xml
         - mountPath: /var/berks-cookbooks/mso-config/files/default/mso-po-adapter-config/logback.vnf.xml
-          name: logback-vnf
+          name: mso-logback
+          subPath: logback.vnf.xml
         - mountPath: /var/berks-cookbooks/mso-config/files/default/mso-po-adapter-config/logback.vfc.xml
-          name: logback-vfc
+          name: mso-logback
+          subPath: logback.vfc.xml
+        - mountPath: /var/berks-cookbooks/mso-config/files/default/mso-api-handler-infra-config/logback.apihandler-infra.xml
+          name: mso-logback
+          subPath: logback.apihandler-infra.xml
         - mountPath: /var/berks-cookbooks/mso-config/files/default/mso-appc-adapter-config/logback.appc.xml
-          name: logback-appc
+          name: mso-logback
+          subPath: logback.appc.xml
         - mountPath: /var/berks-cookbooks/mso-config/files/default/mso-requests-db-adapter-config/logback.msorequestsdbadapter.xml
-          name: logback-msorequestsdbadapter
+          name: mso-logback
+          subPath: logback.msorequestsdbadapter.xml
         - mountPath: /var/berks-cookbooks/mso-config/files/default/mso-asdc-controller-config/logback.asdc.xml
-          name: logback-asdc
+          name: mso-logback
+          subPath: logback.asdc.xml
         - mountPath: /var/berks-cookbooks/mso-config/files/default/mso-sdnc-adapter-config/logback.sdnc.xml
-          name: logback-sdnc
+          name: mso-logback
+          subPath: logback.sdnc.xml
         - mountPath: /var/berks-cookbooks/mso-config/files/default/mso-bpmn-config/logback.bpmn.xml
-          name: logback-bpmn
+          name: mso-logback
+          subPath: logback.bpmn.xml
         - mountPath: /var/berks-cookbooks/mso-config/files/default/mso-workflow-message-adapter-config/logback.workflow-message-adapter.xml
-          name: logback-workflow-message-adapter
+          name: mso-logback
+          subPath: logback.workflow-message-adapter.xml
         env:
         - name: JBOSS_DEBUG
           value: "false"
@@ -88,6 +100,7 @@ spec:
         volumeMounts:
         - mountPath: /usr/share/filebeat/filebeat.yml
           name: filebeat-conf
+          subPath: filebeat.yml
         - mountPath: /var/log/onap
           name: mso-logs
         - mountPath: /usr/share/filebeat/data
@@ -96,52 +109,36 @@ spec:
         - name: localtime
           hostPath:
             path: /etc/localtime
-        - name: logback-apihandler-infra
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/mso/logback.apihandler-infra.xml
-        - name: logback-network
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/mso/logback.network.xml
-        - name: logback-tenant
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/mso/logback.tenant.xml
-        - name: logback-vnf
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/mso/logback.vnf.xml
-        - name: logback-vfc
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/mso/logback.vfc.xml
-        - name: logback-appc
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/mso/logback.appc.xml
-        - name: logback-msorequestsdbadapter
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/mso/logback.msorequestsdbadapter.xml
-        - name: logback-asdc
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/mso/logback.asdc.xml
-        - name: logback-sdnc
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/mso/logback.sdnc.xml
-        - name: logback-bpmn
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/mso/logback.bpmn.xml
-        - name: logback-workflow-message-adapter
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/mso/logback.workflow-message-adapter.xml
-        - name: mso
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/mso/mso
+        - name: mso-logback
+          configMap:
+            name: mso-log-configmap
         - name: filebeat-conf
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/logback/filebeat.yml
+          configMap:
+            name: mso-filebeat-configmap
+        - name: mso
+          configMap:
+            name: mso-config-mso-configmap
+            items:
+            - key: mso-docker.json
+              path: mso-docker.json
+              mode: 0755
+            - key: aai.crt
+              path: aai.crt
+              mode: 0755
+            - key: encryption.key
+              path: encryption.key
+              mode: 0644
         - name: mso-logs
           emptyDir: {}
         - name: mso-data-filebeat
           emptyDir: {}
         - name: mso-docker-files
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/mso/docker-files/scripts/start-jboss-server.sh
+          configMap:
+            name: mso-docker-file-configmap
+            items:
+            - key: start-jboss-server.sh
+              path: start-jboss-server.sh
+              mode: 0755
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
 #{{ end }}
diff --git a/kubernetes/mso/templates/mso-log-configmap.yaml b/kubernetes/mso/templates/mso-log-configmap.yaml
new file mode 100644 (file)
index 0000000..3bf8412
--- /dev/null
@@ -0,0 +1,17 @@
+#{{ if not .Values.disableMsoMso }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: mso-log-configmap
+  namespace: {{ .Values.nsPrefix }}-mso
+data:
+{{ tpl (.Files.Glob "resources/config/log/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: mso-filebeat-configmap
+  namespace: {{ .Values.nsPrefix }}-mso
+data:
+{{ tpl (.Files.Glob "resources/config/log/filebeat/*").AsConfig . | indent 2 }}
+#{{ end }}
index fd8b669..77d3d60 100644 (file)
@@ -1,6 +1,14 @@
 nsPrefix: onap
-pullPolicy: Always 
+pullPolicy: Always
 nodePortPrefix: 302
+openStackUserName: "vnf_user"
+openStackRegion: "RegionOne"
+openStackKeyStoneUrl: "http://1.2.3.4:5000"
+dmaapTopic: "AUTO"
+openStackServiceTenantName: "service"
+openStackEncryptedPasswordHere: "c124921a3a0efbe579782cde8227681e"
+msoReplicas: 1
+dbReplicas: 1
 image:
   readiness: oomk8s/readiness-check:1.0.0
   mso: nexus3.onap.org:10001/openecomp/mso:v1.1.1
index 72d852d..5a99b19 100755 (executable)
@@ -84,7 +84,13 @@ create_onap_helm() {
     fi
   fi
 
-  cmd=`echo helm install $LOCATION/$2/ --name $1-$2 --namespace $1 --set nsPrefix=$1,nodePortPrefix=$3 ${HELM_VALUES_ADDITION}`
+  # assign default auth token
+  if [[ -z $ONAP_DEFAULT_AUTH_TOKEN ]]; then
+    DEFAULT_SECRET=`kubectl get secrets -n $1-$2 | grep default-token |  awk '{ print $1}'`
+    ONAP_DEFAULT_AUTH_TOKEN=`kubectl get secrets $DEFAULT_SECRET -n $1-$2 -o yaml | grep  'token:'  | awk '{ print $2}' | base64 --decode`
+  fi
+
+  cmd=`echo helm install $LOCATION/$2/ --name $1-$2 --namespace $1 --set nsPrefix=$1,nodePortPrefix=$3,kubeMasterAuthToken=$ONAP_DEFAULT_AUTH_TOKEN ${HELM_VALUES_ADDITION}`
   eval ${cmd}
   check_return_code $cmd
 }
diff --git a/kubernetes/oneclick/tools/autoCleanConfig.bash b/kubernetes/oneclick/tools/autoCleanConfig.bash
new file mode 100644 (file)
index 0000000..e274e0d
--- /dev/null
@@ -0,0 +1,54 @@
+#!/bin/bash
+########################################################################################
+# This script wraps {$OOM}/kubernetes/oneclick/deleteAll.sh script along with          #
+# the following steps to clean up ONAP configure for specified namespace:              #
+#     - remove namespace                                                               #
+#     - remove release                                                                 #
+#     - remove shared directory                                                        #
+#                                                                                      #
+# To run it, just enter the following command:                                         #
+#    ./autoCleanConfig.bash <namespace, default is "onap">                             #
+########################################################################################
+
+
+NS=$1
+if [[ -z $NS ]]
+then
+  echo "Namespace is not specified, use onap namespace."
+  NS="onap"
+fi
+
+echo "Clean up $NS configuration"
+cd ..
+./deleteAll.bash -n $NS -y
+cd -
+
+echo "----------------------------------------------
+Force remove namespace..."
+kubectl delete namespace $NS
+echo "...done : kubectl get namespace
+-----------------------------------------------
+>>>>>>>>>>>>>> k8s namespace"
+kubectl get namespace
+while [[ ! -z `kubectl get namespace|grep $NS` ]]
+do
+  echo "Wait for namespace $NS to be deleted
+-----------------------------------------------
+>>>>>>>>>>>>>> k8s namespace"
+  kubectl get namespace
+  sleep 2
+done
+
+echo "Force delete helm process ..."
+helm delete $NS-config --purge --debug
+echo "...done : helm ls --all
+ -----------------------------------------------
+>>>>>>>>>>>>>> helm"
+helm ls --all
+
+echo "Remove $NS dockerdata..."
+sudo rm -rf /dockerdata-nfs/$NS
+echo "...done : ls -altr /dockerdata-nfs
+ -----------------------------------------------
+>>>>>>>>>>>>>> /dockerdata-nfs directory"
+ls -altr /dockerdata-nfs
diff --git a/kubernetes/oneclick/tools/autoCreateConfig.bash b/kubernetes/oneclick/tools/autoCreateConfig.bash
new file mode 100644 (file)
index 0000000..99ea03e
--- /dev/null
@@ -0,0 +1,65 @@
+#!/bin/bash
+########################################################################################
+# This script wraps {$OOM}/kubernetes/config/createConfig.sh script                    #
+# and will only terminate when the configuration is Completed or failed                #
+#                                                                                      #
+# To run it, just enter the following command:                                         #
+#    ./autoCreateConfig.bash <namespace, default is "onap">                            #
+########################################################################################
+
+
+NS=$1
+if [[ -z $NS ]]
+then
+  echo "Namespace is not specified, use onap namespace."
+  NS="onap"
+fi
+
+echo "Create $NS config under config directory..."
+cd ../../config
+./createConfig.sh -n $NS
+cd -
+
+
+echo "...done : kubectl get namespace
+-----------------------------------------------
+>>>>>>>>>>>>>> k8s namespace"
+kubectl get namespace
+
+
+echo "
+-----------------------------------------------
+>>>>>>>>>>>>>> helm : helm ls --all"
+helm ls --all
+
+
+echo "
+-----------------------------------------------
+>>>>>>>>>>>>>> pod : kubectl get pods -n $NS -a"
+kubectl get pods -n $NS -a
+
+
+while true
+do
+  echo "wait for $NS config pod reach to Completed STATUS"
+  sleep 5
+  echo "-----------------------------------------------"
+  kubectl get pods -n $NS -a
+
+  status=`kubectl get pods -n $NS -a |grep config |xargs echo | cut -d' ' -f3`
+
+  if [ "$status" = "Completed" ]
+  then
+    echo "$NS config is Completed!!!"
+    break
+  fi
+
+  if [ "$status" = "Error" ]
+  then
+    echo "
+$NS config is failed with Error!!!
+Logs are:"
+    kubectl logs config -n $NS -f
+    break
+  fi
+done
diff --git a/kubernetes/oneclick/tools/collectInfo.bash b/kubernetes/oneclick/tools/collectInfo.bash
new file mode 100644 (file)
index 0000000..734c5a6
--- /dev/null
@@ -0,0 +1,171 @@
+#!/bin/bash
+
+NS=
+OUT_NAME=onap_info_$(date +%y.%m.%d_%H.%M.%S.%N)
+OUT_FILE=
+OUT_DIR=$(dirname "$0")
+TMP_DIR=$(dirname $(mktemp -u))
+CONTAINER_LOGS_PATH=/var/log/onap
+CP_CONTAINER_LOGS=false
+
+if [ ! -z "$DEBUG" ]; then
+  set -x
+fi
+
+usage() {
+  cat <<EOF
+Utility script collecting various information about ONAP deployment on kubernetes.
+
+Usage: $0 [PARAMs]
+-u                  : Display usage
+-n [NAMESPACE]      : Kubernetes namespace (required)
+-a [APP]            : Specify a specific ONAP component (default: all)
+-d [OUT_DIR]        : Specify output folder for the collected info pack file
+                      (default: current dir)
+-f [OUT_FILE]       : Specify output file for the collected info
+                      (default: file name with timestamp)
+-c                  : Collect log files from containers, from path ${CONTAINER_LOGS_PATH}
+EOF
+}
+
+call_with_log() {
+  local _cmd=$1
+  local _log=$2
+  # Make sure output dir exists
+  mkdir -p "$(dirname "$_log")"
+  printf "Command: ${_cmd}\n" >> ${_log}
+  printf "================================================================================\n" >> ${_log}
+  eval "${_cmd}" >> ${_log} 2>&1
+  printf "================================================================================\n" >> ${_log}
+}
+
+collect_pod_info() {
+  local _ns=$1
+  local _id=$2
+  local _log_dir=$3
+  local _cp_logs=$4
+  declare -i _i=0
+  kubectl -n $_ns get pods $_id -o=jsonpath='{range .spec.containers[*]}{.name}{"\n"}{end}' | while read c; do
+    call_with_log "kubectl -n $_ns logs $_id -c $c" "$_log_dir/$_id-$c.log"
+    if [ "$_i" -eq "0" ] && [ "$_cp_logs" == "true" ]; then
+      # copy logs from 1st container only as logs dir is shared between the containers
+      local _cmd="kubectl cp $_ns/$_id:${CONTAINER_LOGS_PATH} $_log_dir/$_id-$c -c $c"
+      if [ -z "$DEBUG" ]; then
+        _cmd+=" > /dev/null 2>&1"
+      fi
+      eval "${_cmd}"
+    fi
+    ((_i++))
+  done
+}
+
+collect_ns_info() {
+  local _ns=$1
+  local _log_dir=$2/$_ns
+  call_with_log "kubectl -n $_ns get services -o=wide" "$_log_dir/list_services.log"
+  kubectl -n "$_ns" get services | while read i; do
+    local _id=`echo -n $i | tr -s ' ' | cut -d' ' -n -f1`
+    if [ "$_id" == "NAME" ]; then
+      continue
+    fi
+    call_with_log "kubectl -n $_ns describe services $_id" "$_log_dir/describe_services/$_id.log"
+  done
+  call_with_log "kubectl -n $_ns get pods -o=wide" "$_log_dir/list_pods.log"
+  kubectl -n "$_ns" get pods | while read i; do
+    local _id=`echo -n $i | tr -s ' ' | cut -d' ' -n -f1`
+    if [ "$_id" == "NAME" ]; then
+      continue
+    fi
+    call_with_log "kubectl -n $_ns describe pods $_id" "$_log_dir/describe_pods/$_id.log"
+    collect_pod_info "$_ns" "$_id" "$_log_dir/logs" "${CP_CONTAINER_LOGS}"
+  done
+}
+
+while getopts ":un:a:d:f:c" PARAM; do
+  case $PARAM in
+    u)
+      usage
+      exit 1
+      ;;
+    n)
+      NS=${OPTARG}
+      ;;
+    a)
+      APP=${OPTARG}
+      if [[ -z $APP ]]; then
+        usage
+        exit 1
+      fi
+      ;;
+    d)
+      OUT_DIR=${OPTARG}
+      if [[ -z $OUT_DIR ]]; then
+        usage
+        exit 1
+      fi
+      ;;
+    f)
+      OUT_FILE=${OPTARG}
+      if [[ -z $OUT_FILE ]]; then
+        usage
+        exit 1
+      fi
+      ;;
+    c)
+      CP_CONTAINER_LOGS=true
+      ;;
+    ?)
+      usage
+      exit
+      ;;
+  esac
+done
+
+if [ -z "$NS" ]; then
+  usage
+  exit 1
+fi
+
+if [[ -z $OUT_FILE ]]; then
+  OUT_FILE=$OUT_NAME.tgz
+fi
+
+if [ ! -z "$APP" ]; then
+  _APPS=($APP)
+else
+  _APPS=(`kubectl get namespaces | grep "^$NS-" | tr -s ' ' | cut -d' ' -n -f1 | sed -e "s/^$NS-//"`)
+fi
+
+printf "Collecting information about ONAP deployment...\n"
+printf "Components: %s\n" "${_APPS[*]}"
+
+# Collect common info
+mkdir -p ${TMP_DIR}/${OUT_NAME}/
+echo "${_APPS[*]}" > ${TMP_DIR}/${OUT_NAME}/component-list.log
+printf "Collecting Helm info\n"
+call_with_log "helm version" "${TMP_DIR}/${OUT_NAME}/helm-version.log"
+call_with_log "helm list" "${TMP_DIR}/${OUT_NAME}/helm-list.log"
+
+printf "Collecting Kubernetes info\n"
+call_with_log "kubectl version" "${TMP_DIR}/${OUT_NAME}/k8s-version.log"
+call_with_log "kubectl get nodes -o=wide" "${TMP_DIR}/${OUT_NAME}/k8s-nodes.log"
+call_with_log "kubectl cluster-info" "${TMP_DIR}/${OUT_NAME}/k8s-cluster-info.log"
+call_with_log "kubectl cluster-info dump" "${TMP_DIR}/${OUT_NAME}/k8s-cluster-info-dump.log"
+call_with_log "kubectl top node" "${TMP_DIR}/${OUT_NAME}/k8s-top-node.log"
+
+# Collect per-component info
+for i in ${_APPS[@]}; do
+  printf "Writing Kubernetes info of component $i\n"
+  collect_ns_info "$NS-$i" "${TMP_DIR}/${OUT_NAME}"
+done
+
+# Pack and cleanup
+mkdir -p ${OUT_DIR}
+_OUT_DIR=`readlink -e ${OUT_DIR}`
+printf "Packing output to ${_OUT_DIR}/${OUT_FILE}...\n"
+cd ${TMP_DIR}
+tar cfz ${_OUT_DIR}/${OUT_FILE} ${OUT_NAME}
+cd -
+printf "Cleaning up...\n"
+rm -rf ${TMP_DIR}/${OUT_NAME}
+printf "Done\n"
diff --git a/kubernetes/policy/resources/config/log/drools/logback.xml b/kubernetes/policy/resources/config/log/drools/logback.xml
new file mode 100644 (file)
index 0000000..daecf97
--- /dev/null
@@ -0,0 +1,107 @@
+<!--\r
+  ============LICENSE_START=======================================================\r
+  policy-management\r
+  ================================================================================\r
+  Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.\r
+  ================================================================================\r
+  Licensed under the Apache License, Version 2.0 (the "License");\r
+  you may not use this file except in compliance with the License.\r
+  You may obtain a copy of the License at\r
+\r
+       http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+  Unless required by applicable law or agreed to in writing, software\r
+  distributed under the License is distributed on an "AS IS" BASIS,\r
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+  See the License for the specific language governing permissions and\r
+  limitations under the License.\r
+  ============LICENSE_END=========================================================\r
+  -->\r
+\r
+<configuration scan="true" scanPeriod="30 seconds" debug="false">\r
+\r
+       <property name="logDir" value="/var/log/onap" />\r
+\r
+       <property name="errorLog" value="error" />\r
+       <property name="debugLog" value="debug" />\r
+       <property name="networkLog" value="network" />\r
+\r
+       <property name="debugPattern" value="[%date|%level|%logger{0}|%thread] %msg%n" />\r
+       <property name="errorPattern" value="${debugPattern}" />\r
+       <property name="networkPattern" value="[%d|%t]%m%n" />\r
+\r
+       <appender name="ErrorOut" class="ch.qos.logback.core.rolling.RollingFileAppender">\r
+               <file>${logDir}/${errorLog}.log</file>\r
+               <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
+                       <fileNamePattern>${logDir}/${errorLog}.%i.log.zip</fileNamePattern>\r
+                       <minIndex>1</minIndex>\r
+                       <maxIndex>5</maxIndex>\r
+               </rollingPolicy>\r
+               <filter class="ch.qos.logback.classic.filter.ThresholdFilter">\r
+                       <level>WARN</level>\r
+               </filter>\r
+               <triggeringPolicy\r
+                       class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
+                       <maxFileSize>15MB</maxFileSize>\r
+               </triggeringPolicy>\r
+               <encoder>\r
+                       <pattern>${errorPattern}</pattern>\r
+               </encoder>\r
+       </appender>\r
+\r
+       <appender name="AsyncErrorOut" class="ch.qos.logback.classic.AsyncAppender">\r
+               <appender-ref ref="ErrorOut" />\r
+       </appender>\r
+\r
+       <appender name="DebugOut" class="ch.qos.logback.core.rolling.RollingFileAppender">\r
+               <file>${logDir}/${debugLog}.log</file>\r
+               <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
+                       <fileNamePattern>${logDir}/${debugLog}.%i.log.zip</fileNamePattern>\r
+                       <minIndex>1</minIndex>\r
+                       <maxIndex>9</maxIndex>\r
+               </rollingPolicy>\r
+               <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
+                       <maxFileSize>20MB</maxFileSize>\r
+               </triggeringPolicy>\r
+               <encoder>\r
+                       <pattern>${debugPattern}</pattern>\r
+               </encoder>\r
+       </appender>\r
+\r
+       <appender name="AsyncDebugOut" class="ch.qos.logback.classic.AsyncAppender">\r
+               <appender-ref ref="DebugOut" />\r
+       </appender>\r
+\r
+       <appender name="NetworkOut" class="ch.qos.logback.core.rolling.RollingFileAppender">\r
+               <file>${logDir}/${networkLog}.log</file>\r
+               <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">\r
+                       <fileNamePattern>${logDir}/${networkLog}.%i.log.zip</fileNamePattern>\r
+                       <minIndex>1</minIndex>\r
+                       <maxIndex>9</maxIndex>\r
+               </rollingPolicy>\r
+               <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">\r
+                       <maxFileSize>15MB</maxFileSize>\r
+               </triggeringPolicy>\r
+               <encoder>\r
+                       <pattern>${networkPattern}</pattern>\r
+               </encoder>\r
+       </appender>\r
+\r
+       <appender name="AsyncNetworkOut" class="ch.qos.logback.classic.AsyncAppender">\r
+               <appender-ref ref="NetworkOut" />\r
+       </appender>\r
+\r
+       <logger name="network" level="INFO" additivity="false">\r
+               <appender-ref ref="AsyncNetworkOut" />\r
+       </logger>\r
+\r
+       <logger name="org.eclipse.jetty.server.RequestLog" level="info" additivity="false">\r
+               <appender-ref ref="AsyncNetworkOut" />\r
+       </logger>\r
+\r
+       <root level="INFO">\r
+               <appender-ref ref="AsyncDebugOut" />\r
+               <appender-ref ref="AsyncErrorOut" />\r
+       </root>\r
+\r
+</configuration>
\ No newline at end of file
diff --git a/kubernetes/policy/resources/config/log/filebeat/filebeat.yml b/kubernetes/policy/resources/config/log/filebeat/filebeat.yml
new file mode 100644 (file)
index 0000000..f316b86
--- /dev/null
@@ -0,0 +1,41 @@
+filebeat.prospectors:
+#it is mandatory, in our case it's log
+- input_type: log
+  #This is the canonical path as mentioned in logback.xml, *.* means it will monitor all files in the directory.
+  paths:
+    - /var/log/onap/*/*/*/*.log
+    - /var/log/onap/*/*/*.log
+    - /var/log/onap/*/*.log
+  #Files older than this should be ignored. In our case it will be 48 hours i.e. 2 days. It is a helping flag for clean_inactive
+  ignore_older: 48h
+  # Remove the registry entry for a file that is more than the specified time. In our case it will be 96 hours, i.e. 4 days. It will help to keep registry records within limits
+  clean_inactive: 96h
+
+
+# Name of the registry file. If a relative path is used, it is considered relative to the
+# data path. Else full qualified file name.
+#filebeat.registry_file: ${path.data}/registry
+
+
+output.logstash:
+  #List of logstash server ip addresses with port number.
+  #But, in our case, this will be the loadbalancer IP address.
+  #For the below property to work the loadbalancer or logstash should expose port 5044 to listen for the filebeat events, or the port in the property should be changed appropriately.
+  hosts: ["logstash.onap-log:5044"]
+  #If enabled, will do load balancing among the available Logstash instances automatically.
+  loadbalance: true
+
+  #The list of root certificates for server verifications.
+  #If certificate_authorities is empty or not set, the trusted
+  #certificate authorities of the host system are used.
+  #ssl.certificate_authorities: $ssl.certificate_authorities
+
+  #The path to the certificate for SSL client authentication. If the certificate is not specified,
+  #client authentication is not available.
+  #ssl.certificate: $ssl.certificate
+
+  #The client certificate key used for client authentication.
+  #ssl.key: $ssl.key
+
+  #The passphrase used to decrypt an encrypted key stored in the configured key file
+  #ssl.key_passphrase: $ssl.key_passphrase
@@ -28,7 +28,7 @@ KEYSTORE_PASSWD=PolicyR0ck$
 # Telemetry credentials
 
 ENGINE_MANAGEMENT_PORT=9696
-ENGINE_MANAGEMENT_HOST=localhost
+ENGINE_MANAGEMENT_HOST=0.0.0.0
 ENGINE_MANAGEMENT_USER=@1b3rt
 ENGINE_MANAGEMENT_PASSWORD=31nst31n
 
@@ -68,12 +68,33 @@ PAP_PASSWORD=alpha123
 PDP_HOST=pdp.onap-policy
 PDP_USERNAME=testpdp
 PDP_PASSWORD=alpha123
+PDP_CLIENT_USERNAME=python
+PDP_CLIENT_PASSWORD=test
+PDP_ENVIRONMENT=TEST
 
 # DCAE DMaaP
 
-DCAE_TOPIC=unauthenticated.TCA_EVENT_OUTPUT
-DCAE_SERVERS=dcae-controller.onap-dcae
+DCAE_TOPIC=unauthenticated.DCAE_CL_OUTPUT
+DCAE_SERVERS=dmaap.onap-message-router
 
 # Open DMaaP
 
 DMAAP_SERVERS=dmaap.onap-message-router
+
+# AAI
+
+AAI_URL=https://aai.api.simpledemo.openecomp.org:8443
+AAI_USERNAME=POLICY
+AAI_PASSWORD=POLICY
+
+# MSO
+
+SO_URL=http://mso.onap-mso:8080/ecomp/mso/infra
+SO_USERNAME=InfraPortalClient
+SO_PASSWORD=password1$
+
+# VFC
+
+VFC_URL=
+VFC_USERNAME=
+VFC_PASSWORD=
\ No newline at end of file
@@ -33,6 +33,6 @@ for DEP in ${DEPS_JSON_RUNTIME} ${DEPS_JSON_INSTALL}; do
        if [ ! -f "${DEP}" ]; then
                echo "warning: configuration does not exist: ${DEP}"
        else
-               sed -i -e "s/\"version\":.*/\"version\": \"${version}\"/g" "${DEP}"
+               sed -i -e "s/\"version\":.*-SNAPSHOT\"/\"version\": \"${version}\"/g" "${DEP}"
        fi
 done
@@ -28,11 +28,8 @@ UEB_API_SECRET=
 
 groupID=org.onap.policy-engine
 artifactID=drlPDPGroup
-VFW_GROUP_ID=org.onap.policy-engine.drools.vFW
-VFW_ARTIFACT_ID=policy-vFW-rules
-VDNS_GROUP_ID=org.onap.policy-engine.drools.vDNS
-VDNS_ARTIFACT_ID=policy-vDNS-rules
-
+AMSTERDAM_GROUP_ID=org.onap.policy-engine.drools.amsterdam
+AMSTERDAM_ARTIFACT_ID=policy-amsterdam-rules
 
 # the java property is RESOURCE_NAME (uppercase), but the conf parameter is lowercase
 resource_name=brmsgw_1
@@ -52,5 +49,5 @@ BRMS_UEB_API_KEY=
 BRMS_UEB_API_SECRET=
 
 #Dependency.json file version
-BRMS_DEPENDENCY_VERSION=1.1.0-SNAPSHOT
+BRMS_DEPENDENCY_VERSION=1.2.0
 
@@ -122,7 +122,7 @@ onap_application_name=
 #-----------------------ONAP-PORTAL-Properties----------------------
 
 ONAP_REDIRECT_URL=http://portalapps.onap-portal:8989/ONAPPORTAL/login.htm
-ONAP_REST_URL=
+ONAP_REST_URL=http://portalapps.onap-portal:8989/ONAPPORTAL/auxapi
 ONAP_UEB_URL_LIST=
 ONAP_PORTAL_INBOX_NAME=
 ONAP_UEB_APP_KEY=
@@ -51,3 +51,6 @@ PDP_DMAAP_AAF_PASSWORD=
 #Required only, when we use AAF
 POLICY_AAF_NAMESPACE=
 POLICY_AAF_RESOURCE=
+
+# Indeterminate resolution
+DECISION_INDETERMINATE_RESPONSE=PERMIT
\ No newline at end of file
diff --git a/kubernetes/policy/resources/config/opt/policy/config/pe/push-policies.sh b/kubernetes/policy/resources/config/opt/policy/config/pe/push-policies.sh
new file mode 100755 (executable)
index 0000000..0461731
--- /dev/null
@@ -0,0 +1,250 @@
+#! /bin/bash
+
+# forked from https://gerrit.onap.org/r/gitweb?p=policy/docker.git;a=blob;f=config/pe/push-policies.sh;h=555ab357e6b4f54237bf07ef5e6777d782564bc0;hb=refs/heads/amsterdam and adapted for OOM
+
+#########################################Upload BRMS Param Template##########################################
+
+echo "Upload BRMS Param Template"
+
+sleep 2
+
+wget -O cl-amsterdam-template.drl https://git.onap.org/policy/drools-applications/plain/controlloop/templates/archetype-cl-amsterdam/src/main/resources/archetype-resources/src/main/resources/__closedLoopControlName__.drl
+
+sleep 2
+
+curl -v --silent -X POST --header 'Content-Type: multipart/form-data' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -F "file=@cl-amsterdam-template.drl" -F "importParametersJson={\"serviceName\":\"ClosedLoopControlName\",\"serviceType\":\"BRMSPARAM\"}" 'http://pdp.onap-policy:8081/pdp/api/policyEngineImport' 
+
+echo "PRELOAD_POLICIES is $PRELOAD_POLICIES"
+
+if [ "$PRELOAD_POLICIES" == "false" ]; then
+    exit 0
+fi
+
+#########################################Create BRMS Param policies##########################################
+
+echo "Create BRMSParam Operational Policies"
+
+sleep 2
+
+echo "Create BRMSParamvFirewall Policy"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/html' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+       "policyConfigType": "BRMS_PARAM",
+       "policyName": "com.BRMSParamvFirewall",
+       "policyDescription": "BRMS Param vFirewall policy",
+       "policyScope": "com",
+       "attributes": {
+               "MATCHING": {
+               "controller" : "amsterdam"
+           },
+               "RULE": {
+                       "templateName": "ClosedLoopControlName",
+                       "closedLoopControlName": "ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a",
+                       "controlLoopYaml": "controlLoop%3A%0D%0A++version%3A+2.0.0%0D%0A++controlLoopName%3A+ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a%0D%0A++trigger_policy%3A+unique-policy-id-1-modifyConfig%0D%0A++timeout%3A+1200%0D%0A++abatement%3A+false%0D%0A+%0D%0Apolicies%3A%0D%0A++-+id%3A+unique-policy-id-1-modifyConfig%0D%0A++++name%3A+modify+packet+gen+config%0D%0A++++description%3A%0D%0A++++actor%3A+APPC%0D%0A++++recipe%3A+ModifyConfig%0D%0A++++target%3A%0D%0A++++++%23+TBD+-+Cannot+be+known+until+instantiation+is+done%0D%0A++++++resourceID%3A+Eace933104d443b496b8.nodes.heat.vpg%0D%0A++++++type%3A+VNF%0D%0A++++retry%3A+0%0D%0A++++timeout%3A+300%0D%0A++++success%3A+final_success%0D%0A++++failure%3A+final_failure%0D%0A++++failure_timeout%3A+final_failure_timeout%0D%0A++++failure_retries%3A+final_failure_retries%0D%0A++++failure_exception%3A+final_failure_exception%0D%0A++++failure_guard%3A+final_failure_guard"
+               }
+       }
+}' 'http://pdp.onap-policy:8081/pdp/api/createPolicy'
+
+sleep 2
+
+echo "Create BRMSParamvDNS Policy"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/html' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+       "policyConfigType": "BRMS_PARAM",
+       "policyName": "com.BRMSParamvDNS",
+       "policyDescription": "BRMS Param vDNS policy",
+       "policyScope": "com",
+       "attributes": {
+               "MATCHING": {
+               "controller" : "amsterdam"
+           },
+               "RULE": {
+                       "templateName": "ClosedLoopControlName",
+                       "closedLoopControlName": "ControlLoop-vDNS-6f37f56d-a87d-4b85-b6a9-cc953cf779b3",
+                       "controlLoopYaml": "controlLoop%3A%0D%0A++version%3A+2.0.0%0D%0A++controlLoopName%3A+ControlLoop-vDNS-6f37f56d-a87d-4b85-b6a9-cc953cf779b3%0D%0A++trigger_policy%3A+unique-policy-id-1-scale-up%0D%0A++timeout%3A+1200%0D%0A++abatement%3A+false%0D%0Apolicies%3A%0D%0A++-+id%3A+unique-policy-id-1-scale-up%0D%0A++++name%3A+Create+a+new+VF+Module%0D%0A++++description%3A%0D%0A++++actor%3A+SO%0D%0A++++recipe%3A+VF+Module+Create%0D%0A++++target%3A%0D%0A++++++type%3A+VNF%0D%0A++++retry%3A+0%0D%0A++++timeout%3A+1200%0D%0A++++success%3A+final_success%0D%0A++++failure%3A+final_failure%0D%0A++++failure_timeout%3A+final_failure_timeout%0D%0A++++failure_retries%3A+final_failure_retries%0D%0A++++failure_exception%3A+final_failure_exception%0D%0A++++failure_guard%3A+final_failure_guard"
+               }
+       }
+}' 'http://pdp.onap-policy:8081/pdp/api/createPolicy'
+
+sleep 2
+
+echo "Create BRMSParamVOLTE Policy"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/html' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+       "policyConfigType": "BRMS_PARAM",
+       "policyName": "com.BRMSParamVOLTE",
+       "policyDescription": "BRMS Param VOLTE policy",
+       "policyScope": "com",
+       "attributes": {
+               "MATCHING": {
+               "controller" : "amsterdam"
+           },
+               "RULE": {
+                       "templateName": "ClosedLoopControlName",
+                       "closedLoopControlName": "ControlLoop-VOLTE-2179b738-fd36-4843-a71a-a8c24c70c55b",
+                       "controlLoopYaml": "controlLoop%3A%0D%0A++version%3A+2.0.0%0D%0A++controlLoopName%3A+ControlLoop-VOLTE-2179b738-fd36-4843-a71a-a8c24c70c55b%0D%0A++trigger_policy%3A+unique-policy-id-1-restart%0D%0A++timeout%3A+3600%0D%0A++abatement%3A+false%0D%0A+%0D%0Apolicies%3A%0D%0A++-+id%3A+unique-policy-id-1-restart%0D%0A++++name%3A+Restart+the+VM%0D%0A++++description%3A%0D%0A++++actor%3A+VFC%0D%0A++++recipe%3A+Restart%0D%0A++++target%3A%0D%0A++++++type%3A+VM%0D%0A++++retry%3A+3%0D%0A++++timeout%3A+1200%0D%0A++++success%3A+final_success%0D%0A++++failure%3A+final_failure%0D%0A++++failure_timeout%3A+final_failure_timeout%0D%0A++++failure_retries%3A+final_failure_retries%0D%0A++++failure_exception%3A+final_failure_exception%0D%0A++++failure_guard%3A+final_failure_guard"
+               }
+       }
+}' 'http://pdp.onap-policy:8081/pdp/api/createPolicy'
+
+sleep 2
+
+echo "Create BRMSParamvCPE Policy"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/html' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+       "policyConfigType": "BRMS_PARAM",
+       "policyName": "com.BRMSParamvCPE",
+       "policyDescription": "BRMS Param vCPE policy",
+       "policyScope": "com",
+       "attributes": {
+           "MATCHING": {
+               "controller" : "amsterdam"
+           },
+               "RULE": {
+                       "templateName": "ClosedLoopControlName",
+                       "closedLoopControlName": "ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e",
+                       "controlLoopYaml": "controlLoop%3A%0D%0A++version%3A+2.0.0%0D%0A++controlLoopName%3A+ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e%0D%0A++trigger_policy%3A+unique-policy-id-1-restart%0D%0A++timeout%3A+3600%0D%0A++abatement%3A+true%0D%0A+%0D%0Apolicies%3A%0D%0A++-+id%3A+unique-policy-id-1-restart%0D%0A++++name%3A+Restart+the+VM%0D%0A++++description%3A%0D%0A++++actor%3A+APPC%0D%0A++++recipe%3A+Restart%0D%0A++++target%3A%0D%0A++++++type%3A+VM%0D%0A++++retry%3A+3%0D%0A++++timeout%3A+1200%0D%0A++++success%3A+final_success%0D%0A++++failure%3A+final_failure%0D%0A++++failure_timeout%3A+final_failure_timeout%0D%0A++++failure_retries%3A+final_failure_retries%0D%0A++++failure_exception%3A+final_failure_exception%0D%0A++++failure_guard%3A+final_failure_guard"
+               }
+       }
+}' 'http://pdp.onap-policy:8081/pdp/api/createPolicy'
+
+#########################################Create Micro Service Config policies##########################################
+
+echo "Create MicroService Config Policies"
+
+sleep 2
+
+echo "Create MicroServicevFirewall Policy"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+       "configBody": "{ \"service\": \"tca_policy\", \"location\": \"SampleServiceLocation\", \"uuid\": \"test\", \"policyName\": \"MicroServicevFirewall\", \"description\": \"MicroService vFirewall Policy\", \"configName\": \"SampleConfigName\", \"templateVersion\": \"OpenSource.version.1\", \"version\": \"1.1.0\", \"priority\": \"1\", \"policyScope\": \"resource=SampleResource,service=SampleService,type=SampleType,closedLoopControlName=ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\", \"riskType\": \"SampleRiskType\", \"riskLevel\": \"1\", \"guard\": \"False\", \"content\": { \"tca_policy\": { \"domain\": \"measurementsForVfScaling\", \"metricsPerEventName\": [{ \"eventName\": \"vFirewallBroadcastPackets\", \"controlLoopSchemaType\": \"VNF\", \"policyScope\": \"DCAE\", \"policyName\": \"DCAE.Config_tca-hi-lo\", \"policyVersion\": \"v0.0.1\", \"thresholds\": [{ \"closedLoopControlName\": \"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\", \"version\": \"1.0.2\", \"fieldPath\": \"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\", \"thresholdValue\": 300, \"direction\": \"LESS_OR_EQUAL\", \"severity\": \"MAJOR\", \"closedLoopEventStatus\": \"ONSET\" }, { \"closedLoopControlName\": \"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\", \"version\": \"1.0.2\", \"fieldPath\": \"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\", \"thresholdValue\": 700, \"direction\": \"GREATER_OR_EQUAL\", \"severity\": \"CRITICAL\", \"closedLoopEventStatus\": \"ONSET\" } ] }] } } }",
+       "policyConfigType": "MicroService",
+       "policyName": "com.MicroServicevFirewall",
+       "onapName": "DCAE"
+}' 'http://pdp.onap-policy:8081/pdp/api/createPolicy'
+
+
+sleep 2
+
+echo "Create MicroServicevDNS Policy"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+       "configBody": "{ \"service\": \"tca_policy\", \"location\": \"SampleServiceLocation\", \"uuid\": \"test\", \"policyName\": \"MicroServicevDNS\", \"description\": \"MicroService vDNS Policy\", \"configName\": \"SampleConfigName\", \"templateVersion\": \"OpenSource.version.1\", \"version\": \"1.1.0\", \"priority\": \"1\", \"policyScope\": \"resource=SampleResource,service=SampleService,type=SampleType,closedLoopControlName=ControlLoop-vDNS-6f37f56d-a87d-4b85-b6a9-cc953cf779b3\", \"riskType\": \"SampleRiskType\", \"riskLevel\": \"1\", \"guard\": \"False\", \"content\": { \"tca_policy\": { \"domain\": \"measurementsForVfScaling\", \"metricsPerEventName\": [{ \"eventName\": \"vLoadBalancer\", \"controlLoopSchemaType\": \"VM\", \"policyScope\": \"DCAE\", \"policyName\": \"DCAE.Config_tca-hi-lo\", \"policyVersion\": \"v0.0.1\", \"thresholds\": [{ \"closedLoopControlName\": \"ControlLoop-vDNS-6f37f56d-a87d-4b85-b6a9-cc953cf779b3\", \"version\": \"1.0.2\", \"fieldPath\": \"$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\", \"thresholdValue\": 300, \"direction\": \"GREATER_OR_EQUAL\", \"severity\": \"CRITICAL\", \"closedLoopEventStatus\": \"ONSET\" }] }] } } }",
+       "policyConfigType": "MicroService",
+       "policyName": "com.MicroServicevDNS",
+       "onapName": "DCAE"
+}' 'http://pdp.onap-policy:8081/pdp/api/createPolicy'
+
+
+sleep 2
+
+echo "Create MicroServicevCPE Policy"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+       "configBody": "{ \"service\": \"tca_policy\", \"location\": \"SampleServiceLocation\", \"uuid\": \"test\", \"policyName\": \"MicroServicevCPE\", \"description\": \"MicroService vCPE Policy\", \"configName\": \"SampleConfigName\", \"templateVersion\": \"OpenSource.version.1\", \"version\": \"1.1.0\", \"priority\": \"1\", \"policyScope\": \"resource=SampleResource,service=SampleService,type=SampleType,closedLoopControlName=ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\", \"riskType\": \"SampleRiskType\", \"riskLevel\": \"1\", \"guard\": \"False\", \"content\": { \"tca_policy\": { \"domain\": \"measurementsForVfScaling\", \"metricsPerEventName\": [{ \"eventName\": \"Measurement_vGMUX\", \"controlLoopSchemaType\": \"VNF\", \"policyScope\": \"DCAE\", \"policyName\": \"DCAE.Config_tca-hi-lo\", \"policyVersion\": \"v0.0.1\", \"thresholds\": [{ \"closedLoopControlName\": \"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\", \"version\": \"1.0.2\", \"fieldPath\": \"$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\", \"thresholdValue\": 0, \"direction\": \"EQUAL\", \"severity\": \"MAJOR\", \"closedLoopEventStatus\": \"ABATED\" }, { \"closedLoopControlName\": \"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\", \"version\": \"1.0.2\", \"fieldPath\": \"$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\", \"thresholdValue\": 0, \"direction\": \"GREATER\", \"severity\": \"CRITICAL\", \"closedLoopEventStatus\": \"ONSET\" }] }] } } }",
+       "policyConfigType": "MicroService",
+       "policyName": "com.MicroServicevCPE",
+       "onapName": "DCAE"
+}' 'http://pdp.onap-policy:8081/pdp/api/createPolicy'
+
+
+#########################################Creating Decision Guard policy######################################### 
+
+sleep 2
+
+echo "Creating Decision Guard policy"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{ 
+       "policyClass": "Decision", 
+       "policyName": "com.AllPermitGuard", 
+       "policyDescription": "Testing all Permit YAML Guard Policy", 
+       "ecompName": "PDPD", 
+       "ruleProvider": "GUARD_YAML", 
+       "attributes": { 
+               "MATCHING": { 
+                       "actor": ".*", 
+                       "recipe": ".*", 
+                       "targets": ".*", 
+                       "clname": ".*", 
+                       "limit": "10", 
+                       "timeWindow": "1", 
+                       "timeUnits": "minute", 
+                       "guardActiveStart": "00:00:01-05:00", 
+                       "guardActiveEnd": "00:00:00-05:00" 
+               } 
+       } 
+}' 'http://pdp.onap-policy:8081/pdp/api/createPolicy'
+
+#########################################Push Decision policy#########################################
+
+sleep 2
+
+echo "Push Decision policy" 
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{ 
+  "pdpGroup": "default", 
+  "policyName": "com.AllPermitGuard", 
+  "policyType": "DECISION" 
+}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy'
+
+#########################################Pushing BRMS Param policies##########################################
+
+echo "Pushing BRMSParam Operational policies"
+
+sleep 2
+
+echo "pushPolicy : PUT : com.BRMSParamvFirewall"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+  "pdpGroup": "default",
+  "policyName": "com.BRMSParamvFirewall",
+  "policyType": "BRMS_Param"
+}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy'
+
+sleep 2
+
+echo "pushPolicy : PUT : com.BRMSParamvDNS"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+  "pdpGroup": "default",
+  "policyName": "com.BRMSParamvDNS",
+  "policyType": "BRMS_Param"
+}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy'
+
+sleep 2
+
+echo "pushPolicy : PUT : com.BRMSParamVOLTE"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+  "pdpGroup": "default",
+  "policyName": "com.BRMSParamVOLTE",
+  "policyType": "BRMS_Param"
+}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy'
+
+sleep 2
+
+echo "pushPolicy : PUT : com.BRMSParamvCPE"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+  "pdpGroup": "default",
+  "policyName": "com.BRMSParamvCPE",
+  "policyType": "BRMS_Param"
+}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy'
+
+#########################################Pushing MicroService Config policies##########################################
+
+echo "Pushing MicroService Config policies"
+
+sleep 2
+
+echo "pushPolicy : PUT : com.MicroServicevFirewall"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+  "pdpGroup": "default",
+  "policyName": "com.MicroServicevFirewall",
+  "policyType": "MicroService"
+}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy'
+
+sleep 10
+
+echo "pushPolicy : PUT : com.MicroServicevDNS"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+  "pdpGroup": "default",
+  "policyName": "com.MicroServicevDNS",
+  "policyType": "MicroService"
+}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy' 
+
+sleep 10
+
+echo "pushPolicy : PUT : com.MicroServicevCPE"
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+  "pdpGroup": "default",
+  "policyName": "com.MicroServicevCPE",
+  "policyType": "MicroService"
+}' 'http://pdp.onap-policy:8081/pdp/api/pushPolicy' 
\ No newline at end of file
diff --git a/kubernetes/policy/scripts/update-vfw-op-policy.sh b/kubernetes/policy/scripts/update-vfw-op-policy.sh
new file mode 100755 (executable)
index 0000000..39483a0
--- /dev/null
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+if [ "$#" -ne 4 ]; then
+       echo "Usage: $(basename $0) <k8s-host> <policy-pdp-node-port> <policy-drools-node-port> <resource-id>"
+       exit 1
+fi
+
+K8S_HOST=$1
+POLICY_PDP_PORT=$2
+POLICY_DROOLS_PORT=$3
+RESOURCE_ID=$4
+
+echo
+echo
+echo "Removing the vFW Policy from PDP.."
+echo
+echo
+
+curl -v -X DELETE --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+  "pdpGroup": "default",
+  "policyComponent" : "PDP",
+  "policyName": "com.BRMSParamvFirewall",
+  "policyType": "BRMS_Param"
+}' http://${K8S_HOST}:${POLICY_PDP_PORT}/pdp/api/deletePolicy
+
+sleep 20
+
+echo
+
+echo
+echo "Updating vFW Operational Policy .."
+echo
+
+curl -v -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+  "policyConfigType": "BRMS_PARAM",
+  "policyName": "com.BRMSParamvFirewall",
+  "policyDescription": "BRMS Param vFirewall policy",
+  "policyScope": "com",
+  "attributes": {
+    "MATCHING": {
+      "controller": "amsterdam"
+    },
+    "RULE": {
+      "templateName": "ClosedLoopControlName",
+      "closedLoopControlName": "ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a",
+      "controlLoopYaml": "controlLoop%3A%0D%0A++version%3A+2.0.0%0D%0A++controlLoopName%3A+ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a%0D%0A++trigger_policy%3A+unique-policy-id-1-modifyConfig%0D%0A++timeout%3A+1200%0D%0A++abatement%3A+false%0D%0A+%0D%0Apolicies%3A%0D%0A++-+id%3A+unique-policy-id-1-modifyConfig%0D%0A++++name%3A+modify+packet+gen+config%0D%0A++++description%3A%0D%0A++++actor%3A+APPC%0D%0A++++recipe%3A+ModifyConfig%0D%0A++++target%3A%0D%0A++++++%23+TBD+-+Cannot+be+known+until+instantiation+is+done%0D%0A++++++resourceID%3A+'${RESOURCE_ID}'%0D%0A++++++type%3A+VNF%0D%0A++++retry%3A+0%0D%0A++++timeout%3A+300%0D%0A++++success%3A+final_success%0D%0A++++failure%3A+final_failure%0D%0A++++failure_timeout%3A+final_failure_timeout%0D%0A++++failure_retries%3A+final_failure_retries%0D%0A++++failure_exception%3A+final_failure_exception%0D%0A++++failure_guard%3A+final_failure_guard"
+    }
+  }
+}' http://${K8S_HOST}:${POLICY_PDP_PORT}/pdp/api/updatePolicy
+
+sleep 5
+
+echo
+echo
+echo "Pushing the vFW Policy .."
+echo
+echo
+
+curl -v --silent -X PUT --header 'Content-Type: application/json' --header 'Accept: text/plain' --header 'ClientAuth: cHl0aG9uOnRlc3Q=' --header 'Authorization: Basic dGVzdHBkcDphbHBoYTEyMw==' --header 'Environment: TEST' -d '{
+  "pdpGroup": "default",
+  "policyName": "com.BRMSParamvFirewall",
+  "policyType": "BRMS_Param"
+}' http://${K8S_HOST}:${POLICY_PDP_PORT}/pdp/api/pushPolicy
+
+sleep 20
+
+echo
+echo
+echo "Restarting PDP-D .."
+echo
+echo
+
+POD=$(kubectl --namespace onap-policy get pods | sed 's/ .*//'| grep drools)
+kubectl --namespace onap-policy exec -it ${POD} -- bash -c "source /opt/app/policy/etc/profile.d/env.sh && policy stop && sleep 5 && policy start"
+
+sleep 20
+
+echo
+echo
+echo "PDP-D amsterdam maven coordinates .."
+echo
+echo
+
+curl -vvv --silent --user @1b3rt:31nst31n -X GET http://${K8S_HOST}:${POLICY_DROOLS_PORT}/policy/pdp/engine/controllers/amsterdam/drools  | python -m json.tool
+
+
+echo
+echo
+echo "PDP-D control loop updated .."
+echo
+echo
+
+curl -v --silent --user @1b3rt:31nst31n -X GET http://${K8S_HOST}:${POLICY_DROOLS_PORT}/policy/pdp/engine/controllers/amsterdam/drools/facts/closedloop-amsterdam/org.onap.policy.controlloop.Params  | python -m json.tool
index a1c4804..c6b633e 100644 (file)
@@ -45,6 +45,9 @@ spec:
   - name: "drools-port"
     port: 6969
     nodePort: {{ .Values.nodePortPrefix }}17
+  - name: "drools-port2"
+    port: 9696
+    nodePort: {{ .Values.nodePortPrefix }}21
   selector:
     app: drools
   type: NodePort
@@ -125,4 +128,4 @@ spec:
   selector:
     app: brmsgw
   type: NodePort
-#{{ end }}
\ No newline at end of file
+#{{ end }}
index 7a1bcde..667ccc6 100644 (file)
@@ -20,13 +20,7 @@ spec:
         - /root/ready.py
         args:
         - --container-name
-        - mariadb
-        - --container-name
-        - nexus
-        - --container-name
         - pap
-        - --container-name
-        - pdp
         env:
         - name: NAMESPACE
           valueFrom:
@@ -55,8 +49,9 @@ spec:
           hostPath:
             path: /etc/localtime
         - name: pe
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/policy/opt/policy/config/pe/
+          configMap:
+            name: policy-dep-pe-configmap
+            defaultMode: 0755
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
 #{{ end }}
index 48ef16e..afa044a 100644 (file)
@@ -23,12 +23,6 @@ spec:
         - mariadb
         - --container-name
         - nexus
-        - --container-name
-        - pap
-        - --container-name
-        - pdp
-        - --container-name
-        - brmsgw
         env:
         - name: NAMESPACE
           valueFrom:
@@ -38,6 +32,10 @@ spec:
         image: "{{ .Values.image.readiness }}:{{ .Values.image.readinessVersion }}"
         imagePullPolicy: {{ .Values.pullPolicy }}
         name: drools-readiness
+      hostAliases:
+      - ip: "{{ .Values.aaiServiceClusterIp }}"
+        hostnames:
+        - "aai.api.simpledemo.openecomp.org"
       containers:
       - command:
         - /bin/bash
@@ -61,10 +59,12 @@ spec:
           name: drools
         - mountPath: /usr/share/maven/conf/settings.xml
           name: drools-settingsxml
+          subPath: settings.xml
         - mountPath: /var/log/onap
           name: policy-logs
         - mountPath: /tmp/logback.xml
           name: policy-logback
+          subPath: logback.xml
         lifecycle:
           postStart:
             exec:
@@ -75,6 +75,7 @@ spec:
         volumeMounts:
         - mountPath: /usr/share/filebeat/filebeat.yml
           name: filebeat-conf
+          subPath: filebeat.yml
         - mountPath: /var/log/onap
           name: policy-logs
         - mountPath: /usr/share/filebeat/data
@@ -84,21 +85,37 @@ spec:
           hostPath:
             path: /etc/localtime
         - name: filebeat-conf
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/logback/filebeat.yml
+          configMap:
+            name: policy-filebeat-configmap
         - name: policy-logs
           emptyDir: {}
         - name: policy-data-filebeat
           emptyDir: {}
         - name: policy-logback
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/policy/drools/logback.xml
+          configMap:
+            name: policy-drools-log-configmap
         - name: drools-settingsxml
-          hostPath:
-            path:  /dockerdata-nfs/{{ .Values.nsPrefix }}/policy/drools/settings.xml
+          configMap:
+            name: policy-dep-drools-settings-configmap
         - name: drools
-          hostPath:
-            path:  /dockerdata-nfs/{{ .Values.nsPrefix }}/policy/opt/policy/config/drools/
+          secret:
+            secretName: policy-dep-drools-secret
+            items:
+            - key: policy-keystore
+              path: policy-keystore
+              mode: 0644
+            - key: feature-healthcheck.conf
+              path: feature-healthcheck.conf
+              mode: 0644
+            - key: base.conf
+              path: base.conf
+              mode: 0755
+            - key: policy-management.conf
+              path: policy-management.conf
+              mode: 0755
+            - key: drools-tweaks.sh
+              path: drools-tweaks.sh
+              mode: 0755
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
 #{{ end }}
index c0f5060..c921e8c 100644 (file)
@@ -24,8 +24,6 @@ spec:
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
-        - mountPath: /var/lib/mysql
-          name: policy-mariadb-data
         readinessProbe:
           tcpSocket:
             port: 3306
@@ -35,9 +33,6 @@ spec:
       - name: localtime
         hostPath:
           path: /etc/localtime
-      - name: policy-mariadb-data
-        persistentVolumeClaim:
-          claimName: policy-db
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
 #{{ end }}
\ No newline at end of file
index 523cfd3..d3d5ff3 100644 (file)
@@ -19,8 +19,6 @@ spec:
         - /root/ready.py
         args:
         - --container-name
-        - nexus
-        - --container-name
         - mariadb
         env:
         - name: NAMESPACE
@@ -52,6 +50,9 @@ spec:
         image: "{{ .Values.image.policyPe }}:{{ .Values.image.policyPeVersion }}"
         imagePullPolicy: {{ .Values.pullPolicy }}
         name: pap
+        env:
+        - name: PRELOAD_POLICIES
+          value: "true"
         ports:
         - containerPort: 8443
         - containerPort: 9091
@@ -70,8 +71,10 @@ spec:
           name: policy-logs
         - mountPath: /tmp/policy-install/logback.xml
           name: policy-sdk-logback
+          subPath: logback.xml
         - mountPath: /tmp/logback.xml
           name: policy-logback
+          subPath: logback.xml
         lifecycle:
           postStart:
             exec:
@@ -82,6 +85,7 @@ spec:
         volumeMounts:
         - mountPath: /usr/share/filebeat/filebeat.yml
           name: filebeat-conf
+          subPath: filebeat.yml
         - mountPath: /var/log/onap
           name: policy-logs
         - mountPath: /usr/share/filebeat/data
@@ -91,21 +95,22 @@ spec:
           hostPath:
             path: /etc/localtime
         - name: filebeat-conf
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/logback/filebeat.yml
+          configMap:
+            name: policy-filebeat-configmap
         - name: policy-logs
           emptyDir: {}
         - name: policy-data-filebeat
           emptyDir: {}
         - name: policy-logback
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/policy/xacml-pap-rest/logback.xml
+          configMap:
+            name: policy-pap-log-configmap
         - name: policy-sdk-logback
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/policy/ep_sdk_app/logback.xml
+          configMap:
+            name: policy-sdk-log-configmap
         - name: pe
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/policy/opt/policy/config/pe/
+          configMap:
+            name: policy-dep-pe-configmap
+            defaultMode: 0755
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
 #{{ end }}
index 3763b95..b62e280 100644 (file)
@@ -20,10 +20,6 @@ spec:
         - /root/ready.py
         args:
         - --container-name
-        - mariadb
-        - --container-name
-        - nexus
-        - --container-name
         - pap
         env:
         - name: NAMESPACE
@@ -59,6 +55,7 @@ spec:
           name: policy-logs
         - mountPath:  /tmp/logback.xml
           name: policy-logback
+          subPath: logback.xml
         lifecycle:
           postStart:
             exec:
@@ -69,6 +66,7 @@ spec:
         volumeMounts:
         - mountPath: /usr/share/filebeat/filebeat.yml
           name: filebeat-conf
+          subPath: filebeat.yml
         - mountPath: /var/log/onap
           name: policy-logs
         - mountPath: /usr/share/filebeat/data
@@ -78,18 +76,19 @@ spec:
           hostPath:
             path: /etc/localtime
         - name: filebeat-conf
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/logback/filebeat.yml
+          configMap:
+            name: policy-filebeat-configmap
         - name: policy-logs
           emptyDir: {}
         - name: policy-data-filebeat
           emptyDir: {}
         - name: policy-logback
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/policy/xacml-pdp-rest/logback.xml
+          configMap:
+            name: policy-pdp-log-configmap
         - name: pe
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/policy/opt/policy/config/pe/
+          configMap:
+            name: policy-dep-pe-configmap
+            defaultMode: 0755
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
 #{{ end }}
diff --git a/kubernetes/policy/templates/policy-deployment-configmap.yaml b/kubernetes/policy/templates/policy-deployment-configmap.yaml
new file mode 100644 (file)
index 0000000..b1db02b
--- /dev/null
@@ -0,0 +1,19 @@
+#{{ if not .Values.disablePolicyDrools }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: policy-dep-drools-settings-configmap
+  namespace: {{ .Values.nsPrefix }}-policy
+data:
+{{ (.Files.Glob "resources/config/drools/settings.xml").AsConfig | indent 2 }}
+#{{ end }}
+---
+#{{ if not .Values.disablePolicyPdp }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: policy-dep-pe-configmap
+  namespace: {{ .Values.nsPrefix }}-policy
+data:
+{{ (.Files.Glob "resources/config/opt/policy/config/pe/*").AsConfig | indent 2 }}
+#{{ end }}
diff --git a/kubernetes/policy/templates/policy-deployment-secret.yaml b/kubernetes/policy/templates/policy-deployment-secret.yaml
new file mode 100644 (file)
index 0000000..99a807b
--- /dev/null
@@ -0,0 +1,10 @@
+#{{ if not .Values.disablePolicyDrools }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: policy-dep-drools-secret
+  namespace: {{ .Values.nsPrefix }}-policy
+type: Opaque
+data:
+{{ tpl (.Files.Glob "resources/config/opt/policy/config/drools/*").AsSecrets . | indent 2 }}
+#{{ end }}
diff --git a/kubernetes/policy/templates/policy-log-configmap.yaml b/kubernetes/policy/templates/policy-log-configmap.yaml
new file mode 100644 (file)
index 0000000..24ad45e
--- /dev/null
@@ -0,0 +1,45 @@
+#{{ if not .Values.disablePolicyPap }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: policy-pap-log-configmap
+  namespace: {{ .Values.nsPrefix }}-policy
+data:
+{{ (.Files.Glob "resources/config/log/xacml-pap-rest/*").AsConfig | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: policy-sdk-log-configmap
+  namespace: {{ .Values.nsPrefix }}-policy
+data:
+{{ (.Files.Glob "resources/config/log/ep_sdk_app/*").AsConfig | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: policy-filebeat-configmap
+  namespace: {{ .Values.nsPrefix }}-policy
+data:
+{{  (.Files.Glob "resources/config/log/filebeat/*").AsConfig | indent 2 }}
+#{{ end }}
+#{{ if not .Values.disablePolicyPdp }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: policy-pdp-log-configmap
+  namespace: {{ .Values.nsPrefix }}-policy
+data:
+{{ (.Files.Glob "resources/config/log/xacml-pdp-rest/*").AsConfig | indent 2 }}
+#{{ end }}
+#{{ if not .Values.disablePolicyDrools }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: policy-drools-log-configmap
+  namespace: {{ .Values.nsPrefix }}-policy
+data:
+{{ (.Files.Glob "resources/config/log/drools/*").AsConfig | indent 2 }}
+#{{ end }}
diff --git a/kubernetes/policy/templates/policy-pv-pvc.yaml b/kubernetes/policy/templates/policy-pv-pvc.yaml
deleted file mode 100644 (file)
index 5dc0c61..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-#{{ if not .Values.disablePolicyMariadb }}
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: "{{ .Values.nsPrefix }}-policy-db"
-  namespace: "{{ .Values.nsPrefix }}-policy"
-  labels:
-    name: "{{ .Values.nsPrefix }}-policy-db"
-spec:
-  capacity:
-    storage: 2Gi
-  accessModes:
-    - ReadWriteMany
-  persistentVolumeReclaimPolicy: Retain
-  hostPath:
-    path: /dockerdata-nfs/{{ .Values.nsPrefix }}/policy/mariadb/data/
----
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: policy-db
-  namespace: "{{ .Values.nsPrefix }}-policy"
-spec:
-  accessModes:
-    - ReadWriteMany
-  resources:
-    requests:
-      storage: 2Gi
-  selector:
-    matchLabels:
-      name: "{{ .Values.nsPrefix }}-policy-db"
-#{{ end }}
\ No newline at end of file
index ce037d8..f52dc44 100644 (file)
@@ -1,6 +1,11 @@
 nsPrefix: onap
 pullPolicy: Always
 nodePortPrefix: 302
+
+# POLICY hotfix - Note this must be temporary
+# See https://jira.onap.org/browse/POLICY-510
+aaiServiceClusterIp: 10.43.255.254
+
 image:
   readiness: oomk8s/readiness-check
   readinessVersion: 1.0.0
diff --git a/kubernetes/portal/docker/init/ubuntu/Dockerfile b/kubernetes/portal/docker/init/ubuntu/Dockerfile
deleted file mode 100644 (file)
index 779a7fa..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM ubuntu:16.04
-
-ENV no_proxy "localhost,127.0.0.1,.cluster.local,$KUBERNETES_SERVICE_HOST"
-# Setup Corporate proxy
-ENV https_proxy ${HTTPS_PROXY}
-ENV http_proxy ${HTTP_PROXY}
-
-RUN apt-get update
-RUN apt-get install -y host dnsutils
-
-VOLUME /ubuntu-init
diff --git a/kubernetes/portal/resources/config/log/filebeat/filebeat.yml b/kubernetes/portal/resources/config/log/filebeat/filebeat.yml
new file mode 100644 (file)
index 0000000..f316b86
--- /dev/null
@@ -0,0 +1,41 @@
+filebeat.prospectors:
+#it is mandatory, in our case it's log
+- input_type: log
+  #This is the canolical path as mentioned in logback.xml, *.* means it will monitor all files in the directory.
+  paths:
+    - /var/log/onap/*/*/*/*.log
+    - /var/log/onap/*/*/*.log
+    - /var/log/onap/*/*.log
+  #Files older than this should be ignored.In our case it will be 48 hours i.e. 2 days. It is a helping flag for clean_inactive
+  ignore_older: 48h
+  # Remove the registry entry for a file that is more than the specified time. In our case it will be 96 hours, i.e. 4 days. It will help to keep registry records with in limit
+  clean_inactive: 96h
+
+
+# Name of the registry file. If a relative path is used, it is considered relative to the
+# data path. Else full qualified file name.
+#filebeat.registry_file: ${path.data}/registry
+
+
+output.logstash:
+  #List of logstash server ip addresses with port number.
+  #But, in our case, this will be the loadbalancer IP address.
+  #For the below property to work the loadbalancer or logstash should expose 5044 port to listen the filebeat events or port in the property should be changed appropriately.
+  hosts: ["logstash.onap-log:5044"]
+  #If enable will do load balancing among availabe Logstash, automatically.
+  loadbalance: true
+
+  #The list of root certificates for server verifications.
+  #If certificate_authorities is empty or not set, the trusted
+  #certificate authorities of the host system are used.
+  #ssl.certificate_authorities: $ssl.certificate_authorities
+
+  #The path to the certificate for SSL client authentication. If the certificate is not specified,
+  #client authentication is not available.
+  #ssl.certificate: $ssl.certificate
+
+  #The client certificate key used for client authentication.
+  #ssl.key: $ssl.key
+
+  #The passphrase used to decrypt an encrypted key stored in the configured key file
+  #ssl.key_passphrase: $ssl.key_passphrase
@@ -5,3 +5,4 @@ Any updates required by OOM to the portaldb are made here.
 while the OOM K8s version has these service split up.
 */
 UPDATE fn_app SET app_rest_endpoint = 'http://sdc.api.be.simpledemo.onap.org:8080/api/v2' where app_name = 'SDC';
+UPDATE fn_app SET app_url = 'http://cli.api.simpledemo.onap.org:8080', app_type = 1 where app_name='CLI';
diff --git a/kubernetes/portal/resources/scripts/update_hosts.sh b/kubernetes/portal/resources/scripts/update_hosts.sh
new file mode 100644 (file)
index 0000000..cd38faf
--- /dev/null
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+_SRC_HOST=$1
+_DST_HOST=$2
+_IP=`getent hosts ${_SRC_HOST}|cut -d' ' -f1`
+if [ -z ${_IP} ]; then
+  echo "Cannot retreive IP for host mapping ${_SRC_HOST} -> ${_DST_HOST}"
+  exit 1
+fi
+_REGEX=".*[[:blank:]]${_DST_HOST}$"
+if grep -c -e "${_REGEX}" /etc/hosts > /dev/null 2>&1 ; then
+  cp /etc/hosts /tmp/hosts
+  sed -i "s/${_REGEX}/${_IP} ${_DST_HOST}/g" /tmp/hosts
+  cp /tmp/hosts /etc/hosts
+else
+  echo "${_IP} ${_DST_HOST}" >> /etc/hosts
+fi
diff --git a/kubernetes/portal/templates/portal-apps-configmap.yaml b/kubernetes/portal/templates/portal-apps-configmap.yaml
new file mode 100644 (file)
index 0000000..044e0d8
--- /dev/null
@@ -0,0 +1,35 @@
+#{{ if not .Values.disablePortalPortalapps }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: portal-onap-portal-sdk-configmap
+  namespace: {{ .Values.nsPrefix }}-portal
+data:
+{{ tpl (.Files.Glob "resources/config/portal-fe/webapps/etc/ONAPPORTALSDK/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: portal-onap-portal-configmap
+  namespace: {{ .Values.nsPrefix }}-portal
+data:
+{{ tpl (.Files.Glob "resources/config/portal-fe/webapps/etc/ONAPPORTAL/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: portal-mariadb-configmap
+  namespace: {{ .Values.nsPrefix }}-portal
+data:
+{{ tpl (.Files.Glob "resources/config/mariadb/oom_updates.sql").AsConfig . | indent 2 }}
+#{{ end }}
+---
+#{{ if not .Values.disablePortalPortalwidgets }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: portal-onapwidgetms-configmap
+  namespace: {{ .Values.nsPrefix }}-portal
+data:
+{{ tpl (.Files.Glob "resources/config/portal-fe/webapps/etc/ONAPWIDGETMS/application.properties").AsConfig . | indent 2 }}
+#{{ end }}
index 761441a..c6a38bd 100755 (executable)
@@ -5,6 +5,7 @@ metadata:
   name: portalapps
   namespace: "{{ .Values.nsPrefix }}-portal"
 spec:
+  replicas: {{ .Values.portalAppsReplicas }}
   selector:
     matchLabels:
       app: portalapps
@@ -33,11 +34,19 @@ spec:
         volumeMounts:
         - mountPath: /portal-mysql/oom_updates.sql
           name: portal-mariadb-onboarding-sql
+          subPath: oom_updates.sql
         - mountPath: /portal_root/
           name: portal-root
         image: {{ .Values.image.mariadbClient }}
         imagePullPolicy: {{ .Values.pullPolicy }}
         name: provision-portaldb-users
+      - command: ["/bin/bash", "-c", "mkdir -p /ubuntu-init/ && chmod -R 777 /ubuntu-init/"]
+        volumeMounts:
+        - name: portal-logs
+          mountPath: /ubuntu-init/
+        image: {{ .Values.image.ubuntuInit }}
+        imagePullPolicy: {{ .Values.pullPolicy }}
+        name: portal-app-logs-init
       containers:
       - image: {{ .Values.image.portalapps }}
         imagePullPolicy: {{ .Values.pullPolicy }}
@@ -51,23 +60,32 @@ spec:
           name: localtime
           readOnly: true
         - mountPath: "{{ .Values.onapPortal.webappsDir }}/ONAPPORTAL/WEB-INF/fusion/conf/fusion.properties"
-          name: portal-fusion-properties
+          name: onap-portal-properties
+          subPath: fusion.properties
         - mountPath: "{{ .Values.onapPortal.webappsDir }}/ONAPPORTAL/WEB-INF/classes/openid-connect.properties"
-          name: portal-openid-connect-properties
+          name: onap-portal-properties
+          subPath: openid-connect.properties
         - mountPath: "{{ .Values.onapPortal.webappsDir }}/ONAPPORTAL/WEB-INF/conf/system.properties"
-          name: portal-system-properties
+          name: onap-portal-properties
+          subPath: system.properties
         - mountPath: "{{ .Values.onapPortal.webappsDir }}/ONAPPORTAL/WEB-INF/classes/portal.properties"
-          name: portal-portal-properties
+          name: onap-portal-properties
+          subPath: portal.properties
         - mountPath: "{{ .Values.onapPortal.webappsDir }}/ONAPPORTAL/WEB-INF/classes/logback.xml"
           name: portal-logback
+          subPath: logback.xml
         - mountPath: "{{ .Values.onapPortal.webappsDir }}/ONAPPORTALSDK/WEB-INF/conf/system.properties"
-          name: sdkapp-system-properties
+          name: portal-sdkapp-properties
+          subPath: system.properties
         - mountPath: "{{ .Values.onapPortal.webappsDir }}/ONAPPORTALSDK/WEB-INF/classes/portal.properties"
-          name: sdkapp-portal-properties
+          name: portal-sdkapp-properties
+          subPath: portal.properties
         - mountPath: "{{ .Values.onapPortal.webappsDir }}/ONAPPORTALSDK/WEB-INF/fusion/conf/fusion.properties"
-          name: sdkapp-fusion-properties
+          name: portal-sdkapp-properties
+          subPath: fusion.properties
         - mountPath: "{{ .Values.onapPortal.webappsDir }}/ONAPPORTALSDK/WEB-INF/classes/logback.xml"
           name: sdkapp-logback
+          subPath: logback.xml
         - mountPath: /portal_root/
           name: portal-root
         - mountPath: "{{ .Values.onapPortal.webappsDir }}/logs"
@@ -89,6 +107,7 @@ spec:
         volumeMounts:
         - mountPath: /usr/share/filebeat/filebeat.yml
           name: filebeat-conf
+          subPath: filebeat.yml
         - mountPath: /var/log/onap
           name: portal-logs2
         - mountPath: /usr/share/filebeat/data
@@ -98,42 +117,29 @@ spec:
           hostPath:
             path: /etc/localtime
         - name: filebeat-conf
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/logback/filebeat.yml
+          configMap:
+            name: portal-filebeat-configmap
         - name: portal-logs2
           emptyDir: {}
         - name: portal-data-filebeat
           emptyDir: {}
-        - name: portal-fusion-properties
-          hostPath:
-            path:  /dockerdata-nfs/{{ .Values.nsPrefix }}/portal/portal-fe/webapps/etc/ONAPPORTAL/fusion.properties
-        - name: portal-openid-connect-properties
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/portal/portal-fe/webapps/etc/ONAPPORTAL/openid-connect.properties
-        - name: portal-system-properties
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/portal/portal-fe/webapps/etc/ONAPPORTAL/system.properties
-        - name: portal-portal-properties
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/portal/portal-fe/webapps/etc/ONAPPORTAL/portal.properties
+        - name: onap-portal-properties
+          configMap:
+            defaultMode: 0755
+            name: portal-onap-portal-configmap
+        - name: portal-sdkapp-properties
+          configMap:
+            defaultMode: 0755
+            name: portal-onap-portal-sdk-configmap
         - name: portal-logback
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/portal/onapportal/logback.xml
-        - name: sdkapp-system-properties
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/portal/portal-fe/webapps/etc/ONAPPORTALSDK/system.properties
-        - name: sdkapp-portal-properties
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/portal/portal-fe/webapps/etc/ONAPPORTALSDK/portal.properties
-        - name: sdkapp-fusion-properties
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/portal/portal-fe/webapps/etc/ONAPPORTALSDK/fusion.properties
+          configMap:
+            name: portal-onapportal-log-configmap
         - name: sdkapp-logback
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/portal/onapportalsdk/logback.xml
+          configMap:
+            name: portal-onapportalsdk-log-configmap
         - name: portal-mariadb-onboarding-sql
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/portal/mariadb/oom_updates.sql
+          configMap:
+            name: portal-mariadb-configmap
         - name: portal-root
           hostPath:
             path: /dockerdata-nfs/{{ .Values.nsPrefix }}/portal
diff --git a/kubernetes/portal/templates/portal-logs-configmap.yaml b/kubernetes/portal/templates/portal-logs-configmap.yaml
new file mode 100644 (file)
index 0000000..c5735ba
--- /dev/null
@@ -0,0 +1,26 @@
+#{{ if not .Values.disablePortalPortalapps }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: portal-filebeat-configmap
+  namespace: {{ .Values.nsPrefix }}-portal
+data:
+{{ tpl (.Files.Glob "resources/config/log/filebeat/filebeat.yml").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: portal-onapportal-log-configmap
+  namespace: {{ .Values.nsPrefix }}-portal
+data:
+{{ tpl (.Files.Glob "resources/config/log/portal/onapportal/logback.xml").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: portal-onapportalsdk-log-configmap
+  namespace: {{ .Values.nsPrefix }}-portal
+data:
+{{ tpl (.Files.Glob "resources/config/log/portal/onapportalsdk/logback.xml").AsConfig . | indent 2 }}
+#{{ end }}
+
index fafcad2..9db512a 100755 (executable)
@@ -5,6 +5,7 @@ metadata:
   name: portaldb
   namespace: "{{ .Values.nsPrefix }}-portal"
 spec:
+  replicas: {{ .Values.portalDbReplicas }}
   selector:
     matchLabels:
       app: portaldb
index 536ac37..3a80572 100644 (file)
@@ -6,6 +6,14 @@ metadata:
 data:
 {{ (.Files.Glob "resources/vnc/init_profile/profiles.ini").AsConfig | indent 2 }}
 ---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: vnc-update-hosts
+  namespace: "{{ .Values.nsPrefix }}-portal"
+data:
+{{ (.Files.Glob "resources/scripts/update_hosts.sh").AsConfig | indent 2 }}
+---
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
@@ -14,6 +22,7 @@ metadata:
   name: vnc-portal
   namespace: "{{ .Values.nsPrefix }}-portal"
 spec:
+  replicas: {{ .Values.vncPortalReplicas }}
   selector:
     matchLabels:
       app: vnc-portal
@@ -71,21 +80,13 @@ spec:
         image: {{ .Values.image.readiness }}
         imagePullPolicy: {{ .Values.pullPolicy }}
         name: vnc-vid-readiness
-      - command: ["/bin/sh","-c"]
-        args: ["echo `host sdc-be.{{ .Values.nsPrefix }}-sdc | awk ''{print$4}''` sdc.api.be.simpledemo.onap.org  >> /ubuntu-init/hosts; echo `host portalapps.{{ .Values.nsPrefix }}-portal | awk ''{print$4}''` portal.api.simpledemo.onap.org  >> /ubuntu-init/hosts; echo `host pap.{{ .Values.nsPrefix }}-policy | awk ''{print$4}''` policy.api.simpledemo.onap.org  >> /ubuntu-init/hosts; echo `host sdc-fe.{{ .Values.nsPrefix }}-sdc | awk ''{print$4}''` sdc.api.simpledemo.onap.org  >> /ubuntu-init/hosts; echo `host vid-server.{{ .Values.nsPrefix }}-vid | awk ''{print$4}''` vid.api.simpledemo.onap.org >> /ubuntu-init/hosts; echo `host sparky-be.{{ .Values.nsPrefix }}-aai | awk ''{print$4}''` aai.api.simpledemo.onap.org  >> /ubuntu-init/hosts"]
-        image: {{ .Values.image.ubuntuInit }}
-        imagePullPolicy: {{ .Values.pullPolicy }}
-        name: vnc-init-hosts
-        volumeMounts:
-        - name: ubuntu-init
-          mountPath: /ubuntu-init/
       containers:
       - image: {{ .Values.image.ubuntuDesktop }}
         imagePullPolicy: {{ .Values.pullPolicy }}
         lifecycle:
           postStart:
             exec:
-              command: ["/bin/sh", "-c", "mkdir -p /root/.mozilla/firefox/onap.default; cp /root/.init_profile/profiles.ini /root/.mozilla/firefox/; echo 'user_pref(\"browser.tabs.remote.autostart.2\", false);' > /root/.mozilla/firefox/onap.default/prefs.js; cat /ubuntu-init/hosts >> /etc/hosts"]
+              command: ["/bin/sh", "-c", "mkdir -p /root/.mozilla/firefox/onap.default; cp /root/.init_profile/profiles.ini /root/.mozilla/firefox/; echo 'user_pref(\"browser.tabs.remote.autostart.2\", false);' > /root/.mozilla/firefox/onap.default/prefs.js; (while true; do /tmp/update_hosts.sh sdc-be.{{ .Values.nsPrefix }}-sdc sdc.api.be.simpledemo.onap.org; /tmp/update_hosts.sh portalapps.{{ .Values.nsPrefix }}-portal portal.api.simpledemo.onap.org; /tmp/update_hosts.sh pap.{{ .Values.nsPrefix }}-policy policy.api.simpledemo.onap.org; /tmp/update_hosts.sh sdc-fe.{{ .Values.nsPrefix }}-sdc sdc.api.simpledemo.onap.org; /tmp/update_hosts.sh vid-server.{{ .Values.nsPrefix }}-vid vid.api.simpledemo.onap.org; /tmp/update_hosts.sh sparky-be.{{ .Values.nsPrefix }}-aai aai.api.simpledemo.onap.org; /tmp/update_hosts.sh cli.{{ .Values.nsPrefix }}-cli cli.api.simpledemo.onap.org; sleep 10; done)  > update_hosts.log 2>&1 &"]
         env:
         - name: VNC_PASSWORD
           value: password
@@ -94,11 +95,12 @@ spec:
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
-        - mountPath: /ubuntu-init/
-          name: ubuntu-init
         - mountPath: /root/.init_profile/profiles.ini
           name: vnc-profiles-ini
           subPath: profiles.ini
+        - mountPath: /tmp/update_hosts.sh
+          name: vnc-update-hosts
+          subPath: update_hosts.sh
         securityContext:
           privileged: true
       securityContext: {}
@@ -106,11 +108,13 @@ spec:
         - name: localtime
           hostPath:
             path: /etc/localtime
-        - name: ubuntu-init
-          emptyDir: {}
         - name: vnc-profiles-ini
           configMap:
             name: vnc-profiles-ini
+        - name: vnc-update-hosts
+          configMap:
+            name: vnc-update-hosts
+            defaultMode: 0755
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
 #{{ end }}
index 6004193..6f27ef6 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: portalwidgets
   namespace: "{{ .Values.nsPrefix }}-portal"
 spec:
+  replicas: {{ .Values.portalWidgetsReplicas }}
   selector:
     matchLabels:
       app: portalwidgets
@@ -31,7 +32,7 @@ spec:
         name: portalapps-readiness
       containers:
       - image: {{ .Values.image.portalwms }}
-        imagePullPolicy: {{ .Values.pullPolicy }} 
+        imagePullPolicy: {{ .Values.pullPolicy }}
         name: portalwidgets
         volumeMounts:
         - mountPath: /etc/localtime
@@ -39,6 +40,7 @@ spec:
           readOnly: true
         - mountPath: /application.properties
           name: portalwidgets-application-properties
+          subPath: application.properties
         ports:
         - containerPort: 8082
         readinessProbe:
@@ -51,8 +53,8 @@ spec:
           hostPath:
             path: /etc/localtime
         - name: portalwidgets-application-properties
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/portal/portal-fe/webapps/etc/ONAPWIDGETMS/application.properties
+          configMap:
+            name: portal-onapwidgetms-configmap
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
 #{{ end }}
index aaa7819..f34e6a0 100644 (file)
@@ -1,14 +1,18 @@
 nsPrefix: onap
 pullPolicy: Always
 nodePortPrefix: 302
+portalAppsReplicas: 1
+portalDbReplicas: 1
+vncPortalReplicas: 1
+portalWidgetsReplicas: 1
 image:
   readiness: oomk8s/readiness-check:1.0.0
   portalapps: nexus3.onap.org:10001/onap/portal-apps:v1.3.0
   portaldb: nexus3.onap.org:10001/onap/portal-db:v1.3.0
   mariadbClient: oomk8s/mariadb-client-init:1.0.0
   portalwms: nexus3.onap.org:10001/onap/portal-wms:v1.3.0
-  ubuntuInit: oomk8s/ubuntu-init:1.0.0
   ubuntuDesktop: dorowu/ubuntu-desktop-lxde-vnc
   filebeat: docker.elastic.co/beats/filebeat:5.5.0
+  ubuntuInit: oomk8s/ubuntu-init:1.0.0
 onapPortal:
   webappsDir: "/opt/apache-tomcat-8.0.37/webapps"
index c5b55ee..6d2edef 100644 (file)
@@ -1,8 +1,13 @@
 #!/usr/bin/python
-from kubernetes import client, config
-import time, argparse, logging, sys, os
+import getopt
+import logging
+import os
+import sys
+import time
 
-#extract env variables.
+from kubernetes import client
+
+# extract env variables.
 namespace = os.environ['NAMESPACE']
 cert = os.environ['CERT']
 host = os.environ['KUBERNETES_SERVICE_HOST']
@@ -11,12 +16,7 @@ token_path = os.environ['TOKEN']
 with open(token_path, 'r') as token_file:
     token = token_file.read().replace('\n', '')
 
-client.configuration.host = "https://" + host
-client.configuration.ssl_ca_cert = cert
-client.configuration.api_key['authorization'] = token
-client.configuration.api_key_prefix['authorization'] = 'Bearer'
-
-#setup logging
+# setup logging
 log = logging.getLogger(__name__)
 handler = logging.StreamHandler(sys.stdout)
 handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
@@ -24,25 +24,29 @@ handler.setLevel(logging.INFO)
 log.addHandler(handler)
 log.setLevel(logging.INFO)
 
+configuration = client.Configuration()
+configuration.host = "https://" + host
+configuration.ssl_ca_cert = cert
+configuration.api_key['authorization'] = token
+configuration.api_key_prefix['authorization'] = 'Bearer'
+coreV1Api = client.CoreV1Api(client.ApiClient(configuration))
 
 def is_ready(container_name):
-    log.info( "Checking if " + container_name + "  is ready")
-    # config.load_kube_config() # for local testing
-    # namespace='onap-sdc' # for local testing
-    v1 = client.CoreV1Api()
-
     ready = False
-
+    log.info("Checking if " + container_name + "  is ready")
     try:
-        response = v1.list_namespaced_pod(namespace=namespace, watch=False)
+        response = coreV1Api.list_namespaced_pod(namespace=namespace, watch=False)
         for i in response.items:
+            # container_statuses can be None, which is non-iterable.
+            if i.status.container_statuses is None:
+                continue
             for s in i.status.container_statuses:
                 if s.name == container_name:
                     ready = s.ready
                     if not ready:
-                        log.info( container_name + " is not ready.")
+                        log.info(container_name + " is not ready.")
                     else:
-                        log.info( container_name + " is ready!")
+                        log.info(container_name + " is ready!")
                 else:
                     continue
         return ready
@@ -50,27 +54,49 @@ def is_ready(container_name):
         log.error("Exception when calling list_namespaced_pod: %s\n" % e)
 
 
-def main(args):
+DEF_TIMEOUT = 10
+DESCRIPTION = "Kubernetes container readiness check utility"
+USAGE = "Usage: ready.py [-t <timeout>] -c <container_name> [-c <container_name> ...]\n" \
+        "where\n" \
+        "<timeout> - wait for container readiness timeout in min, default is " + str(DEF_TIMEOUT) + "\n" \
+        "<container_name> - name of the container to wait for\n"
+
+def main(argv):
     # args are a list of container names
-    for container_name in args:
-        # 5 min, TODO: make configurable
-        timeout = time.time() + 60 * 10
+    container_names = []
+    timeout = DEF_TIMEOUT
+    try:
+        opts, args = getopt.getopt(argv, "hc:t:", ["container-name=", "timeout=", "help"])
+        for opt, arg in opts:
+            if opt in ("-h", "--help"):
+                print("%s\n\n%s" % (DESCRIPTION, USAGE))
+                sys.exit()
+            elif opt in ("-c", "--container-name"):
+                container_names.append(arg)
+            elif opt in ("-t", "--timeout"):
+                timeout = float(arg)
+    except (getopt.GetoptError, ValueError) as e:
+        print("Error parsing input parameters: %s\n" % e)
+        print(USAGE)
+        sys.exit(2)
+    if container_names.__len__() == 0:
+        print("Missing required input parameter(s)\n")
+        print(USAGE)
+        sys.exit(2)
+
+    for container_name in container_names:
+        timeout = time.time() + timeout * 60
         while True:
             ready = is_ready(container_name)
             if ready is True:
                 break
             elif time.time() > timeout:
-                log.warning( "timed out waiting for '" + container_name + "' to be ready")
+                log.warning("timed out waiting for '" + container_name + "' to be ready")
                 exit(1)
             else:
                 time.sleep(5)
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='Process some names.')
-    parser.add_argument('--container-name', action='append', required=True, help='A container name')
-    args = parser.parse_args()
-    arg_dict = vars(args)
+    main(sys.argv[1:])
 
-    for arg in arg_dict.itervalues():
-        main(arg)
index 2836fca..251388b 100755 (executable)
@@ -30,6 +30,9 @@ function usage
        echo " "
        echo "       demo.sh deleteVNF <module_name from instantiateVFW>"
     echo "               - Delete the module created by instantiateVFW"
+       echo " "
+       echo "       demo.sh heatbridge <stack_name> <service_instance_id> <service>"
+    echo "               - Run heatbridge against the stack for the given service instance and service"
 }
 
 # Set the defaults
@@ -113,6 +116,20 @@ do
                                echo "Cache file ${VARFILE} is not found"
                                exit
                        fi
+      shift
+                       ;;
+       heatbridge)
+                       TAG="heatbridge"
+                       shift
+                       if [ $# -ne 3 ];then
+                               echo "Usage: demo.sh heatbridge <stack_name> <service_instance_id> <service>"
+                               exit
+                       fi
+                       VARIABLES="$VARIABLES -v HB_STACK:$1"
+                       shift
+                       VARIABLES="$VARIABLES -v HB_SERVICE_INSTANCE_ID:$1"
+                       shift
+                       VARIABLES="$VARIABLES -v HB_SERVICE:$1"
                        shift
                        ;;
        *)
diff --git a/kubernetes/sdnc/resources/config/dmaap/dhcpalert.properties b/kubernetes/sdnc/resources/config/dmaap/dhcpalert.properties
new file mode 100644 (file)
index 0000000..34fceab
--- /dev/null
@@ -0,0 +1,35 @@
+TransportType=HTTPAUTH
+Latitude =50.000000
+Longitude =-100.000000
+Version =1.0
+ServiceName =dmaap-v1.dev.dmaap.dt.saat.acsi.openecomp.org/events
+Environment =TEST
+Partner = BOT_R
+routeOffer=MR1
+SubContextPath =/
+Protocol =http
+MethodType =GET
+username =admin
+password =admin
+contenttype =application/json
+authKey=fxoW4jZrO7mdLWWa:f4KxkoBtToyoEG7suMoV8KhnkwM=
+authDate=2016-02-18T13:57:37-0800
+host=dmaap.onap-message-router:3904
+topic=VCPE-DHCP-EVENT
+group=jmsgrp
+id=sdnc1
+timeout=15000
+limit=1000
+filter=
+AFT_DME2_EXCHANGE_REQUEST_HANDLERS=com.att.nsa.test.PreferredRouteRequestHandler
+AFT_DME2_EXCHANGE_REPLY_HANDLERS=com.att.nsa.test.PreferredRouteReplyHandler
+AFT_DME2_REQ_TRACE_ON=true
+AFT_ENVIRONMENT=AFTUAT
+AFT_DME2_EP_CONN_TIMEOUT=15000
+AFT_DME2_ROUNDTRIP_TIMEOUT_MS=240000
+AFT_DME2_EP_READ_TIMEOUT_MS=50000
+sessionstickinessrequired=NO
+DME2preferredRouterFilePath=/opt/onap/sdnc/data/properties/dmaap-listener.preferredRoute.txt
+sdnc.odl.user=admin
+sdnc.odl.password=admin
+sdnc.odl.url-base=https://sdnhost.onap-sdnc:8443/restconf/operations
\ No newline at end of file
diff --git a/kubernetes/sdnc/resources/config/log/filebeat/log4j/filebeat.yml b/kubernetes/sdnc/resources/config/log/filebeat/log4j/filebeat.yml
new file mode 100644 (file)
index 0000000..79c9a08
--- /dev/null
@@ -0,0 +1,49 @@
+filebeat.prospectors:
+#it is mandatory, in our case it's log
+- input_type: log
+  #This is the canonical path as mentioned in logback.xml; *.* means it will monitor all files in the directory.
+  paths:
+    - /var/log/onap/*/*/*/*.log
+    - /var/log/onap/*/*/*.log
+    - /var/log/onap/*/*.log
+
+  # The below commented properties are for time-based rolling policy. But as the log4j 1.2x does not support time-based rolling these properties are not set
+  #Files older than this should be ignored. In our case it will be 48 hours, i.e. 2 days. It is a helping flag for clean_inactive
+  #ignore_older: 48h
+  # Remove the registry entry for a file that is older than the specified time. In our case it will be 96 hours, i.e. 4 days. It will help to keep registry records within limits
+  #clean_inactive: 96h
+
+  #Multiline properties for log4j xml log events
+  multiline.pattern: '</log4j:event>'
+  multiline.negate: true
+  multiline.match: before
+  #multiline.max_lines: 500
+  #multiline.timeout: 5s
+
+# Name of the registry file. If a relative path is used, it is considered relative to the
+# data path. Else full qualified file name.
+#filebeat.registry_file: ${path.data}/registry
+
+
+output.logstash:
+  #List of logstash server ip addresses with port number.
+  #But, in our case, this will be the loadbalancer IP address.
+  #For the below property to work the loadbalancer or logstash should expose 5044 port to listen the filebeat events or port in the property should be changed appropriately.
+  hosts: ["logstash.onap-log:5044"]
+  #If enabled, will do load balancing among available Logstash instances, automatically.
+  loadbalance: true
+
+  #The list of root certificates for server verifications.
+  #If certificate_authorities is empty or not set, the trusted
+  #certificate authorities of the host system are used.
+  #ssl.certificate_authorities: $ssl.certificate_authorities
+
+  #The path to the certificate for SSL client authentication. If the certificate is not specified,
+  #client authentication is not available.
+  #ssl.certificate: $ssl.certificate
+
+  #The client certificate key used for client authentication.
+  #ssl.key: $ssl.key
+
+  #The passphrase used to decrypt an encrypted key stored in the configured key file
+  #ssl.key_passphrase: $ssl.key_passphrase
diff --git a/kubernetes/sdnc/resources/config/ueb/ueb-listener.properties b/kubernetes/sdnc/resources/config/ueb/ueb-listener.properties
new file mode 100644 (file)
index 0000000..0ced08b
--- /dev/null
@@ -0,0 +1,21 @@
+org.onap.ccsdk.sli.northbound.uebclient.asdc-address=sdc-be.onap-sdc:8443
+org.onap.ccsdk.sli.northbound.uebclient.consumer-group=sdc-OpenSource-Env1-sdnc-dockero
+org.onap.ccsdk.sli.northbound.uebclient.consumer-id=sdc-COpenSource-Env11-sdnc-dockero
+org.onap.ccsdk.sli.northbound.uebclient.environment-name=AUTO
+org.onap.ccsdk.sli.northbound.uebclient.password=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
+org.onap.ccsdk.sli.northbound.uebclient.user=sdnc
+org.onap.ccsdk.sli.northbound.uebclient.sdnc-user=admin
+org.onap.ccsdk.sli.northbound.uebclient.sdnc-passwd=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
+org.onap.ccsdk.sli.northbound.uebclient.asdc-api-base-url=http://sdnhost.onap-sdnc:8282/restconf/operations/
+org.onap.ccsdk.sli.northbound.uebclient.asdc-api-namespace=org:onap:ccsdk
+org.onap.ccsdk.sli.northbound.uebclient.spool.incoming=/opt/onap/sdnc/ueb-listener/spool/incoming
+org.onap.ccsdk.sli.northbound.uebclient.spool.archive=/opt/onap/sdnc/ueb-listener/spool/archive
+org.onap.ccsdk.sli.northbound.uebclient.polling-interval=30
+org.onap.ccsdk.sli.northbound.uebclient.polling-timeout=15
+org.onap.ccsdk.sli.northbound.uebclient.client-startup-timeout=900
+org.onap.ccsdk.sli.northbound.uebclient.relevant-artifact-types=YANG_XML,VF_LICENSE,TOSCA_CSAR,UCPE_LAYER_2_CONFIGURATION
+org.onap.ccsdk.sli.northbound.uebclient.activate-server-tls-auth=false
+org.onap.ccsdk.sli.northbound.uebclient.keystore-path=
+org.onap.ccsdk.sli.northbound.uebclient.keystore-password=
+org.onap.ccsdk.sli.northbound.uebclient.xslt-path-list=
+org.onap.ccsdk.sli.northbound.uebclient.artifact-map=/opt/onap/sdnc/data/properties/artifact.map
\ No newline at end of file
index e0f8b29..b9ec155 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: sdnc-dgbuilder
   namespace: "{{ .Values.nsPrefix }}-sdnc"
 spec:
+  replicas: {{ .Values.dgbuilderReplicas }}
   selector:
     matchLabels:
       app: sdnc-dgbuilder
diff --git a/kubernetes/sdnc/templates/dmaap-deployment-configmap.yaml b/kubernetes/sdnc/templates/dmaap-deployment-configmap.yaml
new file mode 100644 (file)
index 0000000..efd7d6d
--- /dev/null
@@ -0,0 +1,9 @@
+#{{ if not .Values.disableSdncDmaap }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sdnc-dmaap-configmap
+  namespace: {{ .Values.nsPrefix }}-sdnc
+data:
+{{ tpl (.Files.Glob "resources/config/dmaap/*").AsConfig . | indent 2 }}
+#{{ end }}
diff --git a/kubernetes/sdnc/templates/dmaap-deployment.yaml b/kubernetes/sdnc/templates/dmaap-deployment.yaml
new file mode 100644 (file)
index 0000000..eb6f354
--- /dev/null
@@ -0,0 +1,62 @@
+#{{ if not .Values.disableSdncDmaap }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: dmaap-listener
+  namespace: "{{ .Values.nsPrefix }}-sdnc"
+spec:
+  replicas: {{ .Values.dmaapReplicas }}
+  selector:
+    matchLabels:
+      app: dmaap-listener
+  template:
+    metadata:
+      labels:
+        app: dmaap-listener
+      name: dmaap-listener
+    spec:
+      initContainers:
+      - command:
+        - /root/ready.py
+        args:
+        - --container-name
+        - sdnc-db-container
+        - --container-name
+        - sdnc-controller-container
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        image: {{ .Values.image.readiness }}
+        imagePullPolicy: {{ .Values.pullPolicy }}
+        name: sdnc-dmaap-readiness
+      containers:
+      - command:
+        - /opt/onap/sdnc/dmaap-listener/bin/start-dmaap-listener.sh
+        env:
+        - name: PROPERTY_DIR
+          value: /opt/onap/sdnc/data/properties
+        - name: SDNC_CONFIG_DIR
+          value: /opt/onap/sdnc/data/properties
+        image: {{ .Values.image.dmaaplistener }}
+        imagePullPolicy: {{ .Values.pullPolicy }}
+        name: dmaapp-listener-container
+        volumeMounts:
+        - mountPath: /etc/localtime
+          name: localtime
+          readOnly: true
+        - mountPath: /opt/onap/sdnc/data/properties/dhcpalert.properties
+          subPath: dhcpalert.properties
+          name: dmaap-dhcapalert-config
+      volumes:
+        - name: localtime
+          hostPath:
+            path: /etc/localtime
+        - name: dmaap-dhcapalert-config
+          configMap:
+            name: sdnc-dmaap-configmap
+      imagePullSecrets:
+      - name: "{{ .Values.nsPrefix }}-docker-registry-key"
+#{{ end }}
index 9d3feef..a283d0f 100644 (file)
@@ -5,7 +5,7 @@ metadata:
   name: nfs-provisioner
   namespace: "{{ .Values.nsPrefix }}-sdnc"
 spec:
-  replicas: 1
+  replicas: {{ .Values.nfsReplicas }}
   strategy:
     type: Recreate
   template:
diff --git a/kubernetes/sdnc/templates/sdnc-conf-configmap.yaml b/kubernetes/sdnc/templates/sdnc-conf-configmap.yaml
new file mode 100644 (file)
index 0000000..e7cde3b
--- /dev/null
@@ -0,0 +1,9 @@
+#{{ if not .Values.disableSdncSdnc }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sdnc-conf-configmap
+  namespace: {{ .Values.nsPrefix }}-sdnc
+data:
+{{ tpl (.Files.Glob "resources/config/conf/*").AsConfig . | indent 2 }}
+#{{ end }}
diff --git a/kubernetes/sdnc/templates/sdnc-log-configmap.yaml b/kubernetes/sdnc/templates/sdnc-log-configmap.yaml
new file mode 100644 (file)
index 0000000..d13daac
--- /dev/null
@@ -0,0 +1,17 @@
+#{{ if not .Values.disableSdncSdnc }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sdnc-log-configmap
+  namespace: {{ .Values.nsPrefix }}-sdnc
+data:
+{{ tpl (.Files.Glob "resources/config/log/filebeat/log4j/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sdnc-logging-cfg-configmap
+  namespace: {{ .Values.nsPrefix }}-sdnc
+data:
+{{ tpl (.Files.Glob "resources/config/log/*").AsConfig . | indent 2 }}
+#{{ end }}
index 3ba37af..02950ba 100644 (file)
@@ -56,14 +56,17 @@ spec:
         - mountPath: /etc/localtime
           name: localtime
           readOnly: true
-        - mountPath: /opt/onap/sdnc/data/properties/aaiclient.properties
-          name: sdnc-aaiclient-properties
         - mountPath: /opt/onap/sdnc/data/properties/admportal.json
-          name: sdnc-admportal-json
+          name: sdnc-conf
+          subPath: admportal.json
+        - mountPath: /opt/onap/sdnc/data/properties/aaiclient.properties
+          name: sdnc-conf
+          subPath: aaiclient.properties
         - mountPath: /var/log/onap
           name: sdnc-logs
         - mountPath: /opt/opendaylight/current/etc/org.ops4j.pax.logging.cfg
-          name: sdnc-log-config
+          name: sdnc-logging-cfg-config
+          subPath: org.ops4j.pax.logging.cfg
         ports:
         - containerPort: 8181
         - containerPort: 8101
@@ -80,6 +83,7 @@ spec:
         volumeMounts:
         - mountPath: /usr/share/filebeat/filebeat.yml
           name: filebeat-conf
+          subPath: filebeat.yml
         - mountPath: /var/log/onap
           name: sdnc-logs
         - mountPath: /usr/share/filebeat/data
@@ -89,21 +93,25 @@ spec:
           hostPath:
             path: /etc/localtime
         - name: filebeat-conf
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/log4j/filebeat.yml
-        - name: sdnc-log-config
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/sdnc/org.ops4j.pax.logging.cfg
+          configMap:
+            name: sdnc-log-configmap
+        - name: sdnc-logging-cfg-config
+          configMap:
+            name: sdnc-logging-cfg-configmap
         - name: sdnc-logs
           emptyDir: {}
         - name: sdnc-data-filebeat
           emptyDir: {}
-        - name: sdnc-aaiclient-properties
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdnc/conf/aaiclient.properties
-        - name: sdnc-admportal-json
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdnc/conf/admportal.json
+        - name: sdnc-conf
+          configMap:
+            name: sdnc-conf-configmap
+            items:
+            - key: admportal.json
+              path: admportal.json
+              mode: 0755
+            - key: aaiclient.properties
+              path: aaiclient.properties
+              mode: 0755
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
 #{{ end }}
diff --git a/kubernetes/sdnc/templates/ueb-deployment-configmap.yaml b/kubernetes/sdnc/templates/ueb-deployment-configmap.yaml
new file mode 100644 (file)
index 0000000..4970de0
--- /dev/null
@@ -0,0 +1,9 @@
+#{{ if not .Values.disableSdncUeb }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sdnc-ueb-configmap
+  namespace: {{ .Values.nsPrefix }}-sdnc
+data:
+{{ tpl (.Files.Glob "resources/config/ueb/*").AsConfig . | indent 2 }}
+#{{ end }}
diff --git a/kubernetes/sdnc/templates/ueb-deployment.yaml b/kubernetes/sdnc/templates/ueb-deployment.yaml
new file mode 100644 (file)
index 0000000..c109acd
--- /dev/null
@@ -0,0 +1,62 @@
+#{{ if not .Values.disableSdncUeb }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: ueb-listener
+  namespace: "{{ .Values.nsPrefix }}-sdnc"
+spec:
+  replicas: {{ .Values.uebReplicas }}
+  selector:
+    matchLabels:
+      app: ueb-listener
+  template:
+    metadata:
+      labels:
+        app: ueb-listener
+      name: ueb-listener
+    spec:
+      initContainers:
+      - command:
+        - /root/ready.py
+        args:
+        - --container-name
+        - sdnc-db-container
+        - --container-name
+        - sdnc-controller-container
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        image: {{ .Values.image.readiness }}
+        imagePullPolicy: {{ .Values.pullPolicy }}
+        name: sdnc-ueb-readiness
+      containers:
+      - command:
+        - /opt/onap/sdnc/ueb-listener/bin/start-ueb-listener.sh
+        env:
+        - name: PROPERTY_DIR
+          value: /opt/onap/sdnc/data/properties
+        - name: SDNC_CONFIG_DIR
+          value: /opt/onap/sdnc/data/properties
+        image: {{ .Values.image.ueblistener }}
+        imagePullPolicy: {{ .Values.pullPolicy }}
+        name: ueb-listener-container
+        volumeMounts:
+        - mountPath: /etc/localtime
+          name: localtime
+          readOnly: true
+        - mountPath: /opt/onap/sdnc/data/properties/ueb-listener.properties
+          subPath: ueb-listener.properties
+          name: ueb-config
+      volumes:
+        - name: localtime
+          hostPath:
+            path: /etc/localtime
+        - name: ueb-config
+          configMap:
+            name: sdnc-ueb-configmap
+      imagePullSecrets:
+      - name: "{{ .Values.nsPrefix }}-docker-registry-key"
+#{{ end }}
index fcd82a4..73d8e0a 100644 (file)
@@ -5,6 +5,7 @@ metadata:
   name: sdnc-portal
   namespace: "{{ .Values.nsPrefix }}-sdnc"
 spec:
+  replicas: {{ .Values.portalReplicas }}
   selector:
     matchLabels:
       app: sdnc-portal
@@ -50,10 +51,8 @@ spec:
         - name: localtime
           mountPath: /etc/localtime
           readOnly: true
-        - mountPath: /opt/onap/sdnc/data/properties/aaiclient.properties
-          name: sdnc-aaiclient-properties
-        - mountPath: /opt/onap/sdnc/data/properties/admportal.json
-          name: sdnc-admportal-json
+        - mountPath: /opt/onap/sdnc/data/properties/
+          name: sdnc-conf
         readinessProbe:
           tcpSocket:
             port: 8843
@@ -64,12 +63,9 @@ spec:
       - name: localtime
         hostPath:
           path: /etc/localtime
-      - name: sdnc-aaiclient-properties
-        hostPath:
-          path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdnc/conf/aaiclient.properties
-      - name: sdnc-admportal-json
-        hostPath:
-          path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdnc/conf/admportal.json
+      - name: sdnc-conf
+        configMap:
+          name: sdnc-conf-configmap
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
 #{{ end }}
index 5272b65..e91dc2a 100644 (file)
@@ -8,14 +8,16 @@ image:
   dgbuilderSdnc: nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:v0.1.0
   sdnc: nexus3.onap.org:10001/onap/sdnc-image:v1.2.1
   admportalSdnc: nexus3.onap.org:10001/onap/admportal-sdnc-image:v1.2.1
+  ueblistener: nexus3.onap.org:10001/onap/sdnc-ueb-listener-image:v1.2.1
+  dmaaplistener: nexus3.onap.org:10001/onap/sdnc-dmaap-listener-image:v1.2.1
   filebeat: docker.elastic.co/beats/filebeat:5.5.0
 enableODLCluster: false
 numberOfODLReplicas: 1
 numberOfDbReplicas: 1
+dgbuilderReplicas: 1
+dmaapReplicas: 1
+nfsReplicas: 1
+uebReplicas: 1
+portalReplicas: 1
 disableSdncSdncDgbuilder: false
 disableSdncSdncPortal: false
-
-# FIXME
-# Missing the following deps
-# nexus3.onap.org:10001/onap/sdnc-dmaap-listener-image:v1.2.1
-# nexus3.onap.org:10001/onap/sdnc-ueb-listener-image:v1.2.1
diff --git a/kubernetes/vid/resources/config/log/filebeat/filebeat.yml b/kubernetes/vid/resources/config/log/filebeat/filebeat.yml
new file mode 100644 (file)
index 0000000..f316b86
--- /dev/null
@@ -0,0 +1,41 @@
+filebeat.prospectors:
+#it is mandatory, in our case it's log
+- input_type: log
+  #This is the canonical path as mentioned in logback.xml; *.* means it will monitor all files in the directory.
+  paths:
+    - /var/log/onap/*/*/*/*.log
+    - /var/log/onap/*/*/*.log
+    - /var/log/onap/*/*.log
+  #Files older than this should be ignored. In our case it will be 48 hours, i.e. 2 days. It is a helping flag for clean_inactive
+  ignore_older: 48h
+  # Remove the registry entry for a file that is older than the specified time. In our case it will be 96 hours, i.e. 4 days. It will help to keep registry records within limits
+  clean_inactive: 96h
+
+
+# Name of the registry file. If a relative path is used, it is considered relative to the
+# data path. Else full qualified file name.
+#filebeat.registry_file: ${path.data}/registry
+
+
+output.logstash:
+  #List of logstash server ip addresses with port number.
+  #But, in our case, this will be the loadbalancer IP address.
+  #For the below property to work the loadbalancer or logstash should expose 5044 port to listen the filebeat events or port in the property should be changed appropriately.
+  hosts: ["logstash.onap-log:5044"]
+  #If enabled, will do load balancing among available Logstash instances, automatically.
+  loadbalance: true
+
+  #The list of root certificates for server verifications.
+  #If certificate_authorities is empty or not set, the trusted
+  #certificate authorities of the host system are used.
+  #ssl.certificate_authorities: $ssl.certificate_authorities
+
+  #The path to the certificate for SSL client authentication. If the certificate is not specified,
+  #client authentication is not available.
+  #ssl.certificate: $ssl.certificate
+
+  #The client certificate key used for client authentication.
+  #ssl.key: $ssl.key
+
+  #The passphrase used to decrypt an encrypted key stored in the configured key file
+  #ssl.key_passphrase: $ssl.key_passphrase
diff --git a/kubernetes/vid/templates/vid-lfconfig-configmap.yaml b/kubernetes/vid/templates/vid-lfconfig-configmap.yaml
new file mode 100644 (file)
index 0000000..1cc3f21
--- /dev/null
@@ -0,0 +1,9 @@
+#{{ if not .Values.disableVidVidMariadb }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: vid-lfconfig-configmap
+  namespace: {{ .Values.nsPrefix }}-vid
+data:
+{{ tpl (.Files.Glob "resources/config/lf_config/*").AsConfig . | indent 2 }}
+#{{ end }}
diff --git a/kubernetes/vid/templates/vid-log-configmap.yaml b/kubernetes/vid/templates/vid-log-configmap.yaml
new file mode 100644 (file)
index 0000000..00481d2
--- /dev/null
@@ -0,0 +1,17 @@
+#{{ if not .Values.disableVidVidServer }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: vid-log-configmap
+  namespace: {{ .Values.nsPrefix }}-vid
+data:
+{{ tpl (.Files.Glob "resources/config/log/vid/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: vid-filebeat-configmap
+  namespace: {{ .Values.nsPrefix }}-vid
+data:
+{{ tpl (.Files.Glob "resources/config/log/filebeat/*").AsConfig . | indent 2 }}
+#{{ end }}
index 55ef5da..bf23c3f 100644 (file)
@@ -7,6 +7,7 @@ metadata:
   name: vid-mariadb
   namespace: "{{ .Values.nsPrefix }}-vid"
 spec:
+  replicas: {{ .Values.vidMariaDbReplicas }}
   selector:
     matchLabels:
       app: vid-mariadb
@@ -36,9 +37,11 @@ spec:
         - mountPath: /var/lib/mysql
           name: vid-mariadb-data
         - mountPath: /docker-entrypoint-initdb.d/vid-pre-init.sql
-          name: vid-pre-init
+          name: vid-lfconfig
+          subPath: vid-pre-init.sql
         - mountPath: /etc/mysql/my.cnf
-          name: my-cnf
+          name: vid-lfconfig
+          subPath: my.cnf
         ports:
         - containerPort: 3306
         readinessProbe:
@@ -53,12 +56,15 @@ spec:
         - name: vid-mariadb-data
           persistentVolumeClaim:
             claimName: vid-db
-        - name: vid-pre-init
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/vid/vid/lf_config/vid-pre-init.sql
-        - name: my-cnf
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/vid/vid/lf_config/vid-my.cnf
+        - name: vid-lfconfig
+          configMap:
+            name: vid-lfconfig-configmap
+            defaultMode: 0755
+            items:
+            - key: vid-my.cnf
+              path: my.cnf
+            - key: vid-pre-init.sql
+              path: vid-pre-init.sql
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }}
\ No newline at end of file
+#{{ end }}
index f513a87..3315d09 100644 (file)
@@ -13,7 +13,7 @@ spec:
     - ReadWriteMany
   persistentVolumeReclaimPolicy: Retain
   hostPath:
-    path: /dockerdata-nfs/{{ .Values.nsPrefix }}/vid/mariadb/data
+    path: {{ .Values.dataRootDir }}/{{ .Values.nsPrefix }}/vid/mariadb/data
 ---
 kind: PersistentVolumeClaim
 apiVersion: v1
@@ -29,4 +29,4 @@ spec:
   selector:
     matchLabels:
       name: "{{ .Values.nsPrefix }}-vid-db"
-#{{ end }}
\ No newline at end of file
+#{{ end }}
index 832fdeb..7f9cb65 100644 (file)
@@ -7,6 +7,7 @@ metadata:
   name: vid-server
   namespace: "{{ .Values.nsPrefix }}-vid"
 spec:
+  replicas: {{ .Values.vidServerReplicas }}
   selector:
     matchLabels:
       app: vid-server
@@ -73,7 +74,7 @@ spec:
           value: Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
         - name: VID_MYSQL_MAXCONNECTIONS
           value: "5"
-        image: {{ .Values.image.vid }} 
+        image: {{ .Values.image.vid }}
         imagePullPolicy: {{ .Values.pullPolicy }}
         name: vid-server
         lifecycle:
@@ -90,6 +91,7 @@ spec:
           name: vid-logs
         - mountPath: /tmp/logback.xml
           name: vid-logback
+          subPath: logback.xml
         readinessProbe:
           tcpSocket:
             port: 8080
@@ -101,6 +103,7 @@ spec:
         volumeMounts:
         - mountPath: /usr/share/filebeat/filebeat.yml
           name: filebeat-conf
+          subPath: filebeat.yml
         - mountPath: /var/log/onap
           name: vid-logs
         - mountPath: /usr/share/filebeat/data
@@ -110,15 +113,15 @@ spec:
           hostPath:
             path: /etc/localtime
         - name: filebeat-conf
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/logback/filebeat.yml
+          configMap:
+            name: vid-filebeat-configmap
         - name: vid-logs
           emptyDir: {}
         - name: vid-data-filebeat
           emptyDir: {}
         - name: vid-logback
-          hostPath:
-            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/vid/logback.xml
+          configMap:
+            name: vid-log-configmap
       imagePullSecrets:
       - name: "{{ .Values.nsPrefix }}-docker-registry-key"
 #{{ end }}
index 9b1748f..6463588 100644 (file)
@@ -1,8 +1,11 @@
 nsPrefix: onap
 pullPolicy: Always
 nodePortPrefix: 302
+dataRootDir: /dockerdata-nfs
+vidMariaDbReplicas: 1
+vidServerReplicas: 1
 image:
   readiness: oomk8s/readiness-check:1.0.0
   mariadb: nexus3.onap.org:10001/library/mariadb:10
   vid: nexus3.onap.org:10001/openecomp/vid:v1.1.1
-  filebeat: docker.elastic.co/beats/filebeat:5.5.0
\ No newline at end of file
+  filebeat: docker.elastic.co/beats/filebeat:5.5.0