---
###################################
# Resources configuration entries #
###################################

# Resource host information

# folder on resource host where tars with resources are present
resources_dir:

# tarfile name within this folder with offline infrastructure sw
resources_filename:

# the purpose of auxiliary resources is to provide a way to distribute
# another big tar to the infra node, which might be useful later on
# in application playbooks; optional param
aux_resources_filename:

# resources can be exported via nfs
# default is no - client will use ssh
# if set to yes but nfs-utils is missing then fallback to ssh is used
resources_on_nfs: no

# Infra node specific information

# offline solution source data binaries will be decompressed in the following dir on the infra node
# e.g. app_data_path: /opt/onap
app_data_path:

# additional data path for auxiliary data transfer
# e.g. aux_data_path: /opt/onap/onap_me_docker_images
aux_data_path:

##########################################
# Offline Infrastructure specific params #
##########################################

# information from which the rootCA is created
# e.g.
# organization_name: Samsung
# state_or_province_name: Poland
# country_name: PL
# locality_name: Krakow
certificates:
  organization_name:
  state_or_province_name:
  country_name:
  locality_name:

# Force k8s cluster redeploy if it exists already
# Default value is to allow redeploy
redeploy_k8s_env: yes

# Distribute offline rpm repository
# Default value is to distribute rpm
deploy_rpm_repository: yes

# Offline solution deploys an app specific rpm repository and requires a name
# for it, which is also used for the k8s cluster
# e.g. app_name: ONAP
app_name:

# as the nexus blob is prepopulated during build time, the following
# runtime_images block provides an alternative way to insert the specified
# images into nexus during infrastructure playbook execution
# images specified here must be available inside the aux_resources_filename
# tar file
# if runtime_images is not specified, nothing is inserted on top of the
# existing prebuilt nexus blob at installation time
# the component name must match the tar filename
# e.g.
# aaa-component-0.0.1.tar is expected in aux_data_path for the aaa-component image
#runtime_images:
#  aaa-component-0.0.1:
#    registry: "nexus3.onap.org:10001"
#    path: "/onap/components/aaa-component"
#    tag: "latest"
runtime_images:
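# For illustration, the resources and infrastructure entries above might be
# filled in as follows; resources_dir and the tar filenames are assumptions
# for this sketch (there are no defaults), the rest reuses the e.g. values
# from the comments above:
#
# resources_dir: /data
# resources_filename: resources_package.tar
# aux_resources_filename: aux_package.tar
# resources_on_nfs: no
# app_data_path: /opt/onap
# aux_data_path: /opt/onap/onap_me_docker_images
# certificates:
#   organization_name: Samsung
#   state_or_province_name: Poland
#   country_name: PL
#   locality_name: Krakow
# app_name: ONAP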
###############################
# Application specific params #
###############################

# Project name to utilize the same codebase
# e.g. project_configuration: onap-me
project_configuration:

# App Helm charts dir, e.g. application/helm_charts/<xxx> where xxx is a charts folder name.
# Helm charts are expected to be inside the SW package somewhere inside ./ansible/application
# those will be available for the offline installer under /ansible/application/<xxx>
# for the OOM project helm charts are usually within the kubernetes sub-folder
# so the path for them can be:
# e.g. app_helm_charts_install_directory: "/ansible/application/oom/kubernetes"
app_helm_charts_install_directory:

# to specify the target dir where helm charts should be copied into on the infra node
# this should be a directory with all charts and the Makefile
# e.g. app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts"
app_helm_charts_infra_directory:

# Main Helm chart to install
# e.g. app_helm_chart_name: onap
app_helm_chart_name:

# Targets for helm charts repository build
# e.g. for ONAP Casablanca
# app_helm_build_targets:
#   - all
#   - onap
app_helm_build_targets:

# Directory with helm plugins
# It's an optional parameter used e.g. in OOM Casablanca
# app_helm_plugins_directory: "{{ app_helm_charts_infra_directory }}/kubernetes/helm/plugins/"
app_helm_plugins_directory:

# Helm release name (visible in POD names)
# e.g. app_helm_release_name: "{{ project_configuration }}"
app_helm_release_name:

# Kubernetes namespace where the application is installed
# e.g. app_kubernetes_namespace: onap
app_kubernetes_namespace:

# Optional custom Ansible role names for application pre and post install logic.
# Location of additional custom roles is defined in ansible.cfg with roles_path.
# e.g. application_pre_install_role: "{{ project_configuration }}-patch-role"
application_pre_install_role:
application_post_install_role:

# any other application specific params can be specified in this file
# e.g.
# onap_values:
#   openStackKeyStoneUrl: "http://1.2.3.4:5000"
#   openStackServiceTenantName: "services"
#   openStackDomain: "Default"
#   openStackUserName: "admin"
#   openStackEncryptedPassword: "f7920677e15e2678b0f33736189e8965"
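# For illustration, the application part filled in with the example values
# reused from the comments above (a sketch for an onap-me deployment, not a
# tested configuration):
#
# project_configuration: onap-me
# app_helm_charts_install_directory: "/ansible/application/oom/kubernetes"
# app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts"
# app_helm_chart_name: onap
# app_helm_build_targets:
#   - all
#   - onap
# app_helm_plugins_directory: "{{ app_helm_charts_infra_directory }}/kubernetes/helm/plugins/"
# app_helm_release_name: "{{ project_configuration }}"
# app_kubernetes_namespace: onap
# application_pre_install_role: "{{ project_configuration }}-patch-role"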