2 ###################################
3 # Resources configuration entries #
4 ###################################
6 # Resource host information
8 # Directory on resource host where tars with resources are present
11 # tarfile name within resources_dir directory with offline infrastructure binaries.
12 # Content of APP_BINARY_RESOURCES_DIR (defined in package.conf) packaged by package.sh to single tar file.
15 # tarfile name within resources_dir directory with auxiliary resources.
16 # Content of APP_AUX_BINARIES (defined in package.conf) packaged by package.sh to single tar file.
# the purpose of auxiliary resources is to provide the user with an interface
# to distribute a tar file with application-specific files to the infra node.
19 # Docker images in tar format are currently the only supported content of aux_resources package.
20 aux_resources_filename:
22 # resources can be exported via nfs
23 # default is no - client will use ssh
# if set to yes but nfs-utils is missing, then fall back to ssh
27 # Infra node specific information
29 # Offline solution source data binaries (resources_filename tar) will be
30 # decompressed in this directory on target infra server.
31 # e.g. app_data_path: /opt/onap
34 # Path for auxiliary data in target infra server.
# Data from the resource host defined by the aux_resources_filename variable is placed in this directory.
36 # Currently docker images in tar format are supported (see runtime_images parameter).
37 # Could be used for other kind of application specific data also.
38 # e.g. aux_data_path: /opt/onap/my_extra_pods_docker_images
39 aux_data_path: "{{ app_data_path }}/runtime_images_source_dir"
42 ##########################################
43 # Offline Infrastructure specific params #
44 ##########################################
46 # information from which rootCA is created
48 # organization_name: Samsung
49 # state_or_province_name: Poland
51 # locality_name: Krakow
54 state_or_province_name:
58 # Force k8s cluster redeploy if it exists already
59 # Default value is to allow redeploy
62 # Distribute offline software package (rpm,apt) repository
63 deploy_package_repository: yes
# Offline solution deploys an app-specific rpm repository and requires a name
# for it, also used for the k8s cluster
# runtime_images provides a way to insert docker images
71 # into nexus during infrastructure playbook execution (populated to nexus at runtime).
72 # images specified must be available inside aux_resources_filename
73 # tar file that is extracted by installer into aux_data_path directory in infra server.
74 # Source format of an image is .tar file in aux_data_path directory and all .tar
75 # files in that dir are checked to match runtime_images definition.
# if runtime_images are not specified, nothing is inserted on top of the existing
# prebuilt nexus blob at installation time.
78 # Component name must match with tar filename!
80 # aaa-component-0.0.1.tar is expected in aux_data_path for aaa-component image
82 # aaa-component-0.0.1:
83 # registry: "nexus3.onap.org:10001"
84 # path: "/onap/components/aaa-component"
88 ###############################
89 # Application specific params #
90 ###############################
92 # App Helm charts directory location in installation package.
# The path is an absolute path (even though it is located inside this sw package
# installation folder) because it must be visible to the ansible docker/chroot
# process, so it can find the directory and transfer it into the machine (infra node) running
97 # Content of the folder must be Helm chart directories of the app with Makefile.
98 # In case of ONAP OOM it would be <oom_repo>/kubernetes folder content.
99 # NOTE: This default value should not be changed if not really needed and it
100 # must match with the variable "HELM_CHARTS_DIR_IN_PACKAGE" value in package.sh
102 app_helm_charts_install_directory: "/ansible/application/helm_charts"
104 # Specify target dir where helm charts are copied into on infra node.
105 # (same as content of "app_helm_charts_install_directory" copied by installer to this dir.)
106 # This must be directory with all charts and Makefile.
107 # e.g. app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts"
108 app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts"
110 # Main Helm chart to install
111 # e.g. app_helm_chart_name: onap
112 app_helm_chart_name: "{{ app_name }}"
114 # Targets for helm charts repository build
115 # e.g. for ONAP Casablanca
116 # app_helm_build_targets:
119 app_helm_build_targets:
121 # Directory with helm plugins
122 # It's an optional parameter used e.g. in OOM Casablanca
123 # app_helm_plugins_directory: "{{ app_helm_charts_install_directory}}/helm/plugins/"
124 app_helm_plugins_directory: "{{ app_helm_charts_install_directory}}/helm/plugins/"
126 # Helm release name (visible in POD names) used by Helm
127 # e.g. app_helm_release_name: onap
128 app_helm_release_name: "{{ app_name }}"
130 # Kubernetes namespace where application is installed
131 # e.g. app_kubernetes_namespace: onap
132 app_kubernetes_namespace: "{{ app_name }}"
134 # Optional application custom Ansible roles name for pre and post install logic.
135 # Location of additional custom roles is defined in ansible.cfg with roles_path.
136 # e.g. application_pre_install_role: "{{ app_name }}-patch-role"
137 application_pre_install_role:
138 application_post_install_role:
140 # any other application specific params can be specified in this file
143 # openStackKeyStoneUrl: "http://1.2.3.4:5000"
144 # openStackServiceTenantName: "services"
145 # openStackDomain: "Default"
146 # openStackUserName: "admin"
147 # openStackEncryptedPassword: "f7920677e15e2678b0f33736189e8965"