Replace virtlet with kubevirt in plugin_fw_v2
[multicloud/k8s.git] / kud / tests / _common.sh
1 #!/bin/bash
2 # SPDX-license-identifier: Apache-2.0
3 ##############################################################################
4 # Copyright (c) 2018
5 # All rights reserved. This program and the accompanying materials
6 # are made available under the terms of the Apache License, Version 2.0
7 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
9 ##############################################################################
10
11 set -o errexit
12 set -o nounset
13 set -o pipefail
14
15 packetgen_deployment_name=packetgen
16 sink_deployment_name=sink
17 firewall_deployment_name=firewall
18 image_name=virtlet.cloud/ubuntu/16.04
19 kubevirt_image=integratedcloudnative/fedora:33
20 multus_deployment_name=multus-deployment
21 virtlet_image=virtlet.cloud/fedora
22 virtlet_deployment_name=virtlet-deployment
23 kubevirt_vmi_name=kubevirt-vmi
24 plugin_deployment_name=plugin-deployment
25 plugin_service_name=plugin-service
26 ovn4nfv_deployment_name=ovn4nfv-deployment
27 onap_private_net=onap-private-net
28 unprotected_private_net=unprotected-private-net
29 protected_private_net=protected-private-net
30 ovn_multus_network_name=ovn-networkobj
31 rbd_metadata=rbd_metatada.json
32 rbp_metadata=rbp_metatada.json
33 rbp_instance=rbp_instance.json
34
35 # vFirewall vars
36 demo_artifacts_version=1.5.0
37 vfw_private_ip_0='192.168.10.3'
38 vfw_private_ip_1='192.168.20.2'
39 vfw_private_ip_2='10.10.100.3'
40 vpg_private_ip_0='192.168.10.2'
41 vpg_private_ip_1='10.0.100.2'
42 vsn_private_ip_0='192.168.20.3'
43 vsn_private_ip_1='10.10.100.4'
44 dcae_collector_ip='10.0.4.1'
45 dcae_collector_port='8081'
46 protected_net_gw='192.168.20.100'
47 protected_net_cidr='192.168.20.0/24'
48 protected_private_net_cidr='192.168.10.0/24'
49 onap_private_net_cidr='10.10.0.0/16'
50 sink_ipaddr='192.168.20.250'
51 multus_private_net_cidr='10.20.0.0/16'
52
53 # populate_CSAR_containers_vFW() - This function creates the content of CSAR file
54 # required for vFirewal using only containers
55 function populate_CSAR_containers_vFW {
56     local csar_id=$1
57
58     _checks_args $csar_id
59     pushd ${CSAR_DIR}/${csar_id}
60
61     cat << META > metadata.yaml
62 resources:
63   network:
64     - $unprotected_private_net.yaml
65     - $protected_private_net.yaml
66     - $onap_private_net.yaml
67   deployment:
68     - $packetgen_deployment_name.yaml
69     - $firewall_deployment_name.yaml
70     - $sink_deployment_name.yaml
71 META
72
73     cat << NET > $unprotected_private_net.yaml
74 apiVersion: "k8s.cni.cncf.io/v1"
75 kind: NetworkAttachmentDefinition
76 metadata:
77   name: $unprotected_private_net
78 spec:
79   config: '{
80     "name": "unprotected",
81     "type": "bridge",
82     "ipam": {
83         "type": "host-local",
84         "subnet": "$protected_private_net_cidr"
85     }
86 }'
87 NET
88
89     cat << NET > $protected_private_net.yaml
90 apiVersion: "k8s.cni.cncf.io/v1"
91 kind: NetworkAttachmentDefinition
92 metadata:
93   name: $protected_private_net
94 spec:
95   config: '{
96     "name": "protected",
97     "type": "bridge",
98     "ipam": {
99         "type": "host-local",
100         "subnet": "$protected_net_cidr"
101     }
102 }'
103 NET
104
105     cat << NET > $onap_private_net.yaml
106 apiVersion: "k8s.cni.cncf.io/v1"
107 kind: NetworkAttachmentDefinition
108 metadata:
109   name: $onap_private_net
110 spec:
111   config: '{
112     "name": "onap",
113     "type": "bridge",
114     "ipam": {
115         "type": "host-local",
116         "subnet": "$onap_private_net_cidr"
117     }
118 }'
119 NET
120
121     cat << DEPLOYMENT > $packetgen_deployment_name.yaml
122 apiVersion: apps/v1
123 kind: Deployment
124 metadata:
125   name: $packetgen_deployment_name
126   labels:
127     app: vFirewall
128 spec:
129   replicas: 1
130   selector:
131     matchLabels:
132       app: vFirewall
133   template:
134     metadata:
135       labels:
136         app: vFirewall
137       annotations:
138         k8s.v1.cni.cncf.io/networks: '[
139             { "name": "$unprotected_private_net", "interfaceRequest": "eth1" },
140             { "name": "$onap_private_net", "interfaceRequest": "eth2" }
141         ]'
142     spec:
143       containers:
144       - name: $packetgen_deployment_name
145         image: electrocucaracha/packetgen
146         imagePullPolicy: IfNotPresent
147         tty: true
148         stdin: true
149         resources:
150           limits:
151             memory: 256Mi
152 DEPLOYMENT
153
154     cat << DEPLOYMENT > $firewall_deployment_name.yaml
155 apiVersion: apps/v1
156 kind: Deployment
157 metadata:
158   name: $firewall_deployment_name
159   labels:
160     app: vFirewall
161 spec:
162   replicas: 1
163   selector:
164     matchLabels:
165       app: vFirewall
166   template:
167     metadata:
168       labels:
169         app: vFirewall
170       annotations:
171         k8s.v1.cni.cncf.io/networks: '[
172             { "name": "$unprotected_private_net", "interfaceRequest": "eth1" },
173             { "name": "$protected_private_net", "interfaceRequest": "eth2" },
174             { "name": "$onap_private_net", "interfaceRequest": "eth3" }
175         ]'
176     spec:
177       containers:
178       - name: $firewall_deployment_name
179         image: electrocucaracha/firewall
180         imagePullPolicy: IfNotPresent
181         tty: true
182         stdin: true
183 DEPLOYMENT
184
185     cat << DEPLOYMENT > $sink_deployment_name.yaml
186 apiVersion: apps/v1
187 kind: Deployment
188 metadata:
189   name: $sink_deployment_name
190   labels:
191     app: vFirewall
192 spec:
193   replicas: 1
194   selector:
195     matchLabels:
196       app: vFirewall
197       context: darkstat
198   template:
199     metadata:
200       labels:
201         app: vFirewall
202         context: darkstat
203       annotations:
204         k8s.v1.cni.cncf.io/networks: '[
205             { "name": "$protected_private_net", "interfaceRequest": "eth1" },
206             { "name": "$onap_private_net", "interfaceRequest": "eth2" }
207         ]'
208     spec:
209       containers:
210       - name: $sink_deployment_name
211         image: electrocucaracha/sink
212         imagePullPolicy: IfNotPresent
213         tty: true
214         stdin: true
215         securityContext:
216           privileged: true
217       - name: darkstat
218         image: electrocucaracha/darkstat
219         imagePullPolicy: IfNotPresent
220         tty: true
221         stdin: true
222         ports:
223           - containerPort: 667
224 DEPLOYMENT
225     popd
226 }
227
228 # populate_CSAR_vms_containers_vFW() - This function creates the content of CSAR file
229 # required for vFirewal using an hybrid combination between virtual machines and
230 # cotainers
231 function populate_CSAR_vms_containers_vFW {
232     local csar_id=$1
233     ssh_key=$(cat $HOME/.ssh/id_rsa.pub)
234
235     _checks_args $csar_id
236     pushd ${CSAR_DIR}/${csar_id}
237
238     cat << META > metadata.yaml
239 resources:
240   network:
241     - onap-ovn4nfvk8s-network.yaml
242   onapNetwork:
243     - $unprotected_private_net.yaml
244     - $protected_private_net.yaml
245     - $onap_private_net.yaml
246   deployment:
247     - $packetgen_deployment_name.yaml
248     - $firewall_deployment_name.yaml
249     - $sink_deployment_name.yaml
250   service:
251     - sink-service.yaml
252 META
253
254     cat << SERVICE > sink-service.yaml
255 apiVersion: v1
256 kind: Service
257 metadata:
258   name: sink-service
259 spec:
260   type: NodePort
261   ports:
262   - port: 667
263     nodePort: 30667
264   selector:
265     app: vFirewall
266     context: darkstat
267 SERVICE
268
269     cat << MULTUS_NET > onap-ovn4nfvk8s-network.yaml
270 apiVersion: "k8s.cni.cncf.io/v1"
271 kind: NetworkAttachmentDefinition
272 metadata:
273   name: $ovn_multus_network_name
274 spec:
275   config: '{
276       "cniVersion": "0.3.1",
277       "name": "ovn4nfv-k8s-plugin",
278       "type": "ovn4nfvk8s-cni"
279     }'
280 MULTUS_NET
281
282     cat << NET > $unprotected_private_net.yaml
283 apiVersion: k8s.plugin.opnfv.org/v1alpha1
284 kind: Network
285
286 metadata:
287   name: $unprotected_private_net
288 spec:
289   cniType : ovn4nfv
290   ipv4Subnets:
291   - subnet: $protected_private_net_cidr
292     name: subnet1
293     gateway: 192.168.10.1/24
294 NET
295
296     cat << NET > $protected_private_net.yaml
297 apiVersion: k8s.plugin.opnfv.org/v1alpha1
298 kind: Network
299 metadata:
300   name: $protected_private_net
301 spec:
302   cniType : ovn4nfv
303   ipv4Subnets:
304   - subnet: $protected_net_cidr
305     name: subnet1
306     gateway: $protected_net_gw/24
307 NET
308
309     cat << NET > $onap_private_net.yaml
310 apiVersion: k8s.plugin.opnfv.org/v1alpha1
311 kind: Network
312 metadata:
313   name: $onap_private_net
314 spec:
315   cniType : ovn4nfv
316   ipv4Subnets:
317   - subnet: $onap_private_net_cidr
318     name: subnet1
319     gateway: 10.10.0.1/16
320 NET
321
322     proxy="apt:"
323     cloud_init_proxy="
324             - export demo_artifacts_version=$demo_artifacts_version
325             - export vfw_private_ip_0=$vfw_private_ip_0
326             - export vsn_private_ip_0=$vsn_private_ip_0
327             - export protected_net_cidr=$protected_net_cidr
328             - export dcae_collector_ip=$dcae_collector_ip
329             - export dcae_collector_port=$dcae_collector_port
330             - export protected_net_gw=$protected_net_gw
331             - export protected_private_net_cidr=$protected_private_net_cidr
332             - export sink_ipaddr=$sink_ipaddr
333 "
334     if [[ -n "${http_proxy+x}" ]]; then
335         proxy+="
336             http_proxy: $http_proxy"
337         cloud_init_proxy+="
338             - export http_proxy=$http_proxy"
339     fi
340     if [[ -n "${https_proxy+x}" ]]; then
341         proxy+="
342             https_proxy: $https_proxy"
343         cloud_init_proxy+="
344             - export https_proxy=$https_proxy"
345     fi
346     if [[ -n "${no_proxy+x}" ]]; then
347         cloud_init_proxy+="
348             - export no_proxy=$no_proxy"
349     fi
350
351     cat << DEPLOYMENT > $packetgen_deployment_name.yaml
352 apiVersion: apps/v1
353 kind: Deployment
354 metadata:
355   name: $packetgen_deployment_name
356   labels:
357     app: vFirewall
358 spec:
359   replicas: 1
360   selector:
361     matchLabels:
362       app: vFirewall
363   template:
364     metadata:
365       labels:
366         app: vFirewall
367       annotations:
368         VirtletLibvirtCPUSetting: |
369           mode: host-model
370         VirtletCloudInitUserData: |
371           ssh_pwauth: True
372           users:
373           - name: admin
374             gecos: User
375             primary-group: admin
376             groups: users
377             sudo: ALL=(ALL) NOPASSWD:ALL
378             lock_passwd: false
379             # the password is "admin"
380             passwd: "\$6\$rounds=4096\$QA5OCKHTE41\$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
381             ssh_authorized_keys:
382               $ssh_key
383           $proxy
384           runcmd:
385           $cloud_init_proxy
386             - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/$packetgen_deployment_name | sudo -E bash
387         VirtletSSHKeys: |
388           $ssh_key
389         VirtletRootVolumeSize: 5Gi
390         k8s.v1.cni.cncf.io/networks: '[{ "name": "$ovn_multus_network_name"}]'
391         k8s.plugin.opnfv.org/nfn-network: '{ "type": "ovn4nfv", "interface": [
392             { "name": "$unprotected_private_net", "ipAddress": "$vpg_private_ip_0", "interface": "eth1" , "defaultGateway": "false"},
393             { "name": "$onap_private_net", "ipAddress": "$vpg_private_ip_1", "interface": "eth2" , "defaultGateway": "false"}
394         ]}'
395         kubernetes.io/target-runtime: virtlet.cloud
396     spec:
397       affinity:
398         nodeAffinity:
399           requiredDuringSchedulingIgnoredDuringExecution:
400             nodeSelectorTerms:
401             - matchExpressions:
402               - key: extraRuntime
403                 operator: In
404                 values:
405                 - virtlet
406       containers:
407       - name: $packetgen_deployment_name
408         image: $image_name
409         imagePullPolicy: IfNotPresent
410         tty: true
411         stdin: true
412         ports:
413           - containerPort: 8183
414         resources:
415           limits:
416             memory: 4Gi
417 DEPLOYMENT
418
419     cat << DEPLOYMENT > $firewall_deployment_name.yaml
420 apiVersion: apps/v1
421 kind: Deployment
422 metadata:
423   name: $firewall_deployment_name
424   labels:
425     app: vFirewall
426 spec:
427   replicas: 1
428   selector:
429     matchLabels:
430       app: vFirewall
431   template:
432     metadata:
433       labels:
434         app: vFirewall
435       annotations:
436         VirtletLibvirtCPUSetting: |
437           mode: host-model
438         VirtletCloudInitUserData: |
439           ssh_pwauth: True
440           users:
441           - name: admin
442             gecos: User
443             primary-group: admin
444             groups: users
445             sudo: ALL=(ALL) NOPASSWD:ALL
446             lock_passwd: false
447             # the password is "admin"
448             passwd: "\$6\$rounds=4096\$QA5OCKHTE41\$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
449             ssh_authorized_keys:
450               $ssh_key
451           $proxy
452           runcmd:
453             $cloud_init_proxy
454             - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/$firewall_deployment_name | sudo -E bash
455         VirtletSSHKeys: |
456           $ssh_key
457         VirtletRootVolumeSize: 5Gi
458         k8s.v1.cni.cncf.io/networks: '[{ "name": "$ovn_multus_network_name"}]'
459         k8s.plugin.opnfv.org/nfn-network: '{ "type": "ovn4nfv", "interface": [
460             { "name": "$unprotected_private_net", "ipAddress": "$vfw_private_ip_0", "interface": "eth1" , "defaultGateway": "false"},
461             { "name": "$protected_private_net", "ipAddress": "$vfw_private_ip_1", "interface": "eth2", "defaultGateway": "false" },
462             { "name": "$onap_private_net", "ipAddress": "$vfw_private_ip_2", "interface": "eth3" , "defaultGateway": "false"}
463         ]}'
464         kubernetes.io/target-runtime: virtlet.cloud
465     spec:
466       affinity:
467         nodeAffinity:
468           requiredDuringSchedulingIgnoredDuringExecution:
469             nodeSelectorTerms:
470             - matchExpressions:
471               - key: extraRuntime
472                 operator: In
473                 values:
474                 - virtlet
475       containers:
476       - name: $firewall_deployment_name
477         image: $image_name
478         imagePullPolicy: IfNotPresent
479         tty: true
480         stdin: true
481         resources:
482           limits:
483             memory: 4Gi
484 DEPLOYMENT
485
486     cat << CONFIGMAP > sink_configmap.yaml
487 apiVersion: v1
488 kind: ConfigMap
489 metadata:
490   name: sink-configmap
491 data:
492   protected_net_gw: $protected_net_gw
493   protected_private_net_cidr: $protected_private_net_cidr
494 CONFIGMAP
495
496     cat << DEPLOYMENT > $sink_deployment_name.yaml
497 apiVersion: apps/v1
498 kind: Deployment
499 metadata:
500   name: $sink_deployment_name
501   labels:
502     app: vFirewall
503 spec:
504   replicas: 1
505   selector:
506     matchLabels:
507       app: vFirewall
508       context: darkstat
509   template:
510     metadata:
511       labels:
512         app: vFirewall
513         context: darkstat
514       annotations:
515         k8s.v1.cni.cncf.io/networks: '[{ "name": "$ovn_multus_network_name"}]'
516         k8s.plugin.opnfv.org/nfn-network: '{ "type": "ovn4nfv", "interface": [
517             { "name": "$protected_private_net", "ipAddress": "$vsn_private_ip_0", "interface": "eth1", "defaultGateway": "false" },
518             { "name": "$onap_private_net", "ipAddress": "$vsn_private_ip_1", "interface": "eth2" , "defaultGateway": "false"}
519         ]}'
520     spec:
521       containers:
522       - name: $sink_deployment_name
523         image: rtsood/onap-vfw-demo-sink:0.2.0
524         envFrom:
525         - configMapRef:
526             name: sink-configmap
527         imagePullPolicy: Always
528         tty: true
529         stdin: true
530         securityContext:
531           privileged: true
532
533       - name: darkstat
534         image: electrocucaracha/darkstat
535         imagePullPolicy: IfNotPresent
536         tty: true
537         stdin: true
538         ports:
539           - containerPort: 667
540 DEPLOYMENT
541     popd
542 }
543
544 # populate_CSAR_vms_vFW() - This function creates the content of CSAR file
545 # required for vFirewal using only virtual machines
546 function populate_CSAR_vms_vFW {
547     local csar_id=$1
548     ssh_key=$(cat $HOME/.ssh/id_rsa.pub)
549
550     _checks_args $csar_id
551     pushd ${CSAR_DIR}/${csar_id}
552
553     cat << META > metadata.yaml
554 resources:
555   network:
556     - $unprotected_private_net.yaml
557     - $protected_private_net.yaml
558     - $onap_private_net.yaml
559   deployment:
560     - $packetgen_deployment_name.yaml
561     - $firewall_deployment_name.yaml
562     - $sink_deployment_name.yaml
563 META
564
565     cat << NET > $unprotected_private_net.yaml
566 apiVersion: "k8s.cni.cncf.io/v1"
567 kind: NetworkAttachmentDefinition
568 metadata:
569   name: $unprotected_private_net
570 spec:
571   config: '{
572     "name": "unprotected",
573     "type": "bridge",
574     "ipam": {
575         "type": "host-local",
576         "subnet": "$protected_private_net_cidr"
577     }
578 }'
579 NET
580
581     cat << NET > $protected_private_net.yaml
582 apiVersion: "k8s.cni.cncf.io/v1"
583 kind: NetworkAttachmentDefinition
584 metadata:
585   name: $protected_private_net
586 spec:
587   config: '{
588     "name": "protected",
589     "type": "bridge",
590     "ipam": {
591         "type": "host-local",
592         "subnet": "$protected_net_cidr"
593     }
594 }'
595 NET
596
597     cat << NET > $onap_private_net.yaml
598 apiVersion: "k8s.cni.cncf.io/v1"
599 kind: NetworkAttachmentDefinition
600 metadata:
601   name: $onap_private_net
602 spec:
603   config: '{
604     "name": "onap",
605     "type": "bridge",
606     "ipam": {
607         "type": "host-local",
608         "subnet": "$onap_private_net_cidr"
609     }
610 }'
611 NET
612
613     proxy="apt:"
614     cloud_init_proxy="
615             - export demo_artifacts_version=$demo_artifacts_version
616             - export vfw_private_ip_0=$vfw_private_ip_0
617             - export vsn_private_ip_0=$vsn_private_ip_0
618             - export protected_net_cidr=$protected_net_cidr
619             - export dcae_collector_ip=$dcae_collector_ip
620             - export dcae_collector_port=$dcae_collector_port
621             - export protected_net_gw=$protected_net_gw
622             - export protected_private_net_cidr=$protected_private_net_cidr
623 "
624     if [[ -n "${http_proxy+x}" ]]; then
625         proxy+="
626             http_proxy: $http_proxy"
627         cloud_init_proxy+="
628             - export http_proxy=$http_proxy"
629     fi
630     if [[ -n "${https_proxy+x}" ]]; then
631         proxy+="
632             https_proxy: $https_proxy"
633         cloud_init_proxy+="
634             - export https_proxy=$https_proxy"
635     fi
636     if [[ -n "${no_proxy+x}" ]]; then
637         cloud_init_proxy+="
638             - export no_proxy=$no_proxy"
639     fi
640
641     cat << DEPLOYMENT > $packetgen_deployment_name.yaml
642 apiVersion: apps/v1
643 kind: Deployment
644 metadata:
645   name: $packetgen_deployment_name
646   labels:
647     app: vFirewall
648 spec:
649   replicas: 1
650   selector:
651     matchLabels:
652       app: vFirewall
653   template:
654     metadata:
655       labels:
656         app: vFirewall
657       annotations:
658         VirtletLibvirtCPUSetting: |
659           mode: host-model
660         VirtletCloudInitUserData: |
661           ssh_pwauth: True
662           users:
663           - name: admin
664             gecos: User
665             primary-group: admin
666             groups: users
667             sudo: ALL=(ALL) NOPASSWD:ALL
668             lock_passwd: false
669             # the password is "admin"
670             passwd: "\$6\$rounds=4096\$QA5OCKHTE41\$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
671             ssh_authorized_keys:
672               $ssh_key
673           $proxy
674           runcmd:
675           $cloud_init_proxy
676             - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/$packetgen_deployment_name | sudo -E bash
677         VirtletSSHKeys: |
678           $ssh_key
679         VirtletRootVolumeSize: 5Gi
680         k8s.v1.cni.cncf.io/networks: '[
681             { "name": "$unprotected_private_net", "interfaceRequest": "eth1" },
682             { "name": "$onap_private_net", "interfaceRequest": "eth2" }
683         ]'
684         kubernetes.io/target-runtime: virtlet.cloud
685     spec:
686       affinity:
687         nodeAffinity:
688           requiredDuringSchedulingIgnoredDuringExecution:
689             nodeSelectorTerms:
690             - matchExpressions:
691               - key: extraRuntime
692                 operator: In
693                 values:
694                 - virtlet
695       containers:
696       - name: $packetgen_deployment_name
697         image: $image_name
698         imagePullPolicy: IfNotPresent
699         tty: true
700         stdin: true
701         ports:
702           - containerPort: 8183
703         resources:
704           limits:
705             memory: 4Gi
706 DEPLOYMENT
707
708     cat << DEPLOYMENT > $firewall_deployment_name.yaml
709 apiVersion: apps/v1
710 kind: Deployment
711 metadata:
712   name: $firewall_deployment_name
713   labels:
714     app: vFirewall
715 spec:
716   replicas: 1
717   selector:
718     matchLabels:
719       app: vFirewall
720   template:
721     metadata:
722       labels:
723         app: vFirewall
724       annotations:
725         VirtletLibvirtCPUSetting: |
726           mode: host-model
727         VirtletCloudInitUserData: |
728           ssh_pwauth: True
729           users:
730           - name: admin
731             gecos: User
732             primary-group: admin
733             groups: users
734             sudo: ALL=(ALL) NOPASSWD:ALL
735             lock_passwd: false
736             # the password is "admin"
737             passwd: "\$6\$rounds=4096\$QA5OCKHTE41\$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
738             ssh_authorized_keys:
739               $ssh_key
740           $proxy
741           runcmd:
742             $cloud_init_proxy
743             - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/$firewall_deployment_name | sudo -E bash
744         VirtletSSHKeys: |
745           $ssh_key
746         VirtletRootVolumeSize: 5Gi
747         k8s.v1.cni.cncf.io/networks: '[
748             { "name": "$unprotected_private_net", "interfaceRequest": "eth1" },
749             { "name": "$protected_private_net", "interfaceRequest": "eth2" },
750             { "name": "$onap_private_net", "interfaceRequest": "eth3" }
751         ]'
752         kubernetes.io/target-runtime: virtlet.cloud
753     spec:
754       affinity:
755         nodeAffinity:
756           requiredDuringSchedulingIgnoredDuringExecution:
757             nodeSelectorTerms:
758             - matchExpressions:
759               - key: extraRuntime
760                 operator: In
761                 values:
762                 - virtlet
763       containers:
764       - name: $firewall_deployment_name
765         image: $image_name
766         imagePullPolicy: IfNotPresent
767         tty: true
768         stdin: true
769         resources:
770           limits:
771             memory: 4Gi
772 DEPLOYMENT
773
774     cat << DEPLOYMENT > $sink_deployment_name.yaml
775 apiVersion: apps/v1
776 kind: Deployment
777 metadata:
778   name: $sink_deployment_name
779   labels:
780     app: vFirewall
781 spec:
782   replicas: 1
783   selector:
784     matchLabels:
785       app: vFirewall
786   template:
787     metadata:
788       labels:
789         app: vFirewall
790       annotations:
791         VirtletLibvirtCPUSetting: |
792           mode: host-model
793         VirtletCloudInitUserData: |
794           ssh_pwauth: True
795           users:
796           - name: admin
797             gecos: User
798             primary-group: admin
799             groups: users
800             sudo: ALL=(ALL) NOPASSWD:ALL
801             lock_passwd: false
802             # the password is "admin"
803             passwd: "\$6\$rounds=4096\$QA5OCKHTE41\$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
804             ssh_authorized_keys:
805               $ssh_key
806           $proxy
807           runcmd:
808             $cloud_init_proxy
809             - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/$sink_deployment_name | sudo -E bash
810         VirtletSSHKeys: |
811           $ssh_key
812         VirtletRootVolumeSize: 5Gi
813         k8s.v1.cni.cncf.io/networks: '[
814             { "name": "$protected_private_net", "interfaceRequest": "eth1" },
815             { "name": "$onap_private_net", "interfaceRequest": "eth2" }
816         ]'
817         kubernetes.io/target-runtime: virtlet.cloud
818     spec:
819       affinity:
820         nodeAffinity:
821           requiredDuringSchedulingIgnoredDuringExecution:
822             nodeSelectorTerms:
823             - matchExpressions:
824               - key: extraRuntime
825                 operator: In
826                 values:
827                 - virtlet
828       containers:
829       - name: $sink_deployment_name
830         image: $image_name
831         imagePullPolicy: IfNotPresent
832         tty: true
833         stdin: true
834         ports:
835           - containerPort: 667
836         resources:
837           limits:
838             memory: 4Gi
839 DEPLOYMENT
840     popd
841 }
842
843 # populate_CSAR_multus() - This function creates the content of CSAR file
844 # required for testing Multus feature
845 function populate_CSAR_multus {
846     local csar_id=$1
847
848     _checks_args $csar_id
849     pushd ${CSAR_DIR}/${csar_id}
850
851     cat << META > metadata.yaml
852 resources:
853   network:
854     - bridge-network.yaml
855   deployment:
856     - $multus_deployment_name.yaml
857 META
858
859     cat << NET > bridge-network.yaml
860 apiVersion: "k8s.cni.cncf.io/v1"
861 kind: NetworkAttachmentDefinition
862 metadata:
863   name: bridge-conf
864 spec:
865   config: '{
866     "cniVersion": "0.3.0",
867     "name": "mynet",
868     "type": "bridge",
869     "ipam": {
870         "type": "host-local",
871         "subnet": "$onap_private_net_cidr"
872     }
873 }'
874 NET
875
876     cat << DEPLOYMENT > $multus_deployment_name.yaml
877 apiVersion: apps/v1
878 kind: Deployment
879 metadata:
880   name: $multus_deployment_name
881   labels:
882     app: multus
883 spec:
884   replicas: 1
885   selector:
886     matchLabels:
887       app: multus
888   template:
889     metadata:
890       labels:
891         app: multus
892       annotations:
893         k8s.v1.cni.cncf.io/networks: '[
894           { "name": "bridge-conf", "interfaceRequest": "eth1" },
895           { "name": "bridge-conf", "interfaceRequest": "eth2" }
896         ]'
897     spec:
898       containers:
899       - name: $multus_deployment_name
900         image: "busybox"
901         command: ["top"]
902         stdin: true
903         tty: true
904 DEPLOYMENT
905     popd
906 }
907
908 # populate_CSAR_virtlet() - This function creates the content of CSAR file
909 # required for testing Virtlet feature
910 function populate_CSAR_virtlet {
911     local csar_id=$1
912
913     _checks_args $csar_id
914     pushd ${CSAR_DIR}/${csar_id}
915
916     cat << META > metadata.yaml
917 resources:
918   deployment:
919     - $virtlet_deployment_name.yaml
920 META
921
922     cat << DEPLOYMENT > $virtlet_deployment_name.yaml
923 apiVersion: apps/v1
924 kind: Deployment
925 metadata:
926   name: $virtlet_deployment_name
927   labels:
928     app: virtlet
929 spec:
930   replicas: 1
931   selector:
932     matchLabels:
933       app: virtlet
934   template:
935     metadata:
936       labels:
937         app: virtlet
938       annotations:
939         VirtletLibvirtCPUSetting: |
940           mode: host-passthrough
941         # This tells CRI Proxy that this pod belongs to Virtlet runtime
942         kubernetes.io/target-runtime: virtlet.cloud
943         VirtletCloudInitUserData: |
944           ssh_pwauth: True
945           users:
946           - name: testuser
947             gecos: User
948             primary-group: testuser
949             groups: users
950             lock_passwd: false
951             shell: /bin/bash
952             # the password is "testuser"
953             passwd: "\$6\$rounds=4096\$wPs4Hz4tfs\$a8ssMnlvH.3GX88yxXKF2cKMlVULsnydoOKgkuStTErTq2dzKZiIx9R/pPWWh5JLxzoZEx7lsSX5T2jW5WISi1"
954             sudo: ALL=(ALL) NOPASSWD:ALL
955           runcmd:
956             - echo hello world
957     spec:
958       affinity:
959         nodeAffinity:
960           requiredDuringSchedulingIgnoredDuringExecution:
961             nodeSelectorTerms:
962             - matchExpressions:
963               - key: extraRuntime
964                 operator: In
965                 values:
966                 - virtlet
967       containers:
968       - name: $virtlet_deployment_name
969         # This specifies the image to use.
970         # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
971         # of the image name is prepended with https:// and used to download the image
972         image: $virtlet_image
973         imagePullPolicy: IfNotPresent
974         # tty and stdin required for "kubectl attach -t" to work
975         tty: true
976         stdin: true
977         resources:
978           limits:
979             # This memory limit is applied to the libvirt domain definition
980             memory: 160Mi
981 DEPLOYMENT
982     popd
983 }
984
# populate_CSAR_kubevirt() - This function creates the content of CSAR file
# required for testing Kubevirt feature
# Arguments: $1 - CSAR id (directory name under $CSAR_DIR)
# Outputs:   metadata.yaml and a VirtualMachineInstance manifest in the CSAR dir
function populate_CSAR_kubevirt {
    local csar_id=$1

    # Quote expansions so ids/paths with spaces or glob chars are safe (SC2086)
    _checks_args "$csar_id"
    pushd "${CSAR_DIR}/${csar_id}"

    cat << META > metadata.yaml
resources:
  virtualmachineinstance:
    - $kubevirt_vmi_name.yaml
META

    cat << DEPLOYMENT > $kubevirt_vmi_name.yaml
apiVersion: kubevirt.io/v1
kind: VirtualMachineInstance
metadata:
  name: $kubevirt_vmi_name
spec:
  domain:
    cpu:
      model: host-passthrough
    devices:
      disks:
      - disk:
          bus: virtio
        name: rootfs
      - disk:
          bus: virtio
        name: cloudinit
      interfaces:
      - name: default
        masquerade: {}
    resources:
      requests:
        memory: 256M
  networks:
  - name: default
    pod: {}
  volumes:
    - name: rootfs
      containerDisk:
        image: $kubevirt_image
        imagePullPolicy: IfNotPresent
    - name: cloudinit
      cloudInitNoCloud:
        userData: |
          #cloud-config
          ssh_pwauth: True
          users:
          - name: testuser
            gecos: User
            primary-group: testuser
            groups: users
            lock_passwd: false
            shell: /bin/bash
            # the password is "testuser"
            passwd: "\$6\$rounds=4096\$wPs4Hz4tfs\$a8ssMnlvH.3GX88yxXKF2cKMlVULsnydoOKgkuStTErTq2dzKZiIx9R/pPWWh5JLxzoZEx7lsSX5T2jW5WISi1"
            sudo: ALL=(ALL) NOPASSWD:ALL
          runcmd:
            - echo hello world
DEPLOYMENT
    popd
}
1050
# populate_CSAR_plugin()- Creates content used for Plugin functional tests
# Arguments: $1 - CSAR id (directory name under $CSAR_DIR)
# Outputs:   metadata.yaml, a busybox Deployment and a Service manifest
function populate_CSAR_plugin {
    local csar_id=$1

    # Quote expansions so ids/paths with spaces or glob chars are safe (SC2086)
    _checks_args "$csar_id"
    pushd "${CSAR_DIR}/${csar_id}"

    cat << META > metadata.yaml
resources:
  deployment:
    - $plugin_deployment_name.yaml
  service:
    - service.yaml
META

    cat << DEPLOYMENT > $plugin_deployment_name.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: $plugin_deployment_name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: plugin
  template:
    metadata:
      labels:
        app: plugin
    spec:
      containers:
      - name: $plugin_deployment_name
        image: "busybox"
        command: ["top"]
        stdin: true
        tty: true
DEPLOYMENT

    # Service selector must match the Deployment's pod label (app: plugin);
    # the previous "app: sise" selector matched no pods.
    cat << SERVICE > service.yaml
apiVersion: v1
kind: Service
metadata:
  name: $plugin_service_name
spec:
  ports:
  - port: 80
    protocol: TCP
  selector:
    app: plugin
SERVICE
    popd
}
1103
# populate_CSAR_ovn4nfv() - Create content used for OVN4NFV functional test
# Arguments: $1 - CSAR id (directory name under $CSAR_DIR)
# Outputs:   metadata.yaml, a Multus NetworkAttachmentDefinition, two OVN
#            Network CRs and a busybox Deployment attached to both networks
function populate_CSAR_ovn4nfv {
    local csar_id=$1

    # Quote expansions so ids/paths with spaces or glob chars are safe (SC2086)
    _checks_args "$csar_id"
    pushd "${CSAR_DIR}/${csar_id}"

    cat << META > metadata.yaml
resources:
  onap_network:
    - ovn-port-net.yaml
    - ovn-priv-net.yaml
  network:
    - onap-ovn4nfvk8s-network.yaml
  deployment:
    - $ovn4nfv_deployment_name.yaml
META

    cat << MULTUS_NET > onap-ovn4nfvk8s-network.yaml
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: $ovn_multus_network_name
spec:
  config: '{
      "cniVersion": "0.3.1",
      "name": "ovn4nfv-k8s-plugin",
      "type": "ovn4nfvk8s-cni"
    }'
MULTUS_NET

    cat << NETWORK > ovn-port-net.yaml
apiVersion: k8s.plugin.opnfv.org/v1alpha1
kind: Network
metadata:
  name: ovn-port-net
spec:
  cniType : ovn4nfv
  ipv4Subnets:
  - subnet: 172.16.33.0/24
    name: subnet1
    gateway: 172.16.33.1/24
NETWORK

    cat << NETWORK > ovn-priv-net.yaml
apiVersion: k8s.plugin.opnfv.org/v1alpha1
kind: Network
metadata:
  name: ovn-priv-net
spec:
  cniType : ovn4nfv
  ipv4Subnets:
  - subnet: 172.16.44.0/24
    name: subnet1
    gateway: 172.16.44.1/24
NETWORK

    cat << DEPLOYMENT > $ovn4nfv_deployment_name.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: $ovn4nfv_deployment_name
  labels:
    app: ovn4nfv
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ovn4nfv
  template:
    metadata:
      labels:
        app: ovn4nfv
      annotations:
        k8s.v1.cni.cncf.io/networks: '[{ "name": "$ovn_multus_network_name"}]'
        k8s.plugin.opnfv.org/nfn-network: '{ "type": "ovn4nfv", "interface": [{ "name": "ovn-port-net", "interface": "net2" , "defaultGateway": "false"},
                      { "name": "ovn-priv-net", "interface": "net3" , "defaultGateway": "false"}]}'
    spec:
      containers:
      - name: $ovn4nfv_deployment_name
        image: "busybox"
        command: ["top"]
        stdin: true
        tty: true
DEPLOYMENT
    popd
}
1191
# populate_CSAR_rbdefinition() - Function that populates CSAR folder
# for testing resource bundle definition
# Arguments: $1 - CSAR id (directory name under $CSAR_DIR)
# Outputs:   rb_profile.tar.gz and vault-consul-dev-0.0.0.tgz in the CSAR dir
function populate_CSAR_rbdefinition {
    _checks_args "$1"
    pushd "${CSAR_DIR}/$1"
    print_msg "Create Helm Chart Archives"
    rm -f *.tar.gz
    # Quote $test_folder paths so they survive spaces/globs (SC2086)
    tar -czf rb_profile.tar.gz -C "$test_folder/vnfs/testrb/helm/profile" .
    #Creates vault-consul-dev-0.0.0.tgz
    helm package "$test_folder/vnfs/testrb/helm/vault-consul-dev" --version 0.0.0
    popd
}
1204
# populate_CSAR_edgex_rbdefinition() - Function that populates CSAR folder
# for testing resource bundle definition of edgex scenario
# Arguments: $1 - CSAR id (directory name under $CSAR_DIR)
# Outputs:   rb_profile.tar.gz and rb_definition.tar.gz in the CSAR dir
function populate_CSAR_edgex_rbdefinition {
    _checks_args "$1"
    pushd "${CSAR_DIR}/$1"
    print_msg "Create Helm Chart Archives"
    rm -f *.tar.gz
    # Quote $test_folder paths so they survive spaces/globs (SC2086)
    tar -czf rb_profile.tar.gz -C "$test_folder/vnfs/edgex/profile" .
    tar -czf rb_definition.tar.gz -C "$test_folder/vnfs/edgex/helm" edgex
    popd
}
1216
# populate_CSAR_fw_rbdefinition() - Function that populates CSAR folder
# for testing resource bundle definition of firewall scenario
# Arguments: $1 - CSAR id (directory name under $CSAR_DIR)
# Outputs:   rb_profile.tar.gz and rb_definition.tar.gz in the CSAR dir
function populate_CSAR_fw_rbdefinition {
    _checks_args "$1"
    pushd "${CSAR_DIR}/$1"
    print_msg "Create Helm Chart Archives for vFirewall"
    rm -f *.tar.gz
    # Reuse profile from the edgeX case as it is an empty profile
    # Quote $test_folder paths so they survive spaces/globs (SC2086)
    tar -czf rb_profile.tar.gz -C "$test_folder/vnfs/edgex/profile" .
    tar -czf rb_definition.tar.gz -C "$test_folder/../demo" firewall
    popd
}
1229
# populate_CSAR_eaa_rbdefinition() - Function that populates CSAR folder
# for testing resource bundle definition of openness eaa scenario
# Arguments: $1 - CSAR id (directory name under $CSAR_DIR)
# Outputs:   rb_profile.tar.gz and rb_definition.tar.gz in the CSAR dir
function populate_CSAR_eaa_rbdefinition {
    _checks_args "$1"
    pushd "${CSAR_DIR}/$1"
    print_msg "Create Helm Chart Archives for Openness EAA"
    rm -f *.tar.gz
    # Quote $test_folder paths so they survive spaces/globs (SC2086)
    tar -czf rb_profile.tar.gz -C "$test_folder/openness/eaa/profile" .
    tar -czf rb_definition.tar.gz -C "$test_folder/openness/eaa/helm" eaa
    popd
}
1241
# populate_CSAR_eaa_sample_app_rbdefinition() - Function that populates CSAR folder
# for testing resource bundle definition of openness sample-app scenario
# Arguments: $1 - CSAR id (directory name under $CSAR_DIR)
# Outputs:   rb_profile.tar.gz and rb_definition.tar.gz in the CSAR dir
function populate_CSAR_eaa_sample_app_rbdefinition {
    _checks_args "$1"
    pushd "${CSAR_DIR}/$1"
    print_msg "Create Helm Chart Archives for Openness EAA Sample Apps: producer and consumer"
    rm -f *.tar.gz
    # Quote $test_folder paths so they survive spaces/globs (SC2086)
    tar -czf rb_profile.tar.gz -C "$test_folder/openness/sample-app/profile" .
    tar -czf rb_definition.tar.gz -C "$test_folder/openness/sample-app/helm" sample-app
    popd
}
1253
# populate_CSAR_composite_app_helm() - Creates Helm chart and profile archives
# for the composite-app test (collectd + prometheus-grafana)
# Arguments: $1 - CSAR id (directory name under $CSAR_DIR)
# Exports:   prometheus_helm_path, collectd_helm_path - absolute archive paths
function populate_CSAR_composite_app_helm {
    _checks_args "$1"
    pushd "${CSAR_DIR}/$1"
    print_msg "Create Helm Chart Archives for compositeAppi with collectd and prometheus-grafana helm charts"
    rm -f *.tar.gz
    # Quote $test_folder paths so they survive spaces/globs (SC2086)
    tar -czf collectd.tar.gz -C "$test_folder/vnfs/comp-app/collection/app1/helm" .
    tar -czf prometheus-operator.tar.gz -C "$test_folder/vnfs/comp-app/collection/app2/helm" .
    tar -czf collectd_profile.tar.gz -C "$test_folder/vnfs/comp-app/collection/app1/profile" .
    tar -czf prometheus-operator_profile.tar.gz -C "$test_folder/vnfs/comp-app/collection/app2/profile" .
    export prometheus_helm_path="${CSAR_DIR}/$1/prometheus-operator.tar.gz"
    export collectd_helm_path="${CSAR_DIR}/$1/collectd.tar.gz"
    popd
}
1267
1268
# populate_CSAR_operator_helm() - Creates Helm chart and profile archives
# for the operators test scenario
# Arguments: $1 - CSAR id (directory name under $CSAR_DIR)
# Exports:   operator_helm_path - absolute path to the operator chart archive
function populate_CSAR_operator_helm {
    _checks_args "$1"
    pushd "${CSAR_DIR}/$1"
    print_msg "Create Helm Chart Archives for operators"
    rm -f *.tar.gz
    #tar -czf operator.tar.gz -C $test_folder/vnfs/comp-app/collection/operators-latest/helm .
    #tar -czf operator_profile.tar.gz -C $test_folder/vnfs/comp-app/collection/operators-latest/profile .
    # Quote $test_folder paths so they survive spaces/globs (SC2086)
    tar -czf operator.tar.gz -C "$test_folder/vnfs/comp-app/collection/operators/helm" .
    tar -czf operator_profile.tar.gz -C "$test_folder/vnfs/comp-app/collection/operators/profile" .
    export operator_helm_path="${CSAR_DIR}/$1/operator.tar.gz"
    popd
}
1281
1282
# populate_CSAR_m3db_helm() - Creates Helm chart and profile archives
# for the m3db test scenario
# Arguments: $1 - CSAR id (directory name under $CSAR_DIR)
# Exports:   m3db_helm_path - absolute path to the m3db chart archive
function populate_CSAR_m3db_helm {
    _checks_args "$1"
    pushd "${CSAR_DIR}/$1"
    print_msg "Create Helm Chart Archives for m3db"
    rm -f *.tar.gz
    #tar -czf m3db.tar.gz -C $test_folder/vnfs/comp-app/collection/app3-latest/helm .
    #tar -czf m3db_profile.tar.gz -C $test_folder/vnfs/comp-app/collection/app3-latest/profile .
    # Quote $test_folder paths so they survive spaces/globs (SC2086)
    tar -czf m3db.tar.gz -C "$test_folder/vnfs/comp-app/collection/m3db/helm" .
    tar -czf m3db_profile.tar.gz -C "$test_folder/vnfs/comp-app/collection/m3db/profile" .
    export m3db_helm_path="${CSAR_DIR}/$1/m3db.tar.gz"
    popd
}