Adding helm charts for gloo ingress controller 75/86575/1
author: Pramod <pramod.raghavendra.jayathirth@intel.com>
Tue, 30 Apr 2019 00:36:20 +0000 (17:36 -0700)
committer: Pramod <pramod.raghavendra.jayathirth@intel.com>
Tue, 30 Apr 2019 00:42:48 +0000 (17:42 -0700)
Helm charts for gloo with support for Kubernetes ingress

Issue-ID: ONAPARC-491
Change-Id: Ia6342f79edef19120da4c28f376a3399991a8310
Signed-off-by: Pramod <pramod.raghavendra.jayathirth@intel.com>
30 files changed:
vnfs/DAaaS/00-init/gloo/.helmignore [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/Chart.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/0-namespace.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/10-ingress-deployment.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/100-gloo-crds.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/101-knative-crds-0.5.1.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/11-ingress-proxy-deployment.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/12-ingress-proxy-configmap.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/13-ingress-proxy-service.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/16-clusteringress-proxy-service.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/18-settings.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/3-gloo-deployment.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/4-gloo-service.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/5-discovery-deployment.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/6-gateway-deployment.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/7-gateway-proxy-deployment.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/8-gateway-proxy-service.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/templates/9-gateway-proxy-configmap.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/values-ingress.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/values-knative.yaml [new file with mode: 0755]
vnfs/DAaaS/00-init/gloo/values.yaml [new file with mode: 0755]

diff --git a/vnfs/DAaaS/00-init/gloo/.helmignore b/vnfs/DAaaS/00-init/gloo/.helmignore
new file mode 100755 (executable)
index 0000000..08c5989
--- /dev/null
@@ -0,0 +1,28 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+
+# template files
+*-template.yaml
+
+# generator files
+*.go
+generate/
diff --git a/vnfs/DAaaS/00-init/gloo/Chart.yaml b/vnfs/DAaaS/00-init/gloo/Chart.yaml
new file mode 100755 (executable)
index 0000000..4f5e931
--- /dev/null
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: Gloo Helm chart for Kubernetes
+home: https://gloo.solo.io/
+icon: https://raw.githubusercontent.com/solo-io/gloo/master/docs/img/Gloo-01.png
+name: gloo
+sources:
+- https://github.com/solo-io/gloo
+version: 0.13.18
diff --git a/vnfs/DAaaS/00-init/gloo/templates/0-namespace.yaml b/vnfs/DAaaS/00-init/gloo/templates/0-namespace.yaml
new file mode 100755 (executable)
index 0000000..92a37f9
--- /dev/null
@@ -0,0 +1,10 @@
+{{- if .Values.namespace.create -}}
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: {{ .Release.Namespace }}
+  labels:
+    app: gloo
+  annotations:
+    "helm.sh/hook": pre-install
+{{- end}}
\ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/10-ingress-deployment.yaml b/vnfs/DAaaS/00-init/gloo/templates/10-ingress-deployment.yaml
new file mode 100755 (executable)
index 0000000..7314b4e
--- /dev/null
@@ -0,0 +1,41 @@
+{{- if or (.Values.ingress.enabled) (.Values.settings.integrations.knative.enabled) }}
+# apps/v1 replaces the deprecated extensions/v1beta1 API (removed in Kubernetes 1.16+).
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: gloo
+    gloo: ingress
+  name: ingress
+  namespace: {{ .Release.Namespace }}
+spec:
+  replicas: {{ .Values.ingress.deployment.replicas }}
+  selector:
+    matchLabels:
+      gloo: ingress
+  template:
+    metadata:
+      labels:
+        gloo: ingress
+    spec:
+      containers:
+      - image: "{{ .Values.ingress.deployment.image.repository }}:{{ .Values.ingress.deployment.image.tag }}"
+        imagePullPolicy: {{ .Values.ingress.deployment.image.pullPolicy }}
+        name: ingress
+        env:
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+{{- if .Values.settings.integrations.knative.enabled }}
+        - name: "ENABLE_KNATIVE_INGRESS"
+          value: "true"
+{{- end }}
+
+{{- if not (.Values.ingress.enabled) }}
+        - name: "DISABLE_KUBE_INGRESS"
+          value: "true"
+{{- end }}
+
+
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/100-gloo-crds.yaml b/vnfs/DAaaS/00-init/gloo/templates/100-gloo-crds.yaml
new file mode 100755 (executable)
index 0000000..2c11117
--- /dev/null
@@ -0,0 +1,111 @@
+{{- if .Values.crds.create }}
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: settings.gloo.solo.io
+  annotations:
+    "helm.sh/hook": crd-install
+  labels:
+    gloo: settings
+spec:
+  group: gloo.solo.io
+  names:
+    kind: Settings
+    listKind: SettingsList
+    plural: settings
+    shortNames:
+      - st
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: gateways.gateway.solo.io
+  annotations:
+    "helm.sh/hook": crd-install
+spec:
+  group: gateway.solo.io
+  names:
+    kind: Gateway
+    listKind: GatewayList
+    plural: gateways
+    shortNames:
+      - gw
+    singular: gateway
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: virtualservices.gateway.solo.io
+  annotations:
+    "helm.sh/hook": crd-install
+spec:
+  group: gateway.solo.io
+  names:
+    kind: VirtualService
+    listKind: VirtualServiceList
+    plural: virtualservices
+    shortNames:
+      - vs
+    singular: virtualservice
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: proxies.gloo.solo.io
+  annotations:
+    "helm.sh/hook": crd-install
+spec:
+  group: gloo.solo.io
+  names:
+    kind: Proxy
+    listKind: ProxyList
+    plural: proxies
+    shortNames:
+      - px
+    singular: proxy
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: upstreams.gloo.solo.io
+  annotations:
+    "helm.sh/hook": crd-install
+spec:
+  group: gloo.solo.io
+  names:
+    kind: Upstream
+    listKind: UpstreamList
+    plural: upstreams
+    shortNames:
+      - us
+    singular: upstream
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: upstreamgroups.gloo.solo.io
+  annotations:
+    "helm.sh/hook": crd-install
+spec:
+  group: gloo.solo.io
+  names:
+    kind: UpstreamGroup
+    listKind: UpstreamGroupList
+    plural: upstreamgroups
+    shortNames:
+      - ug
+    singular: upstreamgroup
+  scope: Namespaced
+  version: v1
+---
+{{- end}}
\ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/101-knative-crds-0.5.1.yaml b/vnfs/DAaaS/00-init/gloo/templates/101-knative-crds-0.5.1.yaml
new file mode 100755 (executable)
index 0000000..3c9987e
--- /dev/null
@@ -0,0 +1,343 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+
+---
+# ↓ required as knative dependency on istio crds is hard-coded right now ↓
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: virtualservices.networking.istio.io
+  annotations:
+    "helm.sh/hook": crd-install
+  labels:
+    app: istio-pilot
+spec:
+  group: networking.istio.io
+  names:
+    kind: VirtualService
+    listKind: VirtualServiceList
+    plural: virtualservices
+    singular: virtualservice
+    categories:
+      - istio-io
+      - networking-istio-io
+  scope: Namespaced
+  version: v1alpha3
+
+# ↑ required as knative dependency on istio crds is hard-coded right now ↑
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    "helm.sh/hook": crd-install
+  labels:
+    knative.dev/crd-install: "true"
+    serving.knative.dev/release: devel
+  name: certificates.networking.internal.knative.dev
+spec:
+  additionalPrinterColumns:
+    - JSONPath: .status.conditions[?(@.type=="Ready")].status
+      name: Ready
+      type: string
+    - JSONPath: .status.conditions[?(@.type=="Ready")].reason
+      name: Reason
+      type: string
+  group: networking.internal.knative.dev
+  names:
+    categories:
+      - all
+      - knative-internal
+      - networking
+    kind: Certificate
+    plural: certificates
+    shortNames:
+      - kcert
+    singular: certificate
+  scope: Namespaced
+  subresources:
+    status: {}
+  version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    "helm.sh/hook": crd-install
+  labels:
+    knative.dev/crd-install: "true"
+    serving.knative.dev/release: devel
+  name: clusteringresses.networking.internal.knative.dev
+spec:
+  additionalPrinterColumns:
+    - JSONPath: .status.conditions[?(@.type=='Ready')].status
+      name: Ready
+      type: string
+    - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+      name: Reason
+      type: string
+  group: networking.internal.knative.dev
+  names:
+    categories:
+      - all
+      - knative-internal
+      - networking
+    kind: ClusterIngress
+    plural: clusteringresses
+    singular: clusteringress
+  scope: Cluster
+  subresources:
+    status: {}
+  version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    "helm.sh/hook": crd-install
+  labels:
+    knative.dev/crd-install: "true"
+    serving.knative.dev/release: devel
+  name: configurations.serving.knative.dev
+spec:
+  additionalPrinterColumns:
+    - JSONPath: .status.latestCreatedRevisionName
+      name: LatestCreated
+      type: string
+    - JSONPath: .status.latestReadyRevisionName
+      name: LatestReady
+      type: string
+    - JSONPath: .status.conditions[?(@.type=='Ready')].status
+      name: Ready
+      type: string
+    - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+      name: Reason
+      type: string
+  group: serving.knative.dev
+  names:
+    categories:
+      - all
+      - knative
+      - serving
+    kind: Configuration
+    plural: configurations
+    shortNames:
+      - config
+      - cfg
+    singular: configuration
+  scope: Namespaced
+  subresources:
+    status: {}
+  version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    "helm.sh/hook": crd-install
+  labels:
+    knative.dev/crd-install: "true"
+  name: images.caching.internal.knative.dev
+spec:
+  group: caching.internal.knative.dev
+  names:
+    categories:
+      - all
+      - knative-internal
+      - caching
+    kind: Image
+    plural: images
+    shortNames:
+      - img
+    singular: image
+  scope: Namespaced
+  subresources:
+    status: {}
+  version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    "helm.sh/hook": crd-install
+  labels:
+    knative.dev/crd-install: "true"
+    serving.knative.dev/release: devel
+  name: podautoscalers.autoscaling.internal.knative.dev
+spec:
+  additionalPrinterColumns:
+    - JSONPath: .status.conditions[?(@.type=='Ready')].status
+      name: Ready
+      type: string
+    - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+      name: Reason
+      type: string
+  group: autoscaling.internal.knative.dev
+  names:
+    categories:
+      - all
+      - knative-internal
+      - autoscaling
+    kind: PodAutoscaler
+    plural: podautoscalers
+    shortNames:
+      - kpa
+    singular: podautoscaler
+  scope: Namespaced
+  subresources:
+    status: {}
+  version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    "helm.sh/hook": crd-install
+  labels:
+    knative.dev/crd-install: "true"
+    serving.knative.dev/release: devel
+  name: revisions.serving.knative.dev
+spec:
+  additionalPrinterColumns:
+    - JSONPath: .status.serviceName
+      name: Service Name
+      type: string
+    - JSONPath: .metadata.labels['serving\.knative\.dev/configurationGeneration']
+      name: Generation
+      type: string
+    - JSONPath: .status.conditions[?(@.type=='Ready')].status
+      name: Ready
+      type: string
+    - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+      name: Reason
+      type: string
+  group: serving.knative.dev
+  names:
+    categories:
+      - all
+      - knative
+      - serving
+    kind: Revision
+    plural: revisions
+    shortNames:
+      - rev
+    singular: revision
+  scope: Namespaced
+  subresources:
+    status: {}
+  version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    "helm.sh/hook": crd-install
+  labels:
+    knative.dev/crd-install: "true"
+    serving.knative.dev/release: devel
+  name: routes.serving.knative.dev
+spec:
+  additionalPrinterColumns:
+    - JSONPath: .status.domain
+      name: Domain
+      type: string
+    - JSONPath: .status.conditions[?(@.type=='Ready')].status
+      name: Ready
+      type: string
+    - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+      name: Reason
+      type: string
+  group: serving.knative.dev
+  names:
+    categories:
+      - all
+      - knative
+      - serving
+    kind: Route
+    plural: routes
+    shortNames:
+      - rt
+    singular: route
+  scope: Namespaced
+  subresources:
+    status: {}
+  version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    "helm.sh/hook": crd-install
+  labels:
+    knative.dev/crd-install: "true"
+    serving.knative.dev/release: devel
+  name: services.serving.knative.dev
+spec:
+  additionalPrinterColumns:
+    - JSONPath: .status.domain
+      name: Domain
+      type: string
+    - JSONPath: .status.latestCreatedRevisionName
+      name: LatestCreated
+      type: string
+    - JSONPath: .status.latestReadyRevisionName
+      name: LatestReady
+      type: string
+    - JSONPath: .status.conditions[?(@.type=='Ready')].status
+      name: Ready
+      type: string
+    - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+      name: Reason
+      type: string
+  group: serving.knative.dev
+  names:
+    categories:
+      - all
+      - knative
+      - serving
+    kind: Service
+    plural: services
+    shortNames:
+      - kservice
+      - ksvc
+    singular: service
+  scope: Namespaced
+  subresources:
+    status: {}
+  version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    "helm.sh/hook": crd-install
+  labels:
+    knative.dev/crd-install: "true"
+    serving.knative.dev/release: devel
+  name: serverlessservices.networking.internal.knative.dev
+spec:
+  group: networking.internal.knative.dev
+  names:
+    categories:
+      - all
+      - knative-internal
+      - networking
+    kind: ServerlessService
+    plural: serverlessservices
+    shortNames:
+      - sks
+    singular: serverlessservice
+  scope: Namespaced
+  subresources:
+    status: {}
+  version: v1alpha1
+
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/11-ingress-proxy-deployment.yaml b/vnfs/DAaaS/00-init/gloo/templates/11-ingress-proxy-deployment.yaml
new file mode 100755 (executable)
index 0000000..5dc131e
--- /dev/null
@@ -0,0 +1,66 @@
+{{- if .Values.ingress.enabled }}
+# apps/v1 replaces the deprecated extensions/v1beta1 API (removed in Kubernetes 1.16+).
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: gloo
+    gloo: ingress-proxy
+  name: ingress-proxy
+  namespace: {{ .Release.Namespace }}
+spec:
+  replicas: {{ .Values.ingressProxy.deployment.replicas }}
+  selector:
+    matchLabels:
+      gloo: ingress-proxy
+  template:
+    metadata:
+      labels:
+        gloo: ingress-proxy
+{{- with .Values.ingressProxy.deployment.extraAnnotations }}
+      annotations:
+{{toYaml  . | indent 8}}{{- end }}
+    spec:
+      containers:
+      - args: ["--disable-hot-restart"]
+        env:
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        image: "{{ .Values.ingressProxy.deployment.image.repository }}:{{ .Values.ingressProxy.deployment.image.tag }}"
+        imagePullPolicy: {{ .Values.ingressProxy.deployment.image.pullPolicy }}
+        name: ingress-proxy
+        securityContext:
+          readOnlyRootFilesystem: true
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - ALL
+            add:
+            - NET_BIND_SERVICE
+        ports:
+        - containerPort: {{ .Values.ingressProxy.deployment.httpPort }}
+          name: http
+          protocol: TCP
+        - containerPort: {{ .Values.ingressProxy.deployment.httpsPort }}
+          name: https
+          protocol: TCP
+{{- with .Values.ingressProxy.deployment.extraPorts }}
+{{toYaml  . | indent 8}}{{- end }}
+        volumeMounts:
+        - mountPath: /etc/envoy
+          name: envoy-config
+      {{- if .Values.ingressProxy.deployment.image.pullSecret }}
+      imagePullSecrets:
+        - name: {{ .Values.ingressProxy.deployment.image.pullSecret }}{{end}}
+      volumes:
+      - configMap:
+          name: ingress-envoy-config
+        name: envoy-config
+
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/12-ingress-proxy-configmap.yaml b/vnfs/DAaaS/00-init/gloo/templates/12-ingress-proxy-configmap.yaml
new file mode 100755 (executable)
index 0000000..8938a47
--- /dev/null
@@ -0,0 +1,52 @@
+{{- if .Values.ingress.enabled }}
+# configmap
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ingress-envoy-config
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: gloo
+    gloo: gateway-proxy
+data:
+{{ if (empty .Values.ingressProxy.configMap.data) }}
+  envoy.yaml: |
+    node:
+      cluster: ingress
+      id: "{{ "{{" }}.PodName{{ "}}" }}.{{ "{{" }}.PodNamespace{{ "}}" }}"
+      metadata:
+        # this line must match !
+        role: "{{ "{{" }}.PodNamespace{{ "}}" }}~ingress-proxy"
+    static_resources:
+      clusters:
+      - name: xds_cluster
+        connect_timeout: 5.000s
+        load_assignment:
+          cluster_name: xds_cluster
+          endpoints:
+          - lb_endpoints:
+            - endpoint:
+                address:
+                  socket_address:
+                    address: gloo
+                    port_value: {{ .Values.gloo.deployment.xdsPort }}
+        http2_protocol_options: {}
+        type: STRICT_DNS
+    dynamic_resources:
+      ads_config:
+        api_type: GRPC
+        grpc_services:
+        - envoy_grpc: {cluster_name: xds_cluster}
+      cds_config:
+        ads: {}
+      lds_config:
+        ads: {}
+    admin:
+      access_log_path: /dev/null
+      address:
+        socket_address:
+          address: 127.0.0.1
+          port_value: 19000
+{{- else}}{{ toYaml .Values.ingressProxy.configMap.data | indent 2}}{{- end}}
+
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/13-ingress-proxy-service.yaml b/vnfs/DAaaS/00-init/gloo/templates/13-ingress-proxy-service.yaml
new file mode 100755 (executable)
index 0000000..583e8bc
--- /dev/null
@@ -0,0 +1,23 @@
+{{- if .Values.ingress.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: gloo
+    gloo: ingress-proxy
+  name: ingress-proxy
+  namespace: {{ .Release.Namespace }}
+spec:
+  ports:
+  - port: {{ .Values.ingressProxy.deployment.httpPort }}
+    protocol: TCP
+    name: http
+  - port: {{ .Values.ingressProxy.deployment.httpsPort }}
+    protocol: TCP
+    name: https
+  selector:
+    gloo: ingress-proxy
+  type: LoadBalancer
+
+
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml b/vnfs/DAaaS/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml
new file mode 100755 (executable)
index 0000000..fb7874e
--- /dev/null
@@ -0,0 +1,59 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+
+# apps/v1 replaces the deprecated extensions/v1beta1 API (removed in Kubernetes 1.16+).
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: gloo
+    gloo: clusteringress-proxy
+  name: clusteringress-proxy
+  namespace: {{ .Release.Namespace }}
+spec:
+  replicas: {{ .Values.settings.integrations.knative.proxy.replicas }}
+  selector:
+    matchLabels:
+      gloo: clusteringress-proxy
+  template:
+    metadata:
+      labels:
+        gloo: clusteringress-proxy
+    spec:
+      containers:
+      - args: ["--disable-hot-restart"]
+        env:
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        image: "{{ .Values.settings.integrations.knative.proxy.image.repository }}:{{ .Values.settings.integrations.knative.proxy.image.tag }}"
+        imagePullPolicy: {{ .Values.settings.integrations.knative.proxy.image.pullPolicy }}
+        name: clusteringress-proxy
+        securityContext:
+          readOnlyRootFilesystem: true
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - ALL
+            add:
+            - NET_BIND_SERVICE
+        ports:
+        - containerPort: {{ .Values.settings.integrations.knative.proxy.httpPort }}
+          name: http
+          protocol: TCP
+        - containerPort: {{ .Values.settings.integrations.knative.proxy.httpsPort }}
+          name: https
+          protocol: TCP
+        volumeMounts:
+        - mountPath: /etc/envoy
+          name: envoy-config
+      volumes:
+      - configMap:
+          name: clusteringress-envoy-config
+        name: envoy-config
+
+{{- end }}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml b/vnfs/DAaaS/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml
new file mode 100755 (executable)
index 0000000..85a6421
--- /dev/null
@@ -0,0 +1,49 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+# configmap
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: clusteringress-envoy-config
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: gloo
+    gloo: clusteringress-proxy
+data:
+  envoy.yaml: |
+    node:
+      cluster: clusteringress
+      id: "{{ "{{" }}.PodName{{ "}}" }}.{{ "{{" }}.PodNamespace{{ "}}" }}"
+      metadata:
+        # this line must match !
+        role: "{{ "{{" }}.PodNamespace{{ "}}" }}~clusteringress-proxy"
+    static_resources:
+      clusters:
+      - name: xds_cluster
+        connect_timeout: 5.000s
+        load_assignment:
+          cluster_name: xds_cluster
+          endpoints:
+          - lb_endpoints:
+            - endpoint:
+                address:
+                  socket_address:
+                    address: gloo
+                    port_value: {{ .Values.gloo.deployment.xdsPort }}
+        http2_protocol_options: {}
+        type: STRICT_DNS
+    dynamic_resources:
+      ads_config:
+        api_type: GRPC
+        grpc_services:
+        - envoy_grpc: {cluster_name: xds_cluster}
+      cds_config:
+        ads: {}
+      lds_config:
+        ads: {}
+    admin:
+      access_log_path: /dev/null
+      address:
+        socket_address:
+          address: 127.0.0.1
+          port_value: 19000
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/16-clusteringress-proxy-service.yaml b/vnfs/DAaaS/00-init/gloo/templates/16-clusteringress-proxy-service.yaml
new file mode 100755 (executable)
index 0000000..7e25bee
--- /dev/null
@@ -0,0 +1,21 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: gloo
+    gloo: clusteringress-proxy
+  name: clusteringress-proxy
+  namespace: {{ .Release.Namespace }}
+spec:
+  ports:
+  - port: {{ .Values.settings.integrations.knative.proxy.httpPort }}
+    protocol: TCP
+    name: http
+  - port: {{ .Values.settings.integrations.knative.proxy.httpsPort }}
+    protocol: TCP
+    name: https
+  selector:
+    gloo: clusteringress-proxy
+  type: LoadBalancer
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml b/vnfs/DAaaS/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml
new file mode 100755 (executable)
index 0000000..a73cf1f
--- /dev/null
@@ -0,0 +1,982 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    app: gloo
+    istio-injection: enabled
+    serving.knative.dev/release: devel
+  name: knative-serving
+
+---
+aggregationRule:
+  clusterRoleSelectors:
+    - matchLabels:
+        serving.knative.dev/controller: "true"
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: knative-serving-admin
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    serving.knative.dev/controller: "true"
+    serving.knative.dev/release: devel
+  name: knative-serving-core
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - namespaces
+      - secrets
+      - configmaps
+      - endpoints
+      - services
+      - events
+      - serviceaccounts
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+      - patch
+      - watch
+  - apiGroups:
+      - extensions
+    resources:
+      - ingresses
+      - deployments
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+      - patch
+      - watch
+  - apiGroups:
+      - apps
+    resources:
+      - deployments
+      - deployments/scale
+      - statefulsets
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+      - patch
+      - watch
+  - apiGroups:
+      - admissionregistration.k8s.io
+    resources:
+      - mutatingwebhookconfigurations
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+      - patch
+      - watch
+  - apiGroups:
+      - apiextensions.k8s.io
+    resources:
+      - customresourcedefinitions
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+      - patch
+      - watch
+  - apiGroups:
+      - serving.knative.dev
+    resources:
+      - configurations
+      - routes
+      - revisions
+      - services
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+      - patch
+      - watch
+  - apiGroups:
+      - serving.knative.dev
+    resources:
+      - configurations/status
+      - routes/status
+      - revisions/status
+      - services/status
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+      - patch
+      - watch
+  - apiGroups:
+      - autoscaling.internal.knative.dev
+    resources:
+      - podautoscalers
+      - podautoscalers/status
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+      - patch
+      - watch
+  - apiGroups:
+      - autoscaling
+    resources:
+      - horizontalpodautoscalers
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+      - patch
+      - watch
+  - apiGroups:
+      - caching.internal.knative.dev
+    resources:
+      - images
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+      - patch
+      - watch
+  - apiGroups:
+      - networking.internal.knative.dev
+    resources:
+      - clusteringresses
+      - clusteringresses/status
+      - serverlessservices
+      - serverlessservices/status
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+      - deletecollection
+      - patch
+      - watch
+  - apiGroups:
+      - networking.istio.io
+    resources:
+      - virtualservices
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+      - patch
+      - watch
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: controller
+  namespace: knative-serving
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: knative-serving-controller-admin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: knative-serving-admin
+subjects:
+  - kind: ServiceAccount
+    name: controller
+    namespace: knative-serving
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: activator
+    serving.knative.dev/release: devel
+  name: activator-service
+  namespace: knative-serving
+spec:
+  ports:
+    - name: http
+      nodePort: null
+      port: 80
+      protocol: TCP
+      targetPort: 8080
+    - name: http2
+      port: 81
+      protocol: TCP
+      targetPort: 8081
+    - name: metrics
+      nodePort: null
+      port: 9090
+      protocol: TCP
+      targetPort: 9090
+  selector:
+    app: activator
+  type: ClusterIP
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: controller
+    serving.knative.dev/release: devel
+  name: controller
+  namespace: knative-serving
+spec:
+  ports:
+    - name: metrics
+      port: 9090
+      protocol: TCP
+      targetPort: 9090
+  selector:
+    app: controller
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    role: webhook
+    serving.knative.dev/release: devel
+  name: webhook
+  namespace: knative-serving
+spec:
+  ports:
+    - port: 443
+      targetPort: 443
+  selector:
+    role: webhook
+
+---
+apiVersion: caching.internal.knative.dev/v1alpha1
+kind: Image
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: queue-proxy
+  namespace: knative-serving
+spec:
+  image: gcr.io/knative-releases/github.com/knative/serving/cmd/queue@sha256:b5c759e4ea6f36ae4498c1ec794653920345b9ad7492731fb1d6087e3b95dc43
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: activator
+  namespace: knative-serving
+spec:
+  selector:
+    matchLabels:
+      app: activator
+      role: activator
+  template:
+    metadata:
+      annotations:
+        sidecar.istio.io/inject: "true"
+      labels:
+        app: activator
+        role: activator
+        serving.knative.dev/release: devel
+    spec:
+      containers:
+        - args:
+            - -logtostderr=false
+            - -stderrthreshold=FATAL
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: SYSTEM_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: CONFIG_LOGGING_NAME
+              value: config-logging
+          image: gcr.io/knative-releases/github.com/knative/serving/cmd/activator@sha256:60630ac88d8cb67debd1e2ab1ecd6ec3ff6cbab2336dda8e7ae1c01ebead76c0
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: 8080
+          name: activator
+          ports:
+            - containerPort: 8080
+              name: http1-port
+            - containerPort: 8081
+              name: h2c-port
+            - containerPort: 9090
+              name: metrics-port
+          readinessProbe:
+            httpGet:
+              path: /healthz
+              port: 8080
+          resources:
+            limits:
+              cpu: 200m
+              memory: 600Mi
+            requests:
+              cpu: 20m
+              memory: 60Mi
+          volumeMounts:
+            - mountPath: /etc/config-logging
+              name: config-logging
+            - mountPath: /etc/config-observability
+              name: config-observability
+      serviceAccountName: controller
+      volumes:
+        - configMap:
+            name: config-logging
+          name: config-logging
+        - configMap:
+            name: config-observability
+          name: config-observability
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: autoscaler
+    serving.knative.dev/release: devel
+  name: autoscaler
+  namespace: knative-serving
+spec:
+  ports:
+    - name: http
+      port: 8080
+      protocol: TCP
+      targetPort: 8080
+    - name: metrics
+      port: 9090
+      protocol: TCP
+      targetPort: 9090
+  selector:
+    app: autoscaler
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: autoscaler
+  namespace: knative-serving
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: autoscaler
+  template:
+    metadata:
+      annotations:
+        sidecar.istio.io/inject: "true"
+      labels:
+        app: autoscaler
+    spec:
+      containers:
+        - env:
+            - name: SYSTEM_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: CONFIG_LOGGING_NAME
+              value: config-logging
+          image: gcr.io/knative-releases/github.com/knative/serving/cmd/autoscaler@sha256:442f99e3a55653b19137b44c1d00f681b594d322cb39c1297820eb717e2134ba
+          name: autoscaler
+          ports:
+            - containerPort: 8080
+              name: websocket
+            - containerPort: 9090
+              name: metrics
+          resources:
+            limits:
+              cpu: 300m
+              memory: 400Mi
+            requests:
+              cpu: 30m
+              memory: 40Mi
+          volumeMounts:
+            - mountPath: /etc/config-autoscaler
+              name: config-autoscaler
+            - mountPath: /etc/config-logging
+              name: config-logging
+            - mountPath: /etc/config-observability
+              name: config-observability
+      serviceAccountName: controller
+      volumes:
+        - configMap:
+            name: config-autoscaler
+          name: config-autoscaler
+        - configMap:
+            name: config-logging
+          name: config-logging
+        - configMap:
+            name: config-observability
+          name: config-observability
+
+---
+apiVersion: v1
+data:
+  _example: |
+    ################################
+    #                              #
+    #    EXAMPLE CONFIGURATION     #
+    #                              #
+    ################################
+
+    # This block is not actually functional configuration,
+    # but serves to illustrate the available configuration
+    # options and document them in a way that is accessible
+    # to users that `kubectl edit` this config map.
+    #
+    # These sample configuration options may be copied out of
+    # this block and unindented to actually change the configuration.
+
+    # The Revision ContainerConcurrency field specifies the maximum number
+    # of requests the Container can handle at once. Container concurrency
+    # target percentage is how much of that maximum to use in a stable
+    # state. E.g. if a Revision specifies ContainerConcurrency of 10, then
+    # the Autoscaler will try to maintain 7 concurrent connections per pod
+    # on average. A value of 0.7 is chosen because the Autoscaler panics
+    # when concurrency exceeds 2x the desired set point. So we will panic
+    # before we reach the limit.
+    container-concurrency-target-percentage: "1.0"
+
+    # The container concurrency target default is what the Autoscaler will
+    # try to maintain when the Revision specifies unlimited concurrency.
+    # Even when specifying unlimited concurrency, the autoscaler will
+    # horizontally scale the application based on this target concurrency.
+    #
+    # A value of 100 is chosen because it's enough to allow vertical pod
+    # autoscaling to tune resource requests. E.g. maintaining 1 concurrent
+    # "hello world" request doesn't consume enough resources to allow VPA
+    # to achieve efficient resource usage (VPA CPU minimum is 300m).
+    container-concurrency-target-default: "100"
+
+    # When operating in a stable mode, the autoscaler operates on the
+    # average concurrency over the stable window.
+    stable-window: "60s"
+
+    # When observed average concurrency during the panic window reaches 2x
+    # the target concurrency, the autoscaler enters panic mode. When
+    # operating in panic mode, the autoscaler operates on the average
+    # concurrency over the panic window.
+    panic-window: "6s"
+
+    # Max scale up rate limits the rate at which the autoscaler will
+    # increase pod count. It is the maximum ratio of desired pods versus
+    # observed pods.
+    max-scale-up-rate: "10"
+
+    # Scale to zero feature flag
+    enable-scale-to-zero: "true"
+
+    # Tick interval is the time between autoscaling calculations.
+    tick-interval: "2s"
+
+    # Dynamic parameters (take effect when config map is updated):
+
+    # Scale to zero grace period is the time an inactive revision is left
+    # running before it is scaled to zero (min: 30s).
+    scale-to-zero-grace-period: "30s"
+kind: ConfigMap
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: config-autoscaler
+  namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+  _example: |
+    ################################
+    #                              #
+    #    EXAMPLE CONFIGURATION     #
+    #                              #
+    ################################
+
+    # This block is not actually functional configuration,
+    # but serves to illustrate the available configuration
+    # options and document them in a way that is accessible
+    # to users that `kubectl edit` this config map.
+    #
+    # These sample configuration options may be copied out of
+    # this block and unindented to actually change the configuration.
+
+    # List of repositories for which tag to digest resolving should be skipped
+    registriesSkippingTagResolving: "ko.local,dev.local"
+  queueSidecarImage: gcr.io/knative-releases/github.com/knative/serving/cmd/queue@sha256:b5c759e4ea6f36ae4498c1ec794653920345b9ad7492731fb1d6087e3b95dc43
+kind: ConfigMap
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: config-controller
+  namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+  _example: |
+    ################################
+    #                              #
+    #    EXAMPLE CONFIGURATION     #
+    #                              #
+    ################################
+
+    # This block is not actually functional configuration,
+    # but serves to illustrate the available configuration
+    # options and document them in a way that is accessible
+    # to users that `kubectl edit` this config map.
+    #
+    # These sample configuration options may be copied out of
+    # this block and unindented to actually change the configuration.
+
+    # revision-timeout-seconds contains the default number of
+    # seconds to use for the revision's per-request timeout, if
+    # none is specified.
+    revision-timeout-seconds: "300"  # 5 minutes
+
+    # revision-cpu-request contains the cpu allocation to assign
+    # to revisions by default.
+    revision-cpu-request: "400m"  # 0.4 of a CPU (aka 400 milli-CPU)
+kind: ConfigMap
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: config-defaults
+  namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+  _example: |
+    ################################
+    #                              #
+    #    EXAMPLE CONFIGURATION     #
+    #                              #
+    ################################
+
+    # This block is not actually functional configuration,
+    # but serves to illustrate the available configuration
+    # options and document them in a way that is accessible
+    # to users that `kubectl edit` this config map.
+    #
+    # These sample configuration options may be copied out of
+    # this block and unindented to actually change the configuration.
+
+    # Default value for domain.
+    # Although it will match all routes, it is the least-specific rule so it
+    # will only be used if no other domain matches.
+    example.com: |
+
+    # These are example settings of domain.
+    # example.org will be used for routes having app=nonprofit.
+    example.org: |
+      selector:
+        app: nonprofit
+
+    # Routes having domain suffix of 'svc.cluster.local' will not be exposed
+    # through Ingress. You can define your own label selector to assign that
+    # domain suffix to your Route here, or you can set the label
+    #    "serving.knative.dev/visibility=cluster-local"
+    # to achieve the same effect.  This shows how to make routes having
+    # the label app=secret only exposed to the local cluster.
+    svc.cluster.local: |
+      selector:
+        app: secret
+kind: ConfigMap
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: config-domain
+  namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+  _example: |
+    ################################
+    #                              #
+    #    EXAMPLE CONFIGURATION     #
+    #                              #
+    ################################
+
+    # This block is not actually functional configuration,
+    # but serves to illustrate the available configuration
+    # options and document them in a way that is accessible
+    # to users that `kubectl edit` this config map.
+    #
+    # These sample configuration options may be copied out of
+    # this block and unindented to actually change the configuration.
+
+    # Delay after revision creation before considering it for GC
+    stale-revision-create-delay: "24h"
+
+    # Duration since a route has been pointed at a revision before it should be GC'd
+    # This minus lastpinned-debounce must be longer than the controller resync period (10 hours)
+    stale-revision-timeout: "15h"
+
+    # Minimum number of generations of revisions to keep before considering for GC
+    stale-revision-minimum-generations: "1"
+
+    # To avoid constant updates, we allow an existing annotation to be stale by this
+    # amount before we update the timestamp
+    stale-revision-lastpinned-debounce: "5h"
+kind: ConfigMap
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: config-gc
+  namespace: knative-serving
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    networking.knative.dev/ingress-provider: istio
+    serving.knative.dev/release: devel
+  name: config-istio
+  namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+  _example: |
+    ################################
+    #                              #
+    #    EXAMPLE CONFIGURATION     #
+    #                              #
+    ################################
+
+    # This block is not actually functional configuration,
+    # but serves to illustrate the available configuration
+    # options and document them in a way that is accessible
+    # to users that `kubectl edit` this config map.
+    #
+    # These sample configuration options may be copied out of
+    # this block and unindented to actually change the configuration.
+
+    # Common configuration for all Knative codebase
+    zap-logger-config: |
+      {
+        "level": "info",
+        "development": false,
+        "outputPaths": ["stdout"],
+        "errorOutputPaths": ["stderr"],
+        "encoding": "json",
+        "encoderConfig": {
+          "timeKey": "ts",
+          "levelKey": "level",
+          "nameKey": "logger",
+          "callerKey": "caller",
+          "messageKey": "msg",
+          "stacktraceKey": "stacktrace",
+          "lineEnding": "",
+          "levelEncoder": "",
+          "timeEncoder": "iso8601",
+          "durationEncoder": "",
+          "callerEncoder": ""
+        }
+      }
+
+    # Log level overrides
+    # For all components except the autoscaler and queue proxy,
+    # changes are picked up immediately.
+    # For autoscaler and queue proxy, changes require recreation of the pods.
+    loglevel.controller: "info"
+    loglevel.autoscaler: "info"
+    loglevel.queueproxy: "info"
+    loglevel.webhook: "info"
+    loglevel.activator: "info"
+kind: ConfigMap
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: config-logging
+  namespace: knative-serving
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: config-network
+  namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+  _example: |
+    ################################
+    #                              #
+    #    EXAMPLE CONFIGURATION     #
+    #                              #
+    ################################
+
+    # This block is not actually functional configuration,
+    # but serves to illustrate the available configuration
+    # options and document them in a way that is accessible
+    # to users that `kubectl edit` this config map.
+    #
+    # These sample configuration options may be copied out of
+    # this block and unindented to actually change the configuration.
+
+    # logging.enable-var-log-collection defaults to false.
+    # A fluentd sidecar will be set up to collect var log if
+    # this flag is true.
+    logging.enable-var-log-collection: false
+
+    # logging.fluentd-sidecar-image provides the fluentd sidecar image
+    # to inject as a sidecar to collect logs from /var/log.
+    # Must be presented if logging.enable-var-log-collection is true.
+    logging.fluentd-sidecar-image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4
+
+    # logging.fluentd-sidecar-output-config provides the configuration
+    # for the fluentd sidecar, which will be placed into a configmap and
+    # mounted into the fluentd sidecar image.
+    logging.fluentd-sidecar-output-config: |
+      # Parse json log before sending to Elastic Search
+      <filter **>
+        @type parser
+        key_name log
+        <parse>
+          @type multi_format
+          <pattern>
+            format json
+            time_key fluentd-time # fluentd-time is reserved for structured logs
+            time_format %Y-%m-%dT%H:%M:%S.%NZ
+          </pattern>
+          <pattern>
+            format none
+            message_key log
+          </pattern>
+        </parse>
+      </filter>
+      # Send to Elastic Search
+      <match **>
+        @id elasticsearch
+        @type elasticsearch
+        @log_level info
+        include_tag_key true
+        # Elasticsearch service is in monitoring namespace.
+        host elasticsearch-logging.knative-monitoring
+        port 9200
+        logstash_format true
+        <buffer>
+          @type file
+          path /var/log/fluentd-buffers/kubernetes.system.buffer
+          flush_mode interval
+          retry_type exponential_backoff
+          flush_thread_count 2
+          flush_interval 5s
+          retry_forever
+          retry_max_interval 30
+          chunk_limit_size 2M
+          queue_limit_length 8
+          overflow_action block
+        </buffer>
+      </match>
+
+    # logging.revision-url-template provides a template to use for producing the
+    # logging URL that is injected into the status of each Revision.
+    # This value is what you might use with the Knative monitoring bundle, and provides
+    # access to Kibana after setting up kubectl proxy.
+    logging.revision-url-template: |
+      http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase))))
+
+    # If non-empty, this enables queue proxy writing request logs to stdout.
+    # The value determines the shape of the request logs and it must be a valid go text/template.
+    # It is important to keep this as a single line. Multiple lines are parsed as separate entities
+    # by most collection agents and will split the request logs into multiple records.
+    #
+    # The following fields and functions are available to the template:
+    #
+    # Request: An http.Request (see https://golang.org/pkg/net/http/#Request)
+    # representing an HTTP request received by the server.
+    #
+    # Response:
+    # struct {
+    #   Code    int       // HTTP status code (see https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml)
+    #   Size    int       // An int representing the size of the response.
+    #   Latency float64   // A float64 representing the latency of the response in seconds.
+    # }
+    #
+    # Revision:
+    # struct {
+    #   Name          string  // Knative revision name
+    #   Namespace     string  // Knative revision namespace
+    #   Service       string  // Knative service name
+    #   Configuration string  // Knative configuration name
+    #   PodName       string  // Name of the pod hosting the revision
+    #   PodIP         string  // IP of the pod hosting the revision
+    # }
+    #
+    logging.request-log-template: '{"httpRequest": {"requestMethod": "{{ "{{" }}.Request.Method{{ "{{" }}", "requestUrl": "{{ "{{" }}js .Request.RequestURI{{ "{{" }}", "requestSize": "{{ "{{" }}.Request.ContentLength{{ "{{" }}", "status": {{ "{{" }}.Response.Code{{ "{{" }}, "responseSize": "{{ "{{" }}.Response.Size{{ "{{" }}", "userAgent": "{{ "{{" }}js .Request.UserAgent{{ "{{" }}", "remoteIp": "{{ "{{" }}js .Request.RemoteAddr{{ "{{" }}", "serverIp": "{{ "{{" }}.Revision.PodIP{{ "{{" }}", "referer": "{{ "{{" }}js .Request.Referer{{ "{{" }}", "latency": "{{ "{{" }}.Response.Latency{{ "{{" }}s", "protocol": "{{ "{{" }}.Request.Proto{{ "{{" }}"}, "traceId": "{{ "{{" }}index .Request.Header "X-B3-Traceid"{{ "{{" }}"}'
+
+    # metrics.backend-destination field specifies the system metrics destination.
+    # It supports either prometheus (the default) or stackdriver.
+    # Note: Using stackdriver will incur additional charges
+    metrics.backend-destination: prometheus
+
+    # metrics.request-metrics-backend-destination specifies the request metrics
+    # destination. If non-empty, it enables queue proxy to send request metrics.
+    # Currently supported values: prometheus, stackdriver.
+    metrics.request-metrics-backend-destination: prometheus
+
+    # metrics.stackdriver-project-id field specifies the stackdriver project ID. This
+    # field is optional. When running on GCE, application default credentials will be
+    # used if this field is not provided.
+    metrics.stackdriver-project-id: "<your stackdriver project id>"
+
+    # metrics.allow-stackdriver-custom-metrics indicates whether it is allowed to send metrics to
+    # Stackdriver using "global" resource type and custom metric type if the
+    # metrics are not supported by "knative_revision" resource type. Setting this
+    # flag to "true" could cause extra Stackdriver charge.
+    # If metrics.backend-destination is not Stackdriver, this is ignored.
+    metrics.allow-stackdriver-custom-metrics: "false"
+kind: ConfigMap
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: config-observability
+  namespace: knative-serving
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: controller
+  namespace: knative-serving
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: controller
+  template:
+    metadata:
+      annotations:
+        sidecar.istio.io/inject: "false"
+      labels:
+        app: controller
+    spec:
+      containers:
+        - env:
+            - name: SYSTEM_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: CONFIG_LOGGING_NAME
+              value: config-logging
+          image: gcr.io/knative-releases/github.com/knative/serving/cmd/controller@sha256:25af5f3adad8b65db3126e0d6e90aa36835c124c24d9d72ffbdd7ee739a7f571
+          name: controller
+          ports:
+            - containerPort: 9090
+              name: metrics
+          resources:
+            limits:
+              cpu: 1000m
+              memory: 1000Mi
+            requests:
+              cpu: 100m
+              memory: 100Mi
+          volumeMounts:
+            - mountPath: /etc/config-logging
+              name: config-logging
+      serviceAccountName: controller
+      volumes:
+        - configMap:
+            name: config-logging
+          name: config-logging
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    serving.knative.dev/release: devel
+  name: webhook
+  namespace: knative-serving
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: webhook
+      role: webhook
+  template:
+    metadata:
+      annotations:
+        sidecar.istio.io/inject: "false"
+      labels:
+        app: webhook
+        role: webhook
+    spec:
+      containers:
+        - env:
+            - name: SYSTEM_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: CONFIG_LOGGING_NAME
+              value: config-logging
+          image: gcr.io/knative-releases/github.com/knative/serving/cmd/webhook@sha256:d1ba3e2c0d739084ff508629db001619cea9cc8780685e85dd910363774eaef6
+          name: webhook
+          resources:
+            limits:
+              cpu: 200m
+              memory: 200Mi
+            requests:
+              cpu: 20m
+              memory: 20Mi
+          volumeMounts:
+            - mountPath: /etc/config-logging
+              name: config-logging
+      serviceAccountName: controller
+      volumes:
+        - configMap:
+            name: config-logging
+          name: config-logging
+
+{{- end }}
\ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/18-settings.yaml b/vnfs/DAaaS/00-init/gloo/templates/18-settings.yaml
new file mode 100755 (executable)
index 0000000..a2eec08
--- /dev/null
@@ -0,0 +1,30 @@
+{{- if .Values.settings.create }}
+
+apiVersion: gloo.solo.io/v1
+kind: Settings
+metadata:
+  name: default
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    "helm.sh/hook": pre-install
+spec:
+  bindAddr: 0.0.0.0:{{ .Values.gloo.deployment.xdsPort }}
+  discoveryNamespace: {{ .Values.settings.writeNamespace }}
+  kubernetesArtifactSource: {}
+  kubernetesConfigSource: {}
+  kubernetesSecretSource: {}
+  refreshRate: 60s
+
+{{- if .Values.settings.extensions }}
+  extensions:
+{{- toYaml .Values.settings.extensions | nindent 4 }}
+{{- end }}
+
+{{- with .Values.settings.watchNamespaces }}
+  watchNamespaces:
+  {{- range . }}
+  - {{ . }}
+  {{- end }}
+{{- end }}
+
+{{- end }}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml b/vnfs/DAaaS/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml
new file mode 100755 (executable)
index 0000000..35fb5eb
--- /dev/null
@@ -0,0 +1,29 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.gateway.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: gloo-role-gateway
+  labels:
+    app: gloo
+    gloo: rbac
+rules:
+- apiGroups: [""]
+  resources: ["pods", "services", "secrets", "endpoints", "configmaps"]
+  verbs: ["*"]
+- apiGroups: [""]
+  resources: ["namespaces"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: ["apiextensions.k8s.io"]
+  resources: ["customresourcedefinitions"]
+  verbs: ["get", "create"]
+- apiGroups: ["gloo.solo.io"]
+  resources: ["settings", "upstreams","upstreamgroups", "proxies","virtualservices"]
+  verbs: ["*"]
+- apiGroups: ["gateway.solo.io"]
+  resources: ["virtualservices", "gateways"]
+  verbs: ["*"]
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml b/vnfs/DAaaS/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml
new file mode 100755 (executable)
index 0000000..15215b9
--- /dev/null
@@ -0,0 +1,29 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.ingress.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: gloo-role-ingress
+  labels:
+    app: gloo
+    gloo: rbac
+rules:
+- apiGroups: [""]
+  resources: ["pods", "services", "secrets", "endpoints", "configmaps"]
+  verbs: ["*"]
+- apiGroups: [""]
+  resources: ["namespaces"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: ["apiextensions.k8s.io"]
+  resources: ["customresourcedefinitions"]
+  verbs: ["get", "create"]
+- apiGroups: ["gloo.solo.io"]
+  resources: ["settings", "upstreams","upstreamgroups", "proxies","virtualservices"]
+  verbs: ["*"]
+- apiGroups: ["extensions", ""]
+  resources: ["ingresses"]
+  verbs: ["*"]
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml b/vnfs/DAaaS/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml
new file mode 100755 (executable)
index 0000000..1bd2b95
--- /dev/null
@@ -0,0 +1,29 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.settings.integrations.knative.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: gloo-role-knative
+  labels:
+    app: gloo
+    gloo: rbac
+rules:
+- apiGroups: [""]
+  resources: ["pods", "services", "secrets", "endpoints", "configmaps"]
+  verbs: ["*"]
+- apiGroups: [""]
+  resources: ["namespaces"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: ["apiextensions.k8s.io"]
+  resources: ["customresourcedefinitions"]
+  verbs: ["get", "create"]
+- apiGroups: ["gloo.solo.io"]
+  resources: ["settings", "upstreams","upstreamgroups", "proxies","virtualservices"]
+  verbs: ["*"]
+- apiGroups: ["networking.internal.knative.dev"]
+  resources: ["clusteringresses"]
+  verbs: ["get", "list", "watch"]
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml b/vnfs/DAaaS/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml
new file mode 100755 (executable)
index 0000000..6219891
--- /dev/null
@@ -0,0 +1,22 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.gateway.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: gloo-role-binding-gateway-{{ .Release.Namespace }}
+  labels:
+    app: gloo
+    gloo: rbac
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: gloo-role-gateway
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: {{ .Release.Namespace }}
+
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml b/vnfs/DAaaS/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml
new file mode 100755 (executable)
index 0000000..7ef5cba
--- /dev/null
@@ -0,0 +1,22 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.ingress.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: gloo-role-binding-ingress-{{ .Release.Namespace }}
+  labels:
+    app: gloo
+    gloo: rbac
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: gloo-role-ingress
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: {{ .Release.Namespace }}
+
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml b/vnfs/DAaaS/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml
new file mode 100755 (executable)
index 0000000..5f05de9
--- /dev/null
@@ -0,0 +1,21 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.settings.integrations.knative.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: gloo-role-binding-knative-{{ .Release.Namespace }}
+  labels:
+    app: gloo
+    gloo: rbac
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: gloo-role-knative
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: {{ .Release.Namespace }}
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/3-gloo-deployment.yaml b/vnfs/DAaaS/00-init/gloo/templates/3-gloo-deployment.yaml
new file mode 100755 (executable)
index 0000000..b3d8423
--- /dev/null
@@ -0,0 +1,57 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: gloo
+    gloo: gloo
+  name: gloo
+  namespace: {{ .Release.Namespace }}
+spec:
+  replicas: {{ .Values.gloo.deployment.replicas }}
+  selector:
+    matchLabels:
+      gloo: gloo
+  template:
+    metadata:
+      labels:
+        gloo: gloo
+      {{- if .Values.gloo.deployment.stats }}
+      annotations:
+        prometheus.io/path: /metrics
+        prometheus.io/port: "9091"
+        prometheus.io/scrape: "true"
+      {{- end}}
+    spec:
+      containers:
+      - image: "{{ .Values.gloo.deployment.image.repository }}:{{ .Values.gloo.deployment.image.tag }}"
+        imagePullPolicy: {{ .Values.gloo.deployment.image.pullPolicy }}
+        name: gloo
+        resources:
+          requests:
+            cpu: 1
+            memory: 256Mi
+        securityContext:
+          readOnlyRootFilesystem: true
+          allowPrivilegeEscalation: false
+          runAsNonRoot: true
+          runAsUser: 10101
+          capabilities:
+            drop:
+            - ALL
+        ports:
+        - containerPort: {{ .Values.gloo.deployment.xdsPort }}
+          name: grpc
+          protocol: TCP
+        env:
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.namespace
+        {{- if .Values.gloo.deployment.stats }}
+          - name: START_STATS_SERVER
+            value: "true"
+        {{- end}}
+      {{- if .Values.gloo.deployment.image.pullSecret }}
+      imagePullSecrets:
+        - name: {{ .Values.gloo.deployment.image.pullSecret }}{{- end}}
+
diff --git a/vnfs/DAaaS/00-init/gloo/templates/4-gloo-service.yaml b/vnfs/DAaaS/00-init/gloo/templates/4-gloo-service.yaml
new file mode 100755 (executable)
index 0000000..ab49ea3
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: gloo
+    gloo: gloo
+  name: gloo
+  namespace: {{ .Release.Namespace }}
+spec:
+{{- if .Values.gloo.deployment.externalTrafficPolicy }}
+  externalTrafficPolicy: {{ .Values.gloo.deployment.externalTrafficPolicy }}
+{{- end }}
+  ports:
+  - name: grpc
+    port: {{ .Values.gloo.deployment.xdsPort }}
+    protocol: TCP
+  selector:
+    gloo: gloo
diff --git a/vnfs/DAaaS/00-init/gloo/templates/5-discovery-deployment.yaml b/vnfs/DAaaS/00-init/gloo/templates/5-discovery-deployment.yaml
new file mode 100755 (executable)
index 0000000..1a44e92
--- /dev/null
@@ -0,0 +1,46 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: gloo
+    gloo: discovery
+  name: discovery
+  namespace: {{ .Release.Namespace }}
+spec:
+  replicas: {{ .Values.discovery.deployment.replicas }}
+  selector:
+    matchLabels:
+      gloo: discovery
+  template:
+    metadata:
+      labels:
+        gloo: discovery
+      {{- if .Values.discovery.deployment.stats }}
+      annotations:
+        prometheus.io/path: /metrics
+        prometheus.io/port: "9091"
+        prometheus.io/scrape: "true"
+      {{- end}}
+    spec:
+      containers:
+      - image: "{{ .Values.discovery.deployment.image.repository }}:{{ .Values.discovery.deployment.image.tag }}"
+        imagePullPolicy: {{ .Values.discovery.deployment.image.pullPolicy }}
+        name: discovery
+        securityContext:
+          readOnlyRootFilesystem: true
+          allowPrivilegeEscalation: false
+          runAsNonRoot: true
+          runAsUser: 10101
+          capabilities:
+            drop:
+            - ALL
+        env:
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.namespace
+        {{- if .Values.discovery.deployment.stats }}
+          - name: START_STATS_SERVER
+            value: "true"
+        {{- end}}
+
diff --git a/vnfs/DAaaS/00-init/gloo/templates/6-gateway-deployment.yaml b/vnfs/DAaaS/00-init/gloo/templates/6-gateway-deployment.yaml
new file mode 100755 (executable)
index 0000000..0a32241
--- /dev/null
@@ -0,0 +1,47 @@
+{{- if .Values.gateway.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: gloo
+    gloo: gateway
+  name: gateway
+  namespace: {{ .Release.Namespace }}
+spec:
+  replicas: {{ .Values.gateway.deployment.replicas }}
+  selector:
+    matchLabels:
+      gloo: gateway
+  template:
+    metadata:
+      labels:
+        gloo: gateway
+      {{- if .Values.gateway.deployment.stats }}
+      annotations:
+        prometheus.io/path: /metrics
+        prometheus.io/port: "9091"
+        prometheus.io/scrape: "true"
+      {{- end}}
+    spec:
+      containers:
+      - image: "{{ .Values.gateway.deployment.image.repository }}:{{ .Values.gateway.deployment.image.tag }}"
+        imagePullPolicy: {{ .Values.gateway.deployment.image.pullPolicy }}
+        name: gateway
+        securityContext:
+          readOnlyRootFilesystem: true
+          allowPrivilegeEscalation: false
+          runAsNonRoot: true
+          runAsUser: 10101
+          capabilities:
+            drop:
+            - ALL
+        env:
+          - name: POD_NAMESPACE
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.namespace
+        {{- if .Values.gateway.deployment.stats }}
+          - name: START_STATS_SERVER
+            value: "true"
+        {{- end}}
+{{- end }}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/7-gateway-proxy-deployment.yaml b/vnfs/DAaaS/00-init/gloo/templates/7-gateway-proxy-deployment.yaml
new file mode 100755 (executable)
index 0000000..bb54e8f
--- /dev/null
@@ -0,0 +1,67 @@
+{{- if .Values.gateway.enabled }}
+{{- range $key, $spec := .Values.gatewayProxies }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: gloo
+    gloo: {{ $key }}
+  name: {{ $key }}
+  namespace: {{ $.Release.Namespace }}
+spec:
+  replicas: {{ $spec.deployment.replicas }}
+  selector:
+    matchLabels:
+      gloo: {{ $key }}
+  template:
+    metadata:
+      labels:
+        gloo: {{ $key }}
+{{- with $spec.deployment.extraAnnotations }}
+      annotations:
+{{toYaml  . | indent 8}}{{- end }}
+    spec:
+      containers:
+      - args: ["--disable-hot-restart"]
+        env:
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        image: {{ $spec.deployment.image.repository }}:{{ $spec.deployment.image.tag }}
+        imagePullPolicy: {{ $spec.deployment.image.pullPolicy }}
+        name: gateway-proxy
+        securityContext:
+          readOnlyRootFilesystem: true
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - ALL
+            add:
+            - NET_BIND_SERVICE
+        ports:
+        - containerPort: {{ $spec.deployment.httpPort }}
+          name: http
+          protocol: TCP
+        - containerPort: {{ $spec.deployment.httpsPort }}
+          name: https
+          protocol: TCP
+{{- with $spec.deployment.extraPorts }}
+{{toYaml  . | indent 8}}{{- end }}
+        volumeMounts:
+        - mountPath: /etc/envoy
+          name: envoy-config
+      {{- if $spec.deployment.image.pullSecret }}
+      imagePullSecrets:
+        - name: {{ $spec.deployment.image.pullSecret }}{{end}}
+      volumes:
+      - configMap:
+          name: {{ $key }}-envoy-config
+        name: envoy-config
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/8-gateway-proxy-service.yaml b/vnfs/DAaaS/00-init/gloo/templates/8-gateway-proxy-service.yaml
new file mode 100755 (executable)
index 0000000..f0b7d34
--- /dev/null
@@ -0,0 +1,35 @@
+{{- if .Values.gateway.enabled }}
+{{- range $key, $spec := .Values.gatewayProxies }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: gloo
+    gloo: {{ $key }}
+  name: {{ $key }}
+  namespace: {{ $.Release.Namespace }}
+  {{- with $spec.service.extraAnnotations }}
+  annotations:
+{{ toYaml . | indent 4 }}{{- end }}
+spec:
+  ports:
+  - port: {{ $spec.service.httpPort }}
+    targetPort: {{ $spec.deployment.httpPort }}
+    protocol: TCP
+    name: http
+  - port: {{ $spec.service.httpsPort }}
+    targetPort: {{ $spec.deployment.httpsPort }}
+    protocol: TCP
+    name: https
+  selector:
+    gloo: {{ $key }}
+  type: {{ $spec.service.type }}
+  {{- if and (eq $spec.service.type "ClusterIP") $spec.service.clusterIP }}
+  clusterIP: {{ $spec.service.clusterIP }}
+  {{- end }}
+  {{- if and (eq $spec.service.type "LoadBalancer") $spec.service.loadBalancerIP }}
+  loadBalancerIP: {{ $spec.service.loadBalancerIP }}
+  {{- end }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/9-gateway-proxy-configmap.yaml b/vnfs/DAaaS/00-init/gloo/templates/9-gateway-proxy-configmap.yaml
new file mode 100755 (executable)
index 0000000..03c5a92
--- /dev/null
@@ -0,0 +1,54 @@
+{{- if .Values.gateway.enabled }}
+{{- range $key, $spec := .Values.gatewayProxies }}
+---
+# config_map
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ $key }}-envoy-config
+  namespace: {{ $.Release.Namespace }}
+  labels:
+    app: gloo
+    gloo: {{ $key }}
+data:
+{{ if (empty $spec.configMap.data) }}
+  envoy.yaml: |
+    node:
+      cluster: gateway
+      id: "{{ "{{" }}.PodName{{ "}}" }}.{{ "{{" }}.PodNamespace{{ "}}" }}"
+      metadata:
+        # this line must match !
+        role: "{{ "{{" }}.PodNamespace{{ "}}" }}~gateway-proxy"
+    static_resources:
+      clusters:
+      - name: gloo.{{ $.Release.Namespace }}.svc.cluster.local:{{ $.Values.gloo.deployment.xdsPort }}
+        connect_timeout: 5.000s
+        load_assignment:
+          cluster_name: gloo.{{ $.Release.Namespace }}.svc.cluster.local:{{ $.Values.gloo.deployment.xdsPort }}
+          endpoints:
+          - lb_endpoints:
+            - endpoint:
+                address:
+                  socket_address:
+                    address: gloo.{{ $.Release.Namespace }}.svc.cluster.local
+                    port_value: {{ $.Values.gloo.deployment.xdsPort }}
+        http2_protocol_options: {}
+        type: STRICT_DNS
+    dynamic_resources:
+      ads_config:
+        api_type: GRPC
+        grpc_services:
+        - envoy_grpc: {cluster_name: gloo.{{ $.Release.Namespace }}.svc.cluster.local:{{ $.Values.gloo.deployment.xdsPort }}}
+      cds_config:
+        ads: {}
+      lds_config:
+        ads: {}
+    admin:
+      access_log_path: /dev/null
+      address:
+        socket_address:
+          address: 127.0.0.1
+          port_value: 19000
+{{- else}}{{ toYaml $spec.configMap.data | indent 2}}{{- end}}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/00-init/gloo/values-ingress.yaml b/vnfs/DAaaS/00-init/gloo/values-ingress.yaml
new file mode 100755 (executable)
index 0000000..98dd42a
--- /dev/null
@@ -0,0 +1,74 @@
+crds:
+  create: true
+discovery:
+  deployment:
+    image:
+      pullPolicy: Always
+      repository: quay.io/solo-io/discovery
+      tag: 0.13.18
+    replicas: 1
+    stats: false
+gateway:
+  deployment:
+    image:
+      pullPolicy: Always
+      repository: quay.io/solo-io/gateway
+      tag: ""
+    replicas: 1
+    stats: false
+  enabled: false
+gatewayProxies:
+  gateway-proxy:
+    configMap:
+      data: null
+    deployment:
+      httpPort: "8080"
+      httpsPort: "8443"
+      image:
+        pullPolicy: Always
+        repository: quay.io/solo-io/gloo-envoy-wrapper
+        tag: ""
+      replicas: 1
+      stats: false
+    service:
+      httpPort: "80"
+      httpsPort: "443"
+      type: LoadBalancer
+gloo:
+  deployment:
+    image:
+      pullPolicy: Always
+      repository: quay.io/solo-io/gloo
+      tag: 0.13.18
+    replicas: 1
+    stats: false
+    xdsPort: "9977"
+ingress:
+  deployment:
+    image:
+      pullPolicy: Always
+      repository: quay.io/solo-io/ingress
+      tag: 0.13.18
+    replicas: 1
+    stats: false
+  enabled: true
+ingressProxy:
+  configMap: {}
+  deployment:
+    httpPort: "80"
+    httpsPort: "443"
+    image:
+      pullPolicy: Always
+      repository: quay.io/solo-io/gloo-envoy-wrapper
+      tag: 0.13.18
+    replicas: 1
+    stats: false
+namespace:
+  create: false
+rbac:
+  create: true
+settings:
+  integrations:
+    knative:
+      enabled: false
+  writeNamespace: gloo-system
diff --git a/vnfs/DAaaS/00-init/gloo/values-knative.yaml b/vnfs/DAaaS/00-init/gloo/values-knative.yaml
new file mode 100755 (executable)
index 0000000..c53ca1a
--- /dev/null
@@ -0,0 +1,72 @@
+crds:
+  create: true
+discovery:
+  deployment:
+    image:
+      pullPolicy: Always
+      repository: quay.io/solo-io/discovery
+      tag: 0.13.18
+    replicas: 1
+    stats: false
+gateway:
+  deployment:
+    image:
+      pullPolicy: Always
+      repository: quay.io/solo-io/gateway
+      tag: ""
+    replicas: 1
+    stats: false
+  enabled: false
+gatewayProxies:
+  gateway-proxy:
+    configMap:
+      data: null
+    deployment:
+      httpPort: "8080"
+      httpsPort: "8443"
+      image:
+        pullPolicy: Always
+        repository: quay.io/solo-io/gloo-envoy-wrapper
+        tag: ""
+      replicas: 1
+      stats: false
+    service:
+      httpPort: "80"
+      httpsPort: "443"
+      type: LoadBalancer
+gloo:
+  deployment:
+    image:
+      pullPolicy: Always
+      repository: quay.io/solo-io/gloo
+      tag: 0.13.18
+    replicas: 1
+    stats: false
+    xdsPort: "9977"
+ingress:
+  deployment:
+    image:
+      pullPolicy: Always
+      repository: quay.io/solo-io/ingress
+      tag: 0.13.18
+    replicas: 1
+    stats: false
+  enabled: false
+namespace:
+  create: false
+rbac:
+  create: true
+settings:
+  integrations:
+    knative:
+      enabled: true
+      proxy:
+        httpPort: "80"
+        httpsPort: "443"
+        image:
+          pullPolicy: Always
+          repository: quay.io/solo-io/gloo-envoy-wrapper
+          tag: 0.13.18
+        replicas: 1
+        stats: false
+  writeNamespace: gloo-system
diff --git a/vnfs/DAaaS/00-init/gloo/values.yaml b/vnfs/DAaaS/00-init/gloo/values.yaml
new file mode 100755 (executable)
index 0000000..daeab0c
--- /dev/null
@@ -0,0 +1,56 @@
+crds:
+  create: true
+discovery:
+  deployment:
+    image:
+      pullPolicy: Always
+      repository: quay.io/solo-io/discovery
+      tag: 0.13.18
+    replicas: 1
+    stats: false
+gateway:
+  deployment:
+    image:
+      pullPolicy: Always
+      repository: quay.io/solo-io/gateway
+      tag: 0.13.18
+    replicas: 1
+    stats: false
+  enabled: true
+gatewayProxies:
+  gateway-proxy:
+    configMap:
+      data: null
+    deployment:
+      httpPort: "8080"
+      httpsPort: "8443"
+      image:
+        pullPolicy: Always
+        repository: quay.io/solo-io/gloo-envoy-wrapper
+        tag: 0.13.18
+      replicas: 1
+      stats: false
+    service:
+      httpPort: "80"
+      httpsPort: "443"
+      type: LoadBalancer
+gloo:
+  deployment:
+    image:
+      pullPolicy: Always
+      repository: quay.io/solo-io/gloo
+      tag: 0.13.18
+    replicas: 1
+    stats: false
+    xdsPort: "9977"
+ingress:
+  enabled: false
+namespace:
+  create: false
+rbac:
+  create: true
+settings:
+  integrations:
+    knative:
+      enabled: false
+  writeNamespace: gloo-system