Add support for pmem-csi plugin and e2e test 76/107676/34
author Chen, Tingjie <tingjie.chen@intel.com>
Mon, 25 May 2020 06:26:26 +0000 (14:26 +0800)
committer Chen, Tingjie <tingjie.chen@intel.com>
Sat, 30 May 2020 03:48:31 +0000 (11:48 +0800)
Issue-ID: MULTICLOUD-1046
Change-Id: I1853e071a99702c5e6f7ba9ca819746576fd0aca
Signed-off-by: Chen, Tingjie <tingjie.chen@intel.com>
kud/deployment_infra/images/pmem-csi-direct.yaml [new file with mode: 0644]
kud/deployment_infra/images/pmem-csi-lvm.yaml [new file with mode: 0644]
kud/deployment_infra/playbooks/configure-optane.yml [new file with mode: 0644]
kud/deployment_infra/playbooks/deploy_optane.sh [new file with mode: 0755]
kud/deployment_infra/playbooks/install_optane.sh [new file with mode: 0755]
kud/deployment_infra/playbooks/kud-vars.yml
kud/deployment_infra/playbooks/preconfigure-optane.yml [new file with mode: 0644]
kud/deployment_infra/playbooks/setup-ca-kubernetes.sh [new file with mode: 0755]
kud/deployment_infra/playbooks/setup-ca.sh [new file with mode: 0755]
kud/hosting_providers/vagrant/installer.sh
kud/tests/optane.sh [new file with mode: 0755]

diff --git a/kud/deployment_infra/images/pmem-csi-direct.yaml b/kud/deployment_infra/images/pmem-csi-direct.yaml
new file mode 100644 (file)
index 0000000..7591a49
--- /dev/null
@@ -0,0 +1,375 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: direct-production
+  name: pmem-csi-controller
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: direct-production
+  name: pmem-csi-external-provisioner-cfg
+  namespace: default
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - endpoints
+  verbs:
+  - get
+  - watch
+  - list
+  - delete
+  - update
+  - create
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - get
+  - watch
+  - list
+  - delete
+  - update
+  - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: direct-production
+  name: pmem-csi-external-provisioner-runner
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - persistentvolumes
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - persistentvolumeclaims
+  verbs:
+  - get
+  - list
+  - watch
+  - update
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - list
+  - watch
+  - create
+  - update
+  - patch
+- apiGroups:
+  - snapshot.storage.k8s.io
+  resources:
+  - volumesnapshots
+  verbs:
+  - get
+  - list
+- apiGroups:
+  - snapshot.storage.k8s.io
+  resources:
+  - volumesnapshotcontents
+  verbs:
+  - get
+  - list
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - csinodes
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: direct-production
+  name: pmem-csi-csi-provisioner-role-cfg
+  namespace: default
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: pmem-csi-external-provisioner-cfg
+subjects:
+- kind: ServiceAccount
+  name: pmem-csi-controller
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: direct-production
+  name: pmem-csi-csi-provisioner-role
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: pmem-csi-external-provisioner-runner
+subjects:
+- kind: ServiceAccount
+  name: pmem-csi-controller
+  namespace: default
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: direct-production
+  name: pmem-csi-controller
+  namespace: default
+spec:
+  ports:
+  - port: 10000
+  selector:
+    app: pmem-csi-controller
+    pmem-csi.intel.com/deployment: direct-production
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: direct-production
+  name: pmem-csi-metrics
+  namespace: default
+spec:
+  ports:
+  - port: 10010
+  selector:
+    app: pmem-csi-controller
+    pmem-csi.intel.com/deployment: direct-production
+  type: NodePort
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: direct-production
+  name: pmem-csi-controller
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: pmem-csi-controller
+      pmem-csi.intel.com/deployment: direct-production
+  serviceName: pmem-csi-controller
+  template:
+    metadata:
+      labels:
+        app: pmem-csi-controller
+        pmem-csi.intel.com/deployment: direct-production
+        pmem-csi.intel.com/webhook: ignore
+    spec:
+      containers:
+      - command:
+        - /usr/local/bin/pmem-csi-driver
+        - -v=3
+        - -drivername=pmem-csi.intel.com
+        - -mode=controller
+        - -endpoint=unix:///csi/csi-controller.sock
+        - -registryEndpoint=tcp://0.0.0.0:10000
+        - -metricsListen=:10010
+        - -nodeid=$(KUBE_NODE_NAME)
+        - -caFile=/certs/ca.crt
+        - -certFile=/certs/tls.crt
+        - -keyFile=/certs/tls.key
+        env:
+        - name: KUBE_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: TERMINATION_LOG_PATH
+          value: /tmp/termination-log
+        image: intel/pmem-csi-driver:canary
+        imagePullPolicy: Always
+        name: pmem-driver
+        securityContext:
+          privileged: true
+        terminationMessagePath: /tmp/termination-log
+        volumeMounts:
+        - mountPath: /certs/
+          name: registry-cert
+        - mountPath: /csi
+          name: plugin-socket-dir
+      - args:
+        - --timeout=5m
+        - --v=3
+        - --csi-address=/csi/csi-controller.sock
+        - --feature-gates=Topology=true
+        - --strict-topology=true
+        image: quay.io/k8scsi/csi-provisioner:v1.2.1
+        imagePullPolicy: Always
+        name: external-provisioner
+        volumeMounts:
+        - mountPath: /csi
+          name: plugin-socket-dir
+      serviceAccount: pmem-csi-controller
+      volumes:
+      - emptyDir: {}
+        name: plugin-socket-dir
+      - name: registry-cert
+        secret:
+          secretName: pmem-csi-registry-secrets
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: direct-production
+  name: pmem-csi-node
+  namespace: default
+spec:
+  selector:
+    matchLabels:
+      app: pmem-csi-node
+      pmem-csi.intel.com/deployment: direct-production
+  template:
+    metadata:
+      labels:
+        app: pmem-csi-node
+        pmem-csi.intel.com/deployment: direct-production
+        pmem-csi.intel.com/webhook: ignore
+    spec:
+      containers:
+      - command:
+        - /usr/local/bin/pmem-csi-driver
+        - -deviceManager=ndctl
+        - -v=3
+        - -drivername=pmem-csi.intel.com
+        - -mode=node
+        - -endpoint=$(CSI_ENDPOINT)
+        - -nodeid=$(KUBE_NODE_NAME)
+        - -controllerEndpoint=tcp://$(KUBE_POD_IP):10001
+        - -registryEndpoint=tcp://pmem-csi-controller:10000
+        - -caFile=/certs/ca.crt
+        - -certFile=/certs/tls.crt
+        - -keyFile=/certs/tls.key
+        - -statePath=/var/lib/pmem-csi.intel.com
+        env:
+        - name: CSI_ENDPOINT
+          value: unix:///var/lib/pmem-csi.intel.com/csi.sock
+        - name: KUBE_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: KUBE_POD_IP
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: status.podIP
+        - name: TERMINATION_LOG_PATH
+          value: /tmp/termination-log
+        image: intel/pmem-csi-driver:canary
+        imagePullPolicy: Always
+        name: pmem-driver
+        securityContext:
+          privileged: true
+        terminationMessagePath: /tmp/termination-log
+        volumeMounts:
+        - mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi
+          mountPropagation: Bidirectional
+          name: mountpoint-dir
+        - mountPath: /var/lib/kubelet/pods
+          mountPropagation: Bidirectional
+          name: pods-dir
+        - mountPath: /certs/
+          name: registry-cert
+        - mountPath: /dev
+          name: dev-dir
+        - mountPath: /var/lib/pmem-csi.intel.com
+          mountPropagation: Bidirectional
+          name: pmem-state-dir
+        - mountPath: /sys
+          name: sys-dir
+      - args:
+        - -v=3
+        - --kubelet-registration-path=/var/lib/pmem-csi.intel.com/csi.sock
+        - --csi-address=/pmem-csi/csi.sock
+        image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0
+        imagePullPolicy: Always
+        name: driver-registrar
+        volumeMounts:
+        - mountPath: /pmem-csi
+          name: pmem-state-dir
+        - mountPath: /registration
+          name: registration-dir
+      nodeSelector:
+        storage: pmem
+      volumes:
+      - hostPath:
+          path: /var/lib/kubelet/plugins_registry/
+          type: DirectoryOrCreate
+        name: registration-dir
+      - hostPath:
+          path: /var/lib/kubelet/plugins/kubernetes.io/csi
+          type: DirectoryOrCreate
+        name: mountpoint-dir
+      - hostPath:
+          path: /var/lib/kubelet/pods
+          type: DirectoryOrCreate
+        name: pods-dir
+      - name: registry-cert
+        secret:
+          secretName: pmem-csi-node-secrets
+      - hostPath:
+          path: /var/lib/pmem-csi.intel.com
+          type: DirectoryOrCreate
+        name: pmem-state-dir
+      - hostPath:
+          path: /dev
+          type: DirectoryOrCreate
+        name: dev-dir
+      - hostPath:
+          path: /sys
+          type: DirectoryOrCreate
+        name: sys-dir
+---
+apiVersion: storage.k8s.io/v1beta1
+kind: CSIDriver
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: direct-production
+  name: pmem-csi.intel.com
+spec:
+  attachRequired: false
+  podInfoOnMount: true
+  #volumeLifecycleModes:
+  #- Persistent
+  #- Ephemeral
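A quick sanity check after applying the manifest above (illustrative only, not part of this change; assumes kubectl targets the cluster it was applied to):

    # The CSIDriver object and pod labels below are the ones defined in the manifest.
    kubectl get csidriver pmem-csi.intel.com
    kubectl -n default get pods -l app=pmem-csi-controller
    kubectl -n default get pods -l app=pmem-csi-node -o wide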
diff --git a/kud/deployment_infra/images/pmem-csi-lvm.yaml b/kud/deployment_infra/images/pmem-csi-lvm.yaml
new file mode 100644 (file)
index 0000000..efe7c52
--- /dev/null
@@ -0,0 +1,401 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: lvm-production
+  name: pmem-csi-controller
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: lvm-production
+  name: pmem-csi-external-provisioner-cfg
+  namespace: default
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - endpoints
+  verbs:
+  - get
+  - watch
+  - list
+  - delete
+  - update
+  - create
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - get
+  - watch
+  - list
+  - delete
+  - update
+  - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: lvm-production
+  name: pmem-csi-external-provisioner-runner
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - persistentvolumes
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - persistentvolumeclaims
+  verbs:
+  - get
+  - list
+  - watch
+  - update
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - list
+  - watch
+  - create
+  - update
+  - patch
+- apiGroups:
+  - snapshot.storage.k8s.io
+  resources:
+  - volumesnapshots
+  verbs:
+  - get
+  - list
+- apiGroups:
+  - snapshot.storage.k8s.io
+  resources:
+  - volumesnapshotcontents
+  verbs:
+  - get
+  - list
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - csinodes
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: lvm-production
+  name: pmem-csi-csi-provisioner-role-cfg
+  namespace: default
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: pmem-csi-external-provisioner-cfg
+subjects:
+- kind: ServiceAccount
+  name: pmem-csi-controller
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: lvm-production
+  name: pmem-csi-csi-provisioner-role
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: pmem-csi-external-provisioner-runner
+subjects:
+- kind: ServiceAccount
+  name: pmem-csi-controller
+  namespace: default
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: lvm-production
+  name: pmem-csi-controller
+  namespace: default
+spec:
+  ports:
+  - port: 10000
+  selector:
+    app: pmem-csi-controller
+    pmem-csi.intel.com/deployment: lvm-production
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: lvm-production
+  name: pmem-csi-metrics
+  namespace: default
+spec:
+  ports:
+  - port: 10010
+  selector:
+    app: pmem-csi-controller
+    pmem-csi.intel.com/deployment: lvm-production
+  type: NodePort
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: lvm-production
+  name: pmem-csi-controller
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: pmem-csi-controller
+      pmem-csi.intel.com/deployment: lvm-production
+  serviceName: pmem-csi-controller
+  template:
+    metadata:
+      labels:
+        app: pmem-csi-controller
+        pmem-csi.intel.com/deployment: lvm-production
+        pmem-csi.intel.com/webhook: ignore
+    spec:
+      containers:
+      - command:
+        - /usr/local/bin/pmem-csi-driver
+        - -v=3
+        - -drivername=pmem-csi.intel.com
+        - -mode=controller
+        - -endpoint=unix:///csi/csi-controller.sock
+        - -registryEndpoint=tcp://0.0.0.0:10000
+        - -metricsListen=:10010
+        - -nodeid=$(KUBE_NODE_NAME)
+        - -caFile=/certs/ca.crt
+        - -certFile=/certs/tls.crt
+        - -keyFile=/certs/tls.key
+        env:
+        - name: KUBE_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: TERMINATION_LOG_PATH
+          value: /tmp/termination-log
+        image: intel/pmem-csi-driver:canary
+        imagePullPolicy: Always
+        name: pmem-driver
+        securityContext:
+          privileged: true
+        terminationMessagePath: /tmp/termination-log
+        volumeMounts:
+        - mountPath: /certs/
+          name: registry-cert
+        - mountPath: /csi
+          name: plugin-socket-dir
+      - args:
+        - --timeout=5m
+        - --v=3
+        - --csi-address=/csi/csi-controller.sock
+        - --feature-gates=Topology=true
+        - --strict-topology=true
+        image: quay.io/k8scsi/csi-provisioner:v1.2.1
+        imagePullPolicy: Always
+        name: external-provisioner
+        volumeMounts:
+        - mountPath: /csi
+          name: plugin-socket-dir
+      serviceAccount: pmem-csi-controller
+      volumes:
+      - emptyDir: {}
+        name: plugin-socket-dir
+      - name: registry-cert
+        secret:
+          secretName: pmem-csi-registry-secrets
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: lvm-production
+  name: pmem-csi-node
+  namespace: default
+spec:
+  selector:
+    matchLabels:
+      app: pmem-csi-node
+      pmem-csi.intel.com/deployment: lvm-production
+  template:
+    metadata:
+      labels:
+        app: pmem-csi-node
+        pmem-csi.intel.com/deployment: lvm-production
+        pmem-csi.intel.com/webhook: ignore
+    spec:
+      containers:
+      - command:
+        - /usr/local/bin/pmem-csi-driver
+        - -deviceManager=lvm
+        - -v=3
+        - -drivername=pmem-csi.intel.com
+        - -mode=node
+        - -endpoint=$(CSI_ENDPOINT)
+        - -nodeid=$(KUBE_NODE_NAME)
+        - -controllerEndpoint=tcp://$(KUBE_POD_IP):10001
+        - -registryEndpoint=tcp://pmem-csi-controller:10000
+        - -caFile=/certs/ca.crt
+        - -certFile=/certs/tls.crt
+        - -keyFile=/certs/tls.key
+        - -statePath=/var/lib/pmem-csi.intel.com
+        env:
+        - name: CSI_ENDPOINT
+          value: unix:///var/lib/pmem-csi.intel.com/csi.sock
+        - name: KUBE_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: KUBE_POD_IP
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: status.podIP
+        - name: TERMINATION_LOG_PATH
+          value: /tmp/termination-log
+        image: intel/pmem-csi-driver:canary
+        imagePullPolicy: Always
+        name: pmem-driver
+        securityContext:
+          privileged: true
+        terminationMessagePath: /tmp/termination-log
+        volumeMounts:
+        - mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi
+          mountPropagation: Bidirectional
+          name: mountpoint-dir
+        - mountPath: /var/lib/kubelet/pods
+          mountPropagation: Bidirectional
+          name: pods-dir
+        - mountPath: /certs/
+          name: registry-cert
+        - mountPath: /dev
+          name: dev-dir
+        - mountPath: /var/lib/pmem-csi.intel.com
+          mountPropagation: Bidirectional
+          name: pmem-state-dir
+      - args:
+        - -v=3
+        - --kubelet-registration-path=/var/lib/pmem-csi.intel.com/csi.sock
+        - --csi-address=/pmem-csi/csi.sock
+        image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0
+        imagePullPolicy: Always
+        name: driver-registrar
+        volumeMounts:
+        - mountPath: /pmem-csi
+          name: pmem-state-dir
+        - mountPath: /registration
+          name: registration-dir
+      initContainers:
+      - command:
+        - /usr/local/bin/pmem-ns-init
+        - -v=3
+        env:
+        - name: TERMINATION_LOG_PATH
+          value: /tmp/pmem-ns-init-termination-log
+        image: intel/pmem-csi-driver:canary
+        imagePullPolicy: Always
+        name: pmem-ns-init
+        securityContext:
+          privileged: true
+        terminationMessagePath: /tmp/pmem-ns-init-termination-log
+        volumeMounts:
+        - mountPath: /sys
+          name: sys-dir
+      - command:
+        - /usr/local/bin/pmem-vgm
+        - -v=3
+        env:
+        - name: TERMINATION_LOG_PATH
+          value: /tmp/pmem-vgm-termination-log
+        image: intel/pmem-csi-driver:canary
+        imagePullPolicy: Always
+        name: pmem-vgm
+        securityContext:
+          privileged: true
+        terminationMessagePath: /tmp/pmem-vgm-termination-log
+      nodeSelector:
+        storage: pmem
+      volumes:
+      - hostPath:
+          path: /var/lib/kubelet/plugins_registry/
+          type: DirectoryOrCreate
+        name: registration-dir
+      - hostPath:
+          path: /var/lib/kubelet/plugins/kubernetes.io/csi
+          type: DirectoryOrCreate
+        name: mountpoint-dir
+      - hostPath:
+          path: /var/lib/kubelet/pods
+          type: DirectoryOrCreate
+        name: pods-dir
+      - name: registry-cert
+        secret:
+          secretName: pmem-csi-node-secrets
+      - hostPath:
+          path: /var/lib/pmem-csi.intel.com
+          type: DirectoryOrCreate
+        name: pmem-state-dir
+      - hostPath:
+          path: /dev
+          type: DirectoryOrCreate
+        name: dev-dir
+      - hostPath:
+          path: /sys
+          type: DirectoryOrCreate
+        name: sys-dir
+---
+apiVersion: storage.k8s.io/v1beta1
+kind: CSIDriver
+metadata:
+  labels:
+    pmem-csi.intel.com/deployment: lvm-production
+  name: pmem-csi.intel.com
+spec:
+  attachRequired: false
+  podInfoOnMount: true
+  #volumeLifecycleModes:
+  #- Persistent
+  #- Ephemeral
diff --git a/kud/deployment_infra/playbooks/configure-optane.yml b/kud/deployment_infra/playbooks/configure-optane.yml
new file mode 100644 (file)
index 0000000..8e000aa
--- /dev/null
@@ -0,0 +1,15 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- import_playbook: preconfigure-optane.yml
+- hosts: localhost
+  tasks:
+    - name: Apply Optane PMEM CSI Daemonset
+      command: "{{ base_dest }}/optane/deploy_optane.sh"
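For reference, installer.sh (changed further down) runs this playbook like any other addon; invoked standalone it would look roughly like the following, where the inventory path is illustrative:

    # base_dest tells the playbooks where the optane working directory is staged.
    ansible-playbook -v -i inventory/hosts.ini -e "base_dest=$HOME" \
        kud/deployment_infra/playbooks/configure-optane.yml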
diff --git a/kud/deployment_infra/playbooks/deploy_optane.sh b/kud/deployment_infra/playbooks/deploy_optane.sh
new file mode 100755 (executable)
index 0000000..cb50237
--- /dev/null
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+work_path="$(dirname -- "$(readlink -f -- "$0")")"
+ndctl_region=`ndctl list -R`
+if [[ $ndctl_region == "" ]] ; then
+    echo "No Optane Hardware!"
+else
+    echo "Optane Plugin start .."
+    /usr/local/bin/kubectl apply -f $work_path/pmem-csi-lvm.yaml
+fi
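deploy_optane.sh applies the LVM manifest only when ndctl reports a persistent-memory region; a hedged way to check the same preconditions by hand:

    # The node DaemonSet's nodeSelector relies on the storage=pmem label set by install_optane.sh.
    ndctl list -R
    kubectl get nodes -l storage=pmem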
diff --git a/kud/deployment_infra/playbooks/install_optane.sh b/kud/deployment_infra/playbooks/install_optane.sh
new file mode 100755 (executable)
index 0000000..3f34536
--- /dev/null
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+# Precondition:
+# Optane PMem (NVDIMM) hardware is present; the related utilities are
+# downloaded and configured by this script.
+
+# Collect and install ndctl and check the hardware
+echo "[OPTANE] Install ndctl ..."
+apt install -y ndctl
+
+echo "[OPTANE] Check the NVDIMM hardware ..."
+ndctl_region=`ndctl list -R`
+if [[ $ndctl_region == "" ]] ; then
+    echo "No NVDIMM hardware, exit ..."
+    exit 0
+fi
+
+# get current folder path
+work_path="$(dirname -- "$(readlink -f -- "$0")")"
+node_name="$(kubectl get node -o jsonpath='{.items[0].metadata.name}')"
+
+# Collect and install ipmctl
+echo "[OPTANE] Install ipmctl ..."
+cd $work_path
+wget https://launchpad.net/ubuntu/+archive/primary/+sourcefiles/ipmctl/02.00.00.3474+really01.00.00.3469-1/ipmctl_02.00.00.3474+really01.00.00.3469.orig.tar.xz
+tar xvf ipmctl_02.00.00.3474+really01.00.00.3469.orig.tar.xz
+cd ipmctl-01.00.00.3469/
+
+echo "[OPTANE] Install ipmctl utilities"
+mkdir output && cd output
+apt install -y cmake build-essential pkg-config asciidoctor asciidoc libndctl-dev git
+gem install asciidoctor-pdf --pre
+
+add-apt-repository --yes ppa:jhli/libsafec
+apt update
+apt-get install -y libsafec-dev
+
+echo "[OPTANE] Build ipmctl ..."
+cmake -DRELEASE=ON -DCMAKE_INSTALL_PREFIX=/ ..
+make -j all
+make install
+
+cd $work_path
+
+echo "[OPTANE] Install cfssl tools ..."
+# collect cfssl tools
+curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o cfssl
+curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o cfssljson
+chmod a+x cfssl cfssljson
+cp -rf cfssl cfssljson /usr/bin/
+
+echo "[OPTANE] Create AppDirect Goal ..."
+# ipmctl setting
+#ipmctl delete -goal
+#ipmctl create -f -goal PersistentMemoryType=AppDirectNotInterleaved
+
+# Run the certificate setup script and create the PMEM-CSI secrets.
+echo "[OPTANE] Run CA setup for Kubernetes ..."
+./setup-ca-kubernetes.sh
+
+# Label the node so the PMEM-CSI node DaemonSet (nodeSelector storage=pmem) is scheduled on it.
+echo "[OPTANE] Label node $node_name with storage=pmem ..."
+kubectl label node $node_name storage=pmem --overwrite
+
+echo "[OPTANE] kubelet CSIMigration set false ..."
+echo -e "featureGates:\n  CSIMigration: false" >> /var/lib/kubelet/config.yaml
+# Deploy PMEM-CSI and applications.
+# Two device-manager modes are available: lvm and direct.
+#echo "[OPTANE] Create PMEM-CSI plugin service ..."
+#kubectl create -f ../images/pmem-csi-lvm.yaml
+#kubectl create -f ../images/pmem-csi-direct.yaml
+
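If the commented-out AppDirect goal creation above is ever enabled, the resulting regions and namespaces can be inspected with the tools this script installs (a sketch, not part of the change):

    # Show provisioned persistent-memory regions and namespaces.
    ipmctl show -region
    ndctl list -R -N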
diff --git a/kud/deployment_infra/playbooks/kud-vars.yml b/kud/deployment_infra/playbooks/kud-vars.yml
index 7a25670..e5438ae 100644 (file)
@@ -73,3 +73,9 @@ qat_driver_source_type: "tarball"
 qat_driver_version: 1.7.l.4.6.0-00025
 qat_driver_url: "https://01.org/sites/default/files/downloads/{{ qat_package }}.tar.gz"
 qat_package: qat1.7.l.4.6.0-00025
+
+optane_dest: "{{ base_dest }}/optane"
+optane_ipmctl_source_type: "tarball"
+optane_ipmctl_version: 02.00.00.3474
+optane_ipmctl_url: "https://launchpad.net/ubuntu/+archive/primary/+sourcefiles/ipmctl/{{ optane_ipmctl_package }}.tar.xz"
+optane_ipmctl_package: ipmctl_02.00.00.3474+really01.00.00.3469.orig
diff --git a/kud/deployment_infra/playbooks/preconfigure-optane.yml b/kud/deployment_infra/playbooks/preconfigure-optane.yml
new file mode 100644 (file)
index 0000000..6462289
--- /dev/null
@@ -0,0 +1,85 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Run the following tasks to stage the Optane PMEM (PMEM-CSI) scripts and
+# manifests; install_optane.sh exits early when no NVDIMM hardware is found.
+- hosts: localhost
+  become: yes
+  pre_tasks:
+    - name: Load kud variables
+      include_vars:
+        file: kud-vars.yml
+  tasks:
+    - name: Create optane folder
+      file:
+        state: directory
+        path: "{{ optane_dest }}"
+      ignore_errors: yes
+
+- hosts: kube-node
+  become: yes
+  pre_tasks:
+    - name: Load kud variables
+      include_vars:
+        file: kud-vars.yml
+  tasks:
+    - name: Create OPTANE folder in the target destination
+      file:
+        state: directory
+        path: "{{ item }}"
+      with_items:
+        - optane
+    - copy:
+        src: "{{ playbook_dir }}/setup-ca-kubernetes.sh"
+        dest: optane
+    - name: Make setup-ca-kubernetes.sh executable
+      shell: "chmod +x setup-ca-kubernetes.sh"
+      args:
+        chdir: "optane"
+        warn: False
+    - copy:
+        src: "{{ playbook_dir }}/setup-ca.sh"
+        dest: optane
+    - name: Make setup-ca.sh executable
+      shell: "chmod +x setup-ca.sh"
+      args:
+        chdir: "optane"
+        warn: False
+    - copy:
+        src: "{{ playbook_dir }}/install_optane.sh"
+        dest: optane
+    - name: Make install_optane.sh executable
+      shell: "chmod +x install_optane.sh"
+      args:
+        chdir: "optane"
+        warn: False
+    - copy:
+        src: "{{ playbook_dir }}/deploy_optane.sh"
+        dest: optane
+    - name: Make deploy_optane.sh executable
+      shell: "chmod +x deploy_optane.sh"
+      args:
+        chdir: "optane"
+        warn: False
+    - copy:
+        src: "{{ playbook_dir }}/../images/pmem-csi-lvm.yaml"
+        dest: optane
+    - copy:
+        src: "{{ playbook_dir }}/../images/pmem-csi-direct.yaml"
+        dest: optane
+    - name: Run the Optane install script on the node
+      command: optane/install_optane.sh
+      register: output
+    - name: Restart the kubelet service
+      become: yes
+      service:
+        name: kubelet
+        state: restarted
+
diff --git a/kud/deployment_infra/playbooks/setup-ca-kubernetes.sh b/kud/deployment_infra/playbooks/setup-ca-kubernetes.sh
new file mode 100755 (executable)
index 0000000..c436f1c
--- /dev/null
@@ -0,0 +1,60 @@
+#!/bin/sh -e
+
+# This script generates certificates using setup-ca.sh and converts them into
+# the Kubernetes secrets that the PMEM-CSI deployments rely upon for
+# securing communication between PMEM-CSI components. Existing secrets
+# are updated with new certificates when running it again.
+
+# The script needs a functional kubectl that uses the target cluster.
+: ${KUBECTL:=kubectl}
+
+# The directory containing setup-ca*.sh.
+: ${TEST_DIRECTORY:=$(dirname $(readlink -f $0))}
+
+
+tmpdir=`mktemp -d`
+trap 'rm -r $tmpdir' EXIT
+
+# Generate certificates. They are not going to be needed again and will
+# be deleted together with the temp directory.
+WORKDIR="$tmpdir" "$TEST_DIRECTORY/setup-ca.sh"
+
+# This reads a file and encodes it for use in a secret.
+read_key () {
+    base64 -w 0 "$1"
+}
+
+# Read certificate files and turn them into Kubernetes secrets.
+#
+# -caFile (controller and all nodes)
+CA=$(read_key "$tmpdir/ca.pem")
+# -certFile (controller)
+REGISTRY_CERT=$(read_key "$tmpdir/pmem-registry.pem")
+# -keyFile (controller)
+REGISTRY_KEY=$(read_key "$tmpdir/pmem-registry-key.pem")
+# -certFile (same for all nodes)
+NODE_CERT=$(read_key "$tmpdir/pmem-node-controller.pem")
+# -keyFile (same for all nodes)
+NODE_KEY=$(read_key "$tmpdir/pmem-node-controller-key.pem")
+
+${KUBECTL} apply -f - <<EOF
+apiVersion: v1
+kind: Secret
+metadata:
+    name: pmem-csi-registry-secrets
+type: kubernetes.io/tls
+data:
+    ca.crt: ${CA}
+    tls.crt: ${REGISTRY_CERT}
+    tls.key: ${REGISTRY_KEY}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: pmem-csi-node-secrets
+type: Opaque
+data:
+    ca.crt: ${CA}
+    tls.crt: ${NODE_CERT}
+    tls.key: ${NODE_KEY}
+EOF
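The two secrets created above are exactly what the controller StatefulSet and node DaemonSet mount as registry-cert; a quick verification sketch:

    kubectl get secret pmem-csi-registry-secrets pmem-csi-node-secrets
    # Decode one entry to confirm the CA certificate round-trips.
    kubectl get secret pmem-csi-registry-secrets -o jsonpath='{.data.ca\.crt}' | base64 -d | openssl x509 -noout -subject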
diff --git a/kud/deployment_infra/playbooks/setup-ca.sh b/kud/deployment_infra/playbooks/setup-ca.sh
new file mode 100755 (executable)
index 0000000..77addc7
--- /dev/null
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+# CA name and directory to use for storing intermediate files.
+CA=${CA:="pmem-ca"}
+WORKDIR=${WORKDIR:-$(mktemp -d -u -t pmem-XXXX)}
+mkdir -p $WORKDIR
+cd $WORKDIR
+
+# Check for cfssl utilities.
+cfssl_found=1
+(command -v cfssl 2>&1 >/dev/null && command -v cfssljson 2>&1 >/dev/null) || cfssl_found=0
+if [ $cfssl_found -eq 0 ]; then
+    echo "cfssl tools not found, Please install cfssl and cfssljson."
+    exit 1
+fi
+
+# Generate CA certificates.
+<<EOF cfssl -loglevel=3 gencert -initca - | cfssljson -bare ca
+{
+    "CN": "$CA",
+    "key": {
+        "algo": "rsa",
+        "size": 2048
+    }
+}
+EOF
+
+# Generate server and client certificates.
+DEFAULT_CNS="pmem-registry pmem-node-controller"
+CNS="${DEFAULT_CNS} ${EXTRA_CNS:=""}"
+for name in ${CNS}; do
+  <<EOF cfssl -loglevel=3 gencert -ca=ca.pem -ca-key=ca-key.pem - | cfssljson -bare $name
+{
+    "CN": "$name",
+    "hosts": [
+        $(if [ "$name" = "pmem-registry" ]; then
+             # Some extra names needed for scheduler extender and webhook.
+             echo '"pmem-csi-scheduler", "pmem-csi-scheduler.default", "pmem-csi-scheduler.default.svc", "127.0.0.1",'
+             # And for metrics server.
+             echo '"pmem-csi-metrics", "pmem-csi-metrics.default", "pmem-csi-metrics.default.svc",'
+          fi
+        )
+        "$name"
+    ],
+    "key": {
+        "algo": "ecdsa",
+        "size": 256
+    }
+}
+EOF
+done
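setup-ca.sh can also be exercised on its own; a minimal sketch, assuming cfssl and cfssljson are already on PATH:

    # Keep the generated material instead of letting the caller use a temp dir.
    WORKDIR=/tmp/pmem-certs ./setup-ca.sh
    ls /tmp/pmem-certs
    # Expect ca.pem, pmem-registry(-key).pem and pmem-node-controller(-key).pem,
    # the files read by setup-ca-kubernetes.sh above.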
diff --git a/kud/hosting_providers/vagrant/installer.sh b/kud/hosting_providers/vagrant/installer.sh
index c9ed886..7b8b28d 100755 (executable)
@@ -155,13 +155,13 @@ function install_addons {
     _install_ansible
     sudo ansible-galaxy install $verbose -r $kud_infra_folder/galaxy-requirements.yml --ignore-errors
     ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml | sudo tee $log_folder/setup-kud.log
-    for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat cmk}; do
+    for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat optane cmk}; do
         echo "Deploying $addon using configure-$addon.yml playbook.."
         ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-${addon}.yml | sudo tee $log_folder/setup-${addon}.log
     done
     echo "Run the test cases if testing_enabled is set to true."
     if [[ "${testing_enabled}" == "true" ]]; then
-        for addon in ${KUD_ADDONS:-multus virtlet ovn4nfv nfd sriov qat cmk}; do
+        for addon in ${KUD_ADDONS:-multus virtlet ovn4nfv nfd sriov qat optane cmk}; do
             pushd $kud_tests
             bash ${addon}.sh
             popd
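Because both loops honour the KUD_ADDONS override, setting KUD_ADDONS=optane restricts deployment and testing to this addon; the new test can also be run by hand against an existing cluster:

    # Mirrors what the testing loop does for the optane entry.
    cd kud/tests && bash optane.sh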
diff --git a/kud/tests/optane.sh b/kud/tests/optane.sh
new file mode 100755 (executable)
index 0000000..a8bf464
--- /dev/null
@@ -0,0 +1,89 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+echo "[OPTANE-TEST] Check the NVDIMM hardware ..."
+ndctl_region=`ndctl list -R`
+if [[ $ndctl_region == "" ]] ; then
+    echo "No NVDIMM hardware, exit ..."
+    exit 0
+fi
+
+pod_sc_01=pod-sc-case-01
+pod_pvc_01=pod-pvc-case-01
+pod_app_01=pod-app-case-01
+
+cat << POD > $HOME/$pod_sc_01.yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: pmem-csi-sc-ext4
+parameters:
+  csi.storage.k8s.io/fstype: ext4
+  eraseafter: "true"
+provisioner: pmem-csi.intel.com
+reclaimPolicy: Delete
+volumeBindingMode: Immediate
+POD
+
+cat << POD > $HOME/$pod_pvc_01.yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: pmem-csi-pvc-ext4
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 4Gi
+  storageClassName: pmem-csi-sc-ext4
+POD
+
+cat << POD > $HOME/$pod_app_01.yaml
+kind: Pod
+apiVersion: v1
+metadata:
+  name: my-csi-app-1
+spec:
+  containers:
+    - name: my-frontend
+      image: busybox
+      command: [ "sleep", "100000" ]
+      volumeMounts:
+      - mountPath: "/data"
+        name: my-csi-volume
+  volumes:
+  - name: my-csi-volume
+    persistentVolumeClaim:
+      claimName: pmem-csi-pvc-ext4
+POD
+
+kubectl apply -f $HOME/$pod_sc_01.yaml
+kubectl apply -f $HOME/$pod_pvc_01.yaml
+kubectl apply -f $HOME/$pod_app_01.yaml
+
+echo "Sleep for several minutes ..."
+sleep 300
+
+pvc_meta="$(kubectl get pvc -o jsonpath='{.items[0].metadata.name}')"
+pvc_status="$(kubectl get pvc -o jsonpath='{.items[0].status.phase}')"
+if [[ $pvc_meta == "pmem-csi-pvc-ext4" ]] && [[ $pvc_status == "Bound" ]] ; then
+    echo "[OPTANE] SUCCESS: created PMEM-CSI volume!"
+else
+    echo "[OPTANE] FAILED: cannot create PMEM-CSI volume!"
+fi
+
+echo "Wait and remove the test resource ..."
+sleep 60
+
+kubectl delete -f $HOME/$pod_sc_01.yaml
+kubectl delete -f $HOME/$pod_pvc_01.yaml
+kubectl delete -f $HOME/$pod_app_01.yaml
+