deploy: modify nfs daemonset to use cephcsi nfs nodeserver

This commit modifies the nfs daemonset to use the
cephcsi nfs nodeserver. An example for
`nfs.netNamespaceFilePath` is added.

Signed-off-by: Rakshith R <rar@redhat.com>
Author: Rakshith R, 2022-07-26 16:02:40 +05:30 (committed by mergify[bot])
parent 3d3c029471
commit 48d66d6cfd
4 changed files with 116 additions and 129 deletions


@@ -2,154 +2,134 @@
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  name: csi-nfs-node
+  name: csi-nfsplugin
 spec:
   selector:
     matchLabels:
-      app: csi-nfs-node
+      app: csi-nfsplugin
   template:
     metadata:
       labels:
-        app: csi-nfs-node
+        app: csi-nfsplugin
     spec:
+      serviceAccountName: nfs-csi-nodeplugin
+      priorityClassName: system-node-critical
+      hostNetwork: true
+      hostPID: true
+      # to use e.g. Rook orchestrated cluster, and mons' FQDN is
+      # resolved through k8s service, set dns policy to cluster first
+      dnsPolicy: ClusterFirstWithHostNet
       containers:
-        - args:
-            - --csi-address=/csi/csi.sock
-            - --probe-timeout=3s
-            - --health-port=29653
-            - --v=2
-          image: registry.k8s.io/sig-storage/livenessprobe:v2.7.0
-          imagePullPolicy: IfNotPresent
-          name: liveness-probe
-          resources:
-            limits:
-              memory: 100Mi
-            requests:
-              cpu: 10m
-              memory: 20Mi
-          terminationMessagePath: /dev/termination-log
-          terminationMessagePolicy: File
-          volumeMounts:
-            - mountPath: /csi
-              name: socket-dir
-        - args:
-            - --v=1
-            - --csi-address=/csi/csi.sock
-            - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
-          env:
-            - name: DRIVER_REG_SOCK_PATH
-              value: /var/lib/kubelet/plugins/nfs.csi.ceph.com/csi.sock
-            - name: KUBE_NODE_NAME
-              valueFrom:
-                fieldRef:
-                  apiVersion: v1
-                  fieldPath: spec.nodeName
-          image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
-          imagePullPolicy: IfNotPresent
-          livenessProbe:
-            exec:
-              command:
-                - /csi-node-driver-registrar
-                - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
-                - --mode=kubelet-registration-probe
-            failureThreshold: 3
-            initialDelaySeconds: 30
-            periodSeconds: 10
-            successThreshold: 1
-            timeoutSeconds: 15
-          name: node-driver-registrar
-          resources:
-            limits:
-              memory: 100Mi
-            requests:
-              cpu: 10m
-              memory: 20Mi
-          securityContext:
-            privileged: true
-          terminationMessagePath: /dev/termination-log
-          terminationMessagePolicy: File
-          volumeMounts:
-            - mountPath: /csi
-              name: socket-dir
-            - mountPath: /registration
-              name: registration-dir
-        - args:
-            - -v=1
-            - --drivername=nfs.csi.ceph.com
-            - --nodeid=$(NODE_ID)
-            - --endpoint=$(CSI_ENDPOINT)
-          env:
-            - name: NODE_ID
-              valueFrom:
-                fieldRef:
-                  apiVersion: v1
-                  fieldPath: spec.nodeName
-            - name: CSI_ENDPOINT
-              value: unix:///csi/csi.sock
-          image: registry.k8s.io/sig-storage/nfsplugin:v4.0.0
-          imagePullPolicy: IfNotPresent
-          livenessProbe:
-            failureThreshold: 5
-            httpGet:
-              path: /healthz
-              port: healthz
-              scheme: HTTP
-            initialDelaySeconds: 30
-            periodSeconds: 30
-            successThreshold: 1
-            timeoutSeconds: 10
-          name: nfs
-          ports:
-            - containerPort: 29653
-              hostPort: 29653
-              name: healthz
-              protocol: TCP
-          resources:
-            limits:
-              memory: 300Mi
-            requests:
-              cpu: 10m
-              memory: 20Mi
-          securityContext:
-            allowPrivilegeEscalation: true
-            capabilities:
-              add:
-                - SYS_ADMIN
-            privileged: true
-          terminationMessagePath: /dev/termination-log
-          terminationMessagePolicy: File
-          volumeMounts:
-            - mountPath: /csi
-              name: socket-dir
-            - mountPath: /var/lib/kubelet/pods
-              mountPropagation: Bidirectional
-              name: pods-mount-dir
-      dnsPolicy: ClusterFirstWithHostNet
-      hostNetwork: true
-      nodeSelector:
-        kubernetes.io/os: linux
-      restartPolicy: Always
-      schedulerName: default-scheduler
-      securityContext: {}
-      serviceAccountName: nfs-csi-nodeplugin
-      terminationGracePeriodSeconds: 30
-      tolerations:
-        - operator: Exists
+        - name: driver-registrar
+          # This is necessary only for systems with SELinux, where
+          # non-privileged sidecar containers cannot access unix domain socket
+          # created by privileged CSI driver container.
+          securityContext:
+            privileged: true
+            allowPrivilegeEscalation: true
+          image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
+          args:
+            - "--v=1"
+            - "--csi-address=/csi/csi.sock"
+            - "--kubelet-registration-path=/var/lib/kubelet/plugins/nfs.csi.ceph.com/csi.sock"
+          env:
+            - name: KUBE_NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+            - name: registration-dir
+              mountPath: /registration
+        - name: csi-nfsplugin
+          securityContext:
+            privileged: true
+            capabilities:
+              add: ["SYS_ADMIN"]
+            allowPrivilegeEscalation: true
+          # for stable functionality replace canary with latest release version
+          image: quay.io/cephcsi/cephcsi:canary
+          args:
+            - "--nodeid=$(NODE_ID)"
+            - "--type=nfs"
+            - "--nodeserver=true"
+            - "--endpoint=$(CSI_ENDPOINT)"
+            - "--v=5"
+            - "--drivername=nfs.csi.ceph.com"
+            - "--enableprofiling=false"
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: NODE_ID
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: CSI_ENDPOINT
+              value: unix:///csi/csi.sock
+          imagePullPolicy: "IfNotPresent"
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+            - name: mountpoint-dir
+              mountPath: /var/lib/kubelet/pods
+              mountPropagation: Bidirectional
+            - name: plugin-dir
+              mountPath: /var/lib/kubelet/plugins
+              mountPropagation: "Bidirectional"
+            - name: host-sys
+              mountPath: /sys
+            - name: etc-selinux
+              mountPath: /etc/selinux
+              readOnly: true
+            - name: lib-modules
+              mountPath: /lib/modules
+              readOnly: true
+            - name: host-dev
+              mountPath: /dev
+            - name: host-mount
+              mountPath: /run/mount
+            - name: ceph-config
+              mountPath: /etc/ceph/
+            - name: ceph-csi-config
+              mountPath: /etc/ceph-csi-config/
       volumes:
-        - hostPath:
-            path: /var/lib/kubelet/plugins/nfs.csi.ceph.com
-            type: DirectoryOrCreate
-          name: socket-dir
-        - hostPath:
-            path: /var/lib/kubelet/pods
-            type: Directory
-          name: pods-mount-dir
-        - hostPath:
-            path: /var/lib/kubelet/plugins_registry
-            type: Directory
-          name: registration-dir
-  updateStrategy:
-    rollingUpdate:
-      maxSurge: 0
-      maxUnavailable: 1
-    type: RollingUpdate
+        - name: socket-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins/nfs.csi.ceph.com/
+            type: DirectoryOrCreate
+        - name: registration-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins_registry/
+            type: Directory
+        - name: mountpoint-dir
+          hostPath:
+            path: /var/lib/kubelet/pods
+            type: DirectoryOrCreate
+        - name: plugin-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins
+            type: Directory
+        - name: host-sys
+          hostPath:
+            path: /sys
+        - name: etc-selinux
+          hostPath:
+            path: /etc/selinux
+        - name: lib-modules
+          hostPath:
+            path: /lib/modules
+        - name: host-dev
+          hostPath:
+            path: /dev
+        - name: host-mount
+          hostPath:
+            path: /run/mount
+        - name: ceph-config
+          configMap:
+            name: ceph-config
+        - name: ceph-csi-config
+          configMap:
+            name: ceph-csi-config


@@ -43,7 +43,7 @@ var (
 	nfsNodePluginPSP  = "csi-nodeplugin-psp.yaml"
 	nfsRookCephNFS    = "rook-nfs.yaml"
 	nfsDeploymentName = "csi-nfsplugin-provisioner"
-	nfsDeamonSetName  = "csi-nfs-node"
+	nfsDeamonSetName  = "csi-nfsplugin"
 	nfsDirPath        = "../deploy/nfs/kubernetes/"
 	nfsExamplePath    = examplePath + "nfs/"
 	nfsPoolName       = ".nfs"
@@ -235,7 +235,7 @@ func unmountNFSVolume(f *framework.Framework, appName, pvcName string) error {
 		cmd,
 		nfsDeamonSetName,
 		pod.Spec.NodeName,
-		"nfs", // name of the container
+		"csi-nfsplugin", // name of the container
 		cephCSINamespace)
 	if stdErr != "" {
 		e2elog.Logf("StdErr occurred: %s", stdErr)
@@ -299,7 +299,7 @@ var _ = Describe("nfs", func() {
 			// log provisioner
 			logsCSIPods("app=csi-nfsplugin-provisioner", c)
 			// log node plugin
-			logsCSIPods("app=csi-nfs-node", c)
+			logsCSIPods("app=csi-nfsplugin", c)
 			// log all details from the namespace where Ceph-CSI is deployed
 			framework.DumpAllNamespaceInfo(c, cephCSINamespace)


@@ -52,15 +52,15 @@ option `clusterID`, can now be created on the cluster.
 
 ## Running CephCSI with pod networking
 
-The current problem with Pod Networking, is when a CephFS/RBD volume is mounted
-in a pod using Ceph CSI and then the CSI CephFS/RBD plugin is restarted or
+The current problem with Pod Networking, is when a CephFS/RBD/NFS volume is mounted
+in a pod using Ceph CSI and then the CSI CephFS/RBD/NFS plugin is restarted or
 terminated (e.g. by restarting or deleting its DaemonSet), all operations on
 the volume become blocked, even after restarting the CSI pods.
 
 The only workaround is to restart the node where the Ceph CSI plugin pod was
 restarted. This can be mitigated by running the `rbd map`/`mount -t` commands
 in a different network namespace which does not get deleted when the CSI
-CephFS/RBD plugin is restarted or terminated.
+CephFS/RBD/NFS plugin is restarted or terminated.
 
 If someone wants to run the CephCSI with the pod networking they can still do
 by setting the `netNamespaceFilePath`. If this path is set CephCSI will execute
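
For context, a minimal sketch of how the new `nfs.netNamespaceFilePath` option fits into the ceph-csi ConfigMap. The cluster ID and monitor addresses below are placeholders, and `/var/lib/kubelet` is assumed as the kubelet root path; the path matches the `socket-dir` hostPath mounted by the DaemonSet above.

---
# Sketch only: <cluster-id> and the monitor endpoints are placeholders.
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-csi-config
data:
  config.json: |-
    [
      {
        "clusterID": "<cluster-id>",
        "monitors": ["<mon1-addr>:6789", "<mon2-addr>:6789"],
        "nfs": {
          "netNamespaceFilePath": "/var/lib/kubelet/plugins/nfs.csi.ceph.com/net"
        }
      }
    ]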


@@ -24,6 +24,10 @@ kind: ConfigMap
 # path for the Ceph cluster identified by the <cluster-id>, This will be used
 # by the CephFS CSI plugin to execute the mount -t in the
 # network namespace specified by the "cephFS.netNamespaceFilePath".
+# The "nfs.netNamespaceFilePath" fields are the various network namespace
+# path for the Ceph cluster identified by the <cluster-id>, This will be used
+# by the NFS CSI plugin to execute the mount -t in the
+# network namespace specified by the "nfs.netNamespaceFilePath".
 # The "rbd.netNamespaceFilePath" fields are the various network namespace
 # path for the Ceph cluster identified by the <cluster-id>, This will be used
 # by the RBD CSI plugin to execute the rbd map/unmap in the
@@ -60,6 +64,9 @@ data:
           "subvolumeGroup": "<subvolumegroup for cephFS volumes>"
           "netNamespaceFilePath": "<kubeletRootPath>/plugins/cephfs.csi.ceph.com/net",
         }
+        "nfs": {
+          "netNamespaceFilePath": "<kubeletRootPath>/plugins/nfs.csi.ceph.com/net",
+        }
       }
     ]
   cluster-mapping.json: |-