ceph-csi/deploy/rbd/helm/templates/provisioner-statefulset.yaml
ShyamsundarR c4a3675cec Move locks to more granular locking than CPU count based
As detailed in issue #279, the current lock scheme uses hash
buckets whose count equals the number of CPUs. This causes a lot
of contention when parallel requests are made to the CSI plugin.
To reduce lock contention, this commit introduces granular,
per-identifier locks.
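
The per-identifier scheme amounts to a set of in-flight IDs guarded
by one small mutex: a request proceeds only if no other operation
holds its volume or snapshot ID, rather than hashing the ID into one
of NumCPU shared buckets. A minimal Go sketch of the idea (the names
VolumeLocks, TryAcquire and Release are illustrative and may differ
from the exact in-tree implementation):

package util

import "sync"

// VolumeLocks tracks identifiers (volume or snapshot IDs) that have
// operations in flight. Unlike a fixed pool of NumCPU hash-bucket
// mutexes, two requests contend only if they target the same ID.
type VolumeLocks struct {
	mux   sync.Mutex
	locks map[string]struct{}
}

func NewVolumeLocks() *VolumeLocks {
	return &VolumeLocks{locks: make(map[string]struct{})}
}

// TryAcquire returns false if an operation on id is already in
// flight, letting the caller fail fast instead of blocking.
func (vl *VolumeLocks) TryAcquire(id string) bool {
	vl.mux.Lock()
	defer vl.mux.Unlock()
	if _, busy := vl.locks[id]; busy {
		return false
	}
	vl.locks[id] = struct{}{}
	return true
}

// Release marks the operation on id as finished.
func (vl *VolumeLocks) Release(id string) {
	vl.mux.Lock()
	defer vl.mux.Unlock()
	delete(vl.locks, id)
}

TryAcquire deliberately fails fast rather than blocking, so a
duplicate request for the same ID can be rejected with an
operation-pending error while the first request keeps running.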

The commit also changes the timeout for gRPC Create and Delete
volume requests, as the current timeout is 10s (the Kubernetes
documentation says 15s, but the code defaults to 10s). A virtual
setup at times takes about 12-15s to complete a request, which leads
to unwanted retries of the same request; hence the timeout is
increased so operations can complete with minimal retries.
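
On the sidecar side the timeout is simply a per-RPC context
deadline. A hedged sketch using the CSI spec's Go bindings
(createVolume is an illustrative helper, not the sidecar's actual
code; the 60s value mirrors the --timeout=60s flag in the chart
below):

package sidecar

import (
	"context"
	"time"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// createVolume issues CreateVolume with a bounded deadline. If the
// plugin needs 12-15s but the deadline is 10s, the call is cancelled
// and retried; a longer deadline lets slow (e.g. virtualized) setups
// finish on the first attempt.
func createVolume(client csi.ControllerClient, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	return client.CreateVolume(ctx, req)
}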

PVC creation and deletion tests before and after these changes look like this:

Before:
Default master code + sidecar provisioner --timeout option set
to 30 seconds

20 PVCs
Creation: 3 runs, 396/391/400 seconds
Deletion: 3 runs, 218/271/118 seconds
  - One run stalled for more than 8 minutes and was cancelled

After:
Current commit + sidecar provisioner --timeout option set
to 30 seconds
20 PVCs
Creation: 3 runs, 42/59/65 seconds
Deletion: 3 runs, 32/32/31 seconds

Fixes: #279
Signed-off-by: ShyamsundarR <srangana@redhat.com>
2019-07-01 14:10:14 +00:00


kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
  labels:
    app: {{ include "ceph-csi-rbd.name" . }}
    chart: {{ include "ceph-csi-rbd.chart" . }}
    component: {{ .Values.provisioner.name }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  serviceName: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
  replicas: {{ .Values.provisioner.replicas }}
  selector:
    matchLabels:
      app: {{ include "ceph-csi-rbd.name" . }}
      component: {{ .Values.provisioner.name }}
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ include "ceph-csi-rbd.name" . }}
        chart: {{ include "ceph-csi-rbd.chart" . }}
        component: {{ .Values.provisioner.name }}
        release: {{ .Release.Name }}
        heritage: {{ .Release.Service }}
    spec:
      serviceAccountName: {{ include "ceph-csi-rbd.serviceAccountName.provisioner" . }}
      containers:
        - name: csi-provisioner
          image: "{{ .Values.provisioner.image.repository }}:{{ .Values.provisioner.image.tag }}"
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=60s"
            - "--retry-interval-start=500ms"
          env:
            - name: ADDRESS
              value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}"
          imagePullPolicy: {{ .Values.provisioner.image.pullPolicy }}
          volumeMounts:
            - name: socket-dir
              mountPath: {{ .Values.socketDir }}
          resources:
{{ toYaml .Values.provisioner.resources | indent 12 }}
        - name: csi-snapshotter
          image: {{ .Values.snapshotter.image.repository }}:{{ .Values.snapshotter.image.tag }}
          imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=60s"
          env:
            - name: ADDRESS
              value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}"
          securityContext:
            privileged: true
          volumeMounts:
            - name: socket-dir
              mountPath: {{ .Values.socketDir }}
          resources:
{{ toYaml .Values.snapshotter.resources | indent 12 }}
{{- if .Values.attacher.enabled }}
        - name: csi-attacher
          image: "{{ .Values.attacher.image.repository }}:{{ .Values.attacher.image.tag }}"
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
          env:
            - name: ADDRESS
              value: "{{ .Values.socketDir }}/{{ .Values.socketFile }}"
          imagePullPolicy: {{ .Values.attacher.image.pullPolicy }}
          volumeMounts:
            - name: socket-dir
              mountPath: {{ .Values.socketDir }}
{{- end }}
        - name: csi-rbdplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: "{{ .Values.nodeplugin.plugin.image.repository }}:{{ .Values.nodeplugin.plugin.image.tag }}"
          args:
            - "--nodeid=$(NODE_ID)"
            - "--type=rbd"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=$(DRIVER_NAME)"
            - "--containerized=true"
          env:
            - name: HOST_ROOTFS
              value: "/rootfs"
            - name: DRIVER_NAME
              value: {{ .Values.driverName }}
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: "unix:/{{ .Values.socketDir }}/{{ .Values.socketFile }}"
          imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
          volumeMounts:
            - name: socket-dir
              mountPath: {{ .Values.socketDir }}
            - name: host-rootfs
              mountPath: "/rootfs"
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
          resources:
{{ toYaml .Values.nodeplugin.plugin.resources | indent 12 }}
      volumes:
        - name: socket-dir
          emptyDir: {}
        # FIXME: this seems way too much. Why is it needed at all for this?
        - name: host-rootfs
          hostPath:
            path: /
        - name: ceph-csi-config
          configMap:
            name: {{ .Values.configMapName | quote }}
{{- if .Values.provisioner.affinity }}
      affinity:
{{ toYaml .Values.provisioner.affinity | indent 8 }}
{{- end }}
{{- if .Values.provisioner.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.provisioner.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.provisioner.tolerations }}
      tolerations:
{{ toYaml .Values.provisioner.tolerations | indent 8 }}
{{- end }}