Update to v1.32

Guillaume
2025-11-02 18:31:13 +01:00
parent 09e63cf400
commit de5971961c
43 changed files with 9492 additions and 1426 deletions


@@ -1,675 +0,0 @@
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: kube-system
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: kube-system
data:
allow-snippet-annotations: "true"
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: "kube-system"
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: "kube-system"
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller-admission
namespace: kube-system
spec:
type: ClusterIP
ports:
- name: https-webhook
port: 443
targetPort: webhook
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: kube-system
spec:
type: LoadBalancer
ipFamilyPolicy: SingleStack
ipFamilies:
- IPv4
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: kube-system
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
replicas: 1
revisionHistoryLimit: 10
minReadySeconds: 0
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirst
containers:
- name: controller
image: "{{.vars.k8s_registry}}/ingress-nginx/controller:v1.1.0@sha256:f766669fdcf3dc26347ed273a55e754b427eb4411ee075a53f30718b4499076a"
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
- --election-id=ingress-controller-leader
- --controller-class=k8s.io/ingress-nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
- name: webhook
containerPort: 8443
protocol: TCP
volumeMounts:
- name: webhook-cert
mountPath: /usr/local/certificates/
readOnly: true
resources:
requests:
cpu: 100m
memory: 90Mi
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding are required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: nginx
spec:
controller: k8s.io/ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
name: ingress-nginx-admission
webhooks:
- name: validate.nginx.ingress.kubernetes.io
matchPolicy: Equivalent
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
failurePolicy: Fail
sideEffects: None
admissionReviewVersions:
- v1
clientConfig:
service:
namespace: "kube-system"
name: ingress-nginx-controller-admission
path: /networking/v1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress-nginx-admission
namespace: kube-system
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ingress-nginx-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-nginx-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: "kube-system"
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ingress-nginx-admission
namespace: kube-system
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ingress-nginx-admission
namespace: kube-system
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: "kube-system"
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-create
namespace: kube-system
annotations:
"helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-create
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: create
image: "{{.vars.k8s_registry}}/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660"
imagePullPolicy: IfNotPresent
args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-patch
namespace: kube-system
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-patch
labels:
helm.sh/chart: ingress-nginx-4.0.12
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: "1.1.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: patch
image: "{{.vars.k8s_registry}}/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660"
imagePullPolicy: IfNotPresent
args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 2000
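
Worth noting: this (now removed) rendering elected a leader through the ingress-controller-leader ConfigMap (see the Role rules and --election-id above), while the replacement manifest later in this commit moves election to coordination.k8s.io Leases. A sketch of the Lease the newer controller maintains, with placeholder values:

apiVersion: coordination.k8s.io/v1
kind: Lease
metadata:
  name: ingress-nginx-leader            # matches --election-id in the new manifest
  namespace: ingress-nginx
spec:
  holderIdentity: ingress-nginx-controller-xxxxx   # placeholder: the current leader pod
  leaseDurationSeconds: 30                         # illustrative duration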


@@ -1,283 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kube-system
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kube-system
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.3.1
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kube-system
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.6
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
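
The Role above only lets the dashboard manage its own secrets and settings; signing in still requires a token for a sufficiently privileged account. A common illustrative pattern (the admin-user account is an example, not created by this commit):

apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user                      # example account
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user                      # example binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system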

addons/novit/calico.yaml — new file, 5144 lines (diff suppressed because it is too large)


@@ -116,7 +116,7 @@ spec:
  operator: "Exists"
  containers:
  - name: coredns
- image: coredns/coredns:1.9.3
+ image: coredns/coredns:1.12.2
  imagePullPolicy: IfNotPresent
  resources:
  limits:


@@ -0,0 +1,752 @@
{{ if eq .vars.ingress_controller "nginx" }}
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
name: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resourceNames:
- ingress-nginx-leader
resources:
- leases
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx-admission
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx-admission
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx-admission
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx-admission
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx-controller
namespace: ingress-nginx
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- appProtocol: http
name: http
port: 80
protocol: TCP
targetPort: http
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
type: NodePort
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
ports:
- appProtocol: https
name: https-webhook
port: 443
targetPort: webhook
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
minReadySeconds: 0
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
spec:
containers:
- args:
- /nginx-ingress-controller
- --election-id=ingress-nginx-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
image: {{ .vars.gcr_io }}/ingress-nginx/controller:v1.12.3@sha256:ac444cd9515af325ba577b596fe4f27a34be1aa330538e8b317ad9d6c8fb94ee
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: controller
ports:
- containerPort: 80
name: http
protocol: TCP
- containerPort: 443
name: https
protocol: TCP
- containerPort: 8443
name: webhook
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 90Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: false
runAsGroup: 82
runAsNonRoot: true
runAsUser: 101
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /usr/local/certificates/
name: webhook-cert
readOnly: true
dnsPolicy: ClusterFirst
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx-admission-create
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx-admission-create
spec:
containers:
- args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: {{ .vars.gcr_io }}/ingress-nginx/kube-webhook-certgen:v1.5.4@sha256:7a38cf0f8480775baaee71ab519c7465fd1dfeac66c421f28f087786e631456e
imagePullPolicy: IfNotPresent
name: create
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx-admission-patch
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx-admission-patch
spec:
containers:
- args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: {{ .vars.gcr_io }}/ingress-nginx/kube-webhook-certgen:v1.5.2@sha256:e8825994b7a2c7497375a9b945f386506ca6a3eda80b89b74ef2db743f66a5ea
imagePullPolicy: IfNotPresent
name: patch
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: nginx
spec:
controller: k8s.io/ingress-nginx
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.12.3
name: ingress-nginx-admission
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: ingress-nginx-controller-admission
namespace: ingress-nginx
path: /networking/v1/ingresses
port: 443
failurePolicy: Fail
matchPolicy: Equivalent
name: validate.nginx.ingress.kubernetes.io
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
sideEffects: None
{{ else if eq .vars.ingress_controller "kingress" }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kingress
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kingress
rules:
- apiGroups: ["","networking.k8s.io"]
resources:
- namespaces
- services
- endpoints
- secrets
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kingress
labels:
addonmanager.kubernetes.io/mode: Recreate
subjects:
- kind: ServiceAccount
name: kingress
namespace: kube-system
roleRef:
kind: ClusterRole
name: kingress
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
namespace: kube-system
name: kingress
labels:
k8s-app: kingress
spec:
minReadySeconds: 60
selector:
matchLabels:
k8s-app: kingress
template:
metadata:
labels:
k8s-app: kingress
spec:
containers:
- args:
- -tls-secret=kube-system/kingress-default-tls
image: mcluseau/kingress:v1.5.0
imagePullPolicy: IfNotPresent
name: kingress
hostNetwork: true
serviceAccountName: kingress
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
updateStrategy:
type: RollingUpdate
{{ end }}
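
Everything above is rendered through {{ .vars.* }} substitutions, so one template serves both controller choices. A hypothetical vars snippet selecting the nginx branch (key names taken from the references above, values illustrative):

vars:
  ingress_controller: nginx       # "kingress" would render the DaemonSet branch instead
  gcr_io: registry.k8s.io         # registry prefix for the controller and certgen images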


@@ -85,7 +85,7 @@ spec:
  image: mcluseau/knet-wg:0.9.0
  args:
  - --key-path=/etc/knet-wg/wg.key
  - --kubeconfig=/run/k8s/kubeconfig.conf # to work without kube-proxy, be sure to define the kubernetes host
  - --nft
  - --nft-masq-oif=!=kwg
  - --node-name=$(NODE_NAME)


@@ -78,7 +78,7 @@ spec:
  hostNetwork: true
  {{ if eq "kpng" .vars.kube_proxy }}
  containers:
- - image: mcluseau/kpng:0.4
+ - image: mcluseau/kpng:0.2
  name: kpng
  volumeMounts:
  - name: empty
@@ -90,7 +90,7 @@ spec:
  - --kubeconfig=/var/lib/kpng/kubeconfig.conf
  - to-api
  - --listen=unix:///k8s/proxy.sock
- - image: mcluseau/kpng:0.4
+ - image: mcluseau/kpng:0.2
  name: kpng-nftables
  securityContext:
  capabilities:
@@ -132,7 +132,7 @@ spec:
  fieldRef:
  apiVersion: v1
  fieldPath: spec.nodeName
- image: {{.vars.k8s_registry}}/kube-proxy:{{.vars.kubernetes_version}}
+ image: {{.vars.kube_proxy_image}}:{{.vars.kubernetes_version}}
  imagePullPolicy: IfNotPresent
  name: kube-proxy
  securityContext:
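
The third hunk swaps the hard-coded {{.vars.k8s_registry}}/kube-proxy image for a dedicated {{.vars.kube_proxy_image}} variable, so kube-proxy can come from its own mirror. A hypothetical vars snippet (values illustrative, not part of the commit):

vars:
  kube_proxy_image: registry.k8s.io/kube-proxy   # assumed default repository
  kubernetes_version: v1.32.0                    # example tag for this update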


@@ -0,0 +1,112 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kubelet-csr-approver
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kubelet-csr-approver
rules:
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests/approval
verbs:
- update
- apiGroups:
- certificates.k8s.io
resourceNames:
- kubernetes.io/kubelet-serving
resources:
- signers
verbs:
- approve
- apiGroups:
- ""
resources:
- events
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubelet-csr-approver
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubelet-csr-approver
subjects:
- kind: ServiceAccount
name: kubelet-csr-approver
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubelet-csr-approver
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
app: kubelet-csr-approver
template:
metadata:
labels:
app: kubelet-csr-approver
spec:
serviceAccountName: kubelet-csr-approver
containers:
- name: kubelet-csr-approver
image: postfinance/kubelet-csr-approver:v1.2.10
resources:
limits:
memory: "128Mi"
cpu: "500m"
args:
- -metrics-bind-address
- ":8080"
- -health-probe-bind-address
- ":8081"
- -leader-election
livenessProbe:
httpGet:
path: /healthz
port: 8081
env:
- name: PROVIDER_REGEX
value: ^.*$
- name: PROVIDER_IP_PREFIXES
value: "0.0.0.0/0,::/0"
- name: MAX_EXPIRATION_SECONDS
value: "31622400" # 366 days
- name: BYPASS_DNS_RESOLUTION
value: "true"
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Equal
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Equal
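
For context, this approver only ever approves kubelet serving certificates, and only when the request matches the PROVIDER_REGEX and PROVIDER_IP_PREFIXES settings above. A request it would consider looks roughly like this sketch (node name and CSR bytes are placeholders):

apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: csr-node1                               # placeholder
spec:
  signerName: kubernetes.io/kubelet-serving     # the only signer granted by the ClusterRole above
  request: <base64-encoded PEM certificate request>   # placeholder
  usages:
  - digital signature
  - server auth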

File diff suppressed because it is too large


@@ -101,12 +101,12 @@ spec:
  app.kubernetes.io/instance: local-static-provisioner
  spec:
  serviceAccountName: local-static-provisioner
  # TODO remove after transition
  nodeSelector:
  local-pv: "true"
  containers:
  - name: provisioner
- image: {{.vars.k8s_registry}}/sig-storage/local-volume-provisioner:v2.4.0
+ image: {{.vars.gcr_io}}/sig-storage/local-volume-provisioner:v2.8.0
  securityContext:
  privileged: true
  env:
@@ -119,7 +119,7 @@ spec:
  fieldRef:
  fieldPath: metadata.namespace
  - name: JOB_CONTAINER_IMAGE
- value: {{.vars.k8s_registry}}/sig-storage/local-volume-provisioner:v2.4.0
+ value: {{.vars.gcr_io}}/sig-storage/local-volume-provisioner:v2.8.0
  ports:
  - name: metrics
  containerPort: 8080


@@ -1,4 +1,3 @@
- ---
  apiVersion: v1
  kind: ServiceAccount
  metadata:
@@ -105,10 +104,11 @@ metadata:
  namespace: kube-system
  spec:
  ports:
- - name: https
+ - appProtocol: https
+   name: https
  port: 443
  protocol: TCP
- targetPort: 4443
+ targetPort: https
  selector:
  k8s-app: metrics-server
  ---
@@ -134,11 +134,11 @@ spec:
  containers:
  - args:
  - --cert-dir=/tmp
- - --secure-port=4443
+ - --secure-port=10250
  - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
  - --kubelet-use-node-status-port
  - --metric-resolution=15s
- image: {{.vars.k8s_registry}}/metrics-server/metrics-server:v0.6.1
+ image: registry.k8s.io/metrics-server/metrics-server:v0.8.0
  imagePullPolicy: IfNotPresent
  livenessProbe:
  failureThreshold: 3
@@ -149,7 +149,7 @@ spec:
  periodSeconds: 10
  name: metrics-server
  ports:
- - containerPort: 4443
+ - containerPort: 10250
  name: https
  protocol: TCP
  readinessProbe:
@@ -166,9 +166,14 @@ spec:
  memory: 200Mi
  securityContext:
  allowPrivilegeEscalation: false
+ capabilities:
+   drop:
+   - ALL
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000
+ seccompProfile:
+   type: RuntimeDefault
  volumeMounts:
  - mountPath: /tmp
  name: tmp-dir
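
For context, metrics-server is reached through the aggregation layer; the stock APIService registration (assumed unchanged by this commit, reproduced as a reference sketch) binds metrics.k8s.io to the Service patched above:

apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100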


@@ -0,0 +1,7 @@
apiVersion: v1
kind: Namespace
metadata:
name: novit-system
spec:
finalizers:
- kubernetes


@@ -1,13 +1,11 @@
- ---
  apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRoleBinding
  metadata:
- name: novit:kubelet-bootstrap
+ name: novit
  roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
- name: system:node-bootstrapper
+ name: cluster-admin
  subjects:
- - apiGroup: rbac.authorization.k8s.io
-   kind: Group
-   name: system:bootstrappers
+ - kind: Group
+   name: novit


@@ -0,0 +1,11 @@
{{ if eq .vars.cloud_provider "vsphere" }}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: default
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/vsphere-volume
parameters:
diskformat: thin
{{ end }}
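
Because of the is-default-class annotation, claims that omit storageClassName bind to this class automatically; a minimal illustrative claim (name and size are placeholders):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  # no storageClassName: the "default" StorageClass above is selected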


@@ -0,0 +1,26 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: novit:kubelet-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:bootstrappers
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: auto-approve-renewals-for-nodes
subjects:
- kind: Group
name: system:nodes
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
apiGroup: rbac.authorization.k8s.io
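
These two bindings implement the usual TLS bootstrap flow: members of system:bootstrappers may submit the initial kubelet client CSR, and system:nodes may renew their own client certificates. Group membership normally comes from a bootstrap token; an illustrative, redacted token Secret (not part of this commit):

apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-abcdef          # "abcdef" is a placeholder token ID
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  token-id: abcdef                      # placeholder
  token-secret: 0123456789abcdef        # placeholder
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups: system:bootstrappers:nodes   # extra groups must stay under system:bootstrappers: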


@@ -0,0 +1,840 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.8.0
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665"
creationTimestamp: null
name: volumesnapshots.snapshot.storage.k8s.io
spec:
group: snapshot.storage.k8s.io
names:
kind: VolumeSnapshot
listKind: VolumeSnapshotList
plural: volumesnapshots
shortNames:
- vs
singular: volumesnapshot
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: Indicates if the snapshot is ready to be used to restore a volume.
jsonPath: .status.readyToUse
name: ReadyToUse
type: boolean
- description: If a new snapshot needs to be created, this contains the name of
the source PVC from which this snapshot was (or will be) created.
jsonPath: .spec.source.persistentVolumeClaimName
name: SourcePVC
type: string
- description: If a snapshot already exists, this contains the name of the existing
VolumeSnapshotContent object representing the existing snapshot.
jsonPath: .spec.source.volumeSnapshotContentName
name: SourceSnapshotContent
type: string
- description: Represents the minimum size of volume required to rehydrate from
this snapshot.
jsonPath: .status.restoreSize
name: RestoreSize
type: string
- description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot.
jsonPath: .spec.volumeSnapshotClassName
name: SnapshotClass
type: string
- description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot
object intends to bind to. Please note that verification of binding actually
requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure
both are pointing at each other. Binding MUST be verified prior to usage of
this object.
jsonPath: .status.boundVolumeSnapshotContentName
name: SnapshotContent
type: string
- description: Timestamp when the point-in-time snapshot was taken by the underlying
storage system.
jsonPath: .status.creationTime
name: CreationTime
type: date
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
description: VolumeSnapshot is a user's request for either creating a point-in-time
snapshot of a persistent volume, or binding to a pre-existing snapshot.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
spec:
description: 'spec defines the desired characteristics of a snapshot requested
by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots
Required.'
properties:
source:
description: source specifies where a snapshot will be created from.
This field is immutable after creation. Required.
properties:
persistentVolumeClaimName:
description: persistentVolumeClaimName specifies the name of the
PersistentVolumeClaim object representing the volume from which
a snapshot should be created. This PVC is assumed to be in the
same namespace as the VolumeSnapshot object. This field should
be set if the snapshot does not exist and needs to be created.
This field is immutable.
type: string
volumeSnapshotContentName:
description: volumeSnapshotContentName specifies the name of a
pre-existing VolumeSnapshotContent object representing an existing
volume snapshot. This field should be set if the snapshot already
exists and only needs a representation in Kubernetes. This field
is immutable.
type: string
type: object
oneOf:
- required: ["persistentVolumeClaimName"]
- required: ["volumeSnapshotContentName"]
volumeSnapshotClassName:
description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass
requested by the VolumeSnapshot. VolumeSnapshotClassName may be
left nil to indicate that the default SnapshotClass should be used.
A given cluster may have multiple default Volume SnapshotClasses:
one default per CSI Driver. If a VolumeSnapshot does not specify
a SnapshotClass, VolumeSnapshotSource will be checked to figure
out what the associated CSI Driver is, and the default VolumeSnapshotClass
associated with that CSI Driver will be used. If more than one VolumeSnapshotClass
exist for a given CSI Driver and more than one have been marked
as default, CreateSnapshot will fail and generate an event. Empty
string is not allowed for this field.'
type: string
required:
- source
type: object
status:
description: status represents the current information of a snapshot.
Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent
objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent
point at each other) before using this object.
properties:
boundVolumeSnapshotContentName:
description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent
object to which this VolumeSnapshot object intends to bind to. If
not specified, it indicates that the VolumeSnapshot object has not
been successfully bound to a VolumeSnapshotContent object yet. NOTE:
To avoid possible security issues, consumers must verify binding
between VolumeSnapshot and VolumeSnapshotContent objects is successful
(by validating that both VolumeSnapshot and VolumeSnapshotContent
point at each other) before using this object.'
type: string
creationTime:
description: creationTime is the timestamp when the point-in-time
snapshot is taken by the underlying storage system. In dynamic snapshot
creation case, this field will be filled in by the snapshot controller
with the "creation_time" value returned from CSI "CreateSnapshot"
gRPC call. For a pre-existing snapshot, this field will be filled
with the "creation_time" value returned from the CSI "ListSnapshots"
gRPC call if the driver supports it. If not specified, it may indicate
that the creation time of the snapshot is unknown.
format: date-time
type: string
error:
description: error is the last observed error during snapshot creation,
if any. This field could be helpful to upper level controllers(i.e.,
application controller) to decide whether they should continue on
waiting for the snapshot to be created based on the type of error
reported. The snapshot controller will keep retrying when an error
occurs during the snapshot creation. Upon success, this error field
will be cleared.
properties:
message:
description: 'message is a string detailing the encountered error
during snapshot creation if specified. NOTE: message may be
logged, and it should not contain sensitive information.'
type: string
time:
description: time is the timestamp when the error was encountered.
format: date-time
type: string
type: object
readyToUse:
description: readyToUse indicates if the snapshot is ready to be used
to restore a volume. In dynamic snapshot creation case, this field
will be filled in by the snapshot controller with the "ready_to_use"
value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing
snapshot, this field will be filled with the "ready_to_use" value
returned from the CSI "ListSnapshots" gRPC call if the driver supports
it, otherwise, this field will be set to "True". If not specified,
it means the readiness of a snapshot is unknown.
type: boolean
restoreSize:
type: string
description: restoreSize represents the minimum size of volume required
to create a volume from this snapshot. In dynamic snapshot creation
case, this field will be filled in by the snapshot controller with
the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call.
For a pre-existing snapshot, this field will be filled with the
"size_bytes" value returned from the CSI "ListSnapshots" gRPC call
if the driver supports it. When restoring a volume from this snapshot,
the size of the volume MUST NOT be smaller than the restoreSize
if it is specified, otherwise the restoration will fail. If not
specified, it indicates that the size is unknown.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- description: Indicates if the snapshot is ready to be used to restore a volume.
jsonPath: .status.readyToUse
name: ReadyToUse
type: boolean
- description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created.
jsonPath: .spec.source.persistentVolumeClaimName
name: SourcePVC
type: string
- description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot.
jsonPath: .spec.source.volumeSnapshotContentName
name: SourceSnapshotContent
type: string
- description: Represents the minimum size of volume required to rehydrate from this snapshot.
jsonPath: .status.restoreSize
name: RestoreSize
type: string
- description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot.
jsonPath: .spec.volumeSnapshotClassName
name: SnapshotClass
type: string
- description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object.
jsonPath: .status.boundVolumeSnapshotContentName
name: SnapshotContent
type: string
- description: Timestamp when the point-in-time snapshot was taken by the underlying storage system.
jsonPath: .status.creationTime
name: CreationTime
type: date
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta1
# This indicates the v1beta1 version of the custom resource is deprecated.
# API requests to this version receive a warning in the server response.
deprecated: true
# This overrides the default warning returned to clients making v1beta1 API requests.
deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot"
schema:
openAPIV3Schema:
description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
spec:
description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.'
properties:
source:
description: source specifies where a snapshot will be created from. This field is immutable after creation. Required.
properties:
persistentVolumeClaimName:
description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exist and needs to be created. This field is immutable.
type: string
volumeSnapshotContentName:
description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable.
type: string
type: object
volumeSnapshotClassName:
description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default VolumeSnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exists for a given CSI Driver and more than one has been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.'
type: string
required:
- source
type: object
status:
description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.
properties:
boundVolumeSnapshotContentName:
description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.'
type: string
creationTime:
description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown.
format: date-time
type: string
error:
description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper-level controllers (e.g., an application controller) to decide whether they should continue waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during snapshot creation. Upon success, this error field will be cleared.
properties:
message:
description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.'
type: string
time:
description: time is the timestamp when the error was encountered.
format: date-time
type: string
type: object
readyToUse:
description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown.
type: boolean
restoreSize:
type: string
description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
type: object
required:
- spec
type: object
served: false
storage: false
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
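# Illustrative sketch (not part of the CRD): a minimal VolumeSnapshot against the
# v1 API served above; the snapshot, class, and PVC names are hypothetical.
#
#   apiVersion: snapshot.storage.k8s.io/v1
#   kind: VolumeSnapshot
#   metadata:
#     name: data-snap
#     namespace: default
#   spec:
#     volumeSnapshotClassName: csi-snapclass
#     source:
#       persistentVolumeClaimName: data-pvc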
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.8.0
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665"
creationTimestamp: null
name: volumesnapshotclasses.snapshot.storage.k8s.io
spec:
group: snapshot.storage.k8s.io
names:
kind: VolumeSnapshotClass
listKind: VolumeSnapshotClassList
plural: volumesnapshotclasses
shortNames:
- vsclass
- vsclasses
singular: volumesnapshotclass
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .driver
name: Driver
type: string
- description: Determines whether a VolumeSnapshotContent created through the
VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
jsonPath: .deletionPolicy
name: DeletionPolicy
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
description: VolumeSnapshotClass specifies parameters that an underlying storage
system uses when creating a volume snapshot. A specific VolumeSnapshotClass
is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses
are non-namespaced.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
deletionPolicy:
description: deletionPolicy determines whether a VolumeSnapshotContent
created through the VolumeSnapshotClass should be deleted when its bound
VolumeSnapshot is deleted. Supported values are "Retain" and "Delete".
"Retain" means that the VolumeSnapshotContent and its physical snapshot
on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent
and its physical snapshot on underlying storage system are deleted.
Required.
enum:
- Delete
- Retain
type: string
driver:
description: driver is the name of the storage driver that handles this
VolumeSnapshotClass. Required.
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
parameters:
additionalProperties:
type: string
description: parameters is a key-value map with storage driver specific
parameters for creating snapshots. These values are opaque to Kubernetes.
type: object
required:
- deletionPolicy
- driver
type: object
served: true
storage: true
subresources: {}
- additionalPrinterColumns:
- jsonPath: .driver
name: Driver
type: string
- description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
jsonPath: .deletionPolicy
name: DeletionPolicy
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta1
# This indicates the v1beta1 version of the custom resource is deprecated.
# API requests to this version receive a warning in the server response.
deprecated: true
# This overrides the default warning returned to clients making v1beta1 API requests.
deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass"
schema:
openAPIV3Schema:
description: VolumeSnapshotClass specifies parameters that an underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
deletionPolicy:
description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required.
enum:
- Delete
- Retain
type: string
driver:
description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required.
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
parameters:
additionalProperties:
type: string
description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes.
type: object
required:
- deletionPolicy
- driver
type: object
served: false
storage: false
subresources: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
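# Illustrative sketch (not part of the CRD): a minimal VolumeSnapshotClass for the
# v1 API served above; the class name and CSI driver name are hypothetical.
#
#   apiVersion: snapshot.storage.k8s.io/v1
#   kind: VolumeSnapshotClass
#   metadata:
#     name: csi-snapclass
#   driver: example.csi.k8s.io
#   deletionPolicy: Delete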
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.8.0
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/665"
creationTimestamp: null
name: volumesnapshotcontents.snapshot.storage.k8s.io
spec:
group: snapshot.storage.k8s.io
names:
kind: VolumeSnapshotContent
listKind: VolumeSnapshotContentList
plural: volumesnapshotcontents
shortNames:
- vsc
- vscs
singular: volumesnapshotcontent
scope: Cluster
versions:
- additionalPrinterColumns:
- description: Indicates if the snapshot is ready to be used to restore a volume.
jsonPath: .status.readyToUse
name: ReadyToUse
type: boolean
- description: Represents the complete size of the snapshot in bytes
jsonPath: .status.restoreSize
name: RestoreSize
type: integer
- description: Determines whether this VolumeSnapshotContent and its physical
snapshot on the underlying storage system should be deleted when its bound
VolumeSnapshot is deleted.
jsonPath: .spec.deletionPolicy
name: DeletionPolicy
type: string
- description: Name of the CSI driver used to create the physical snapshot on
the underlying storage system.
jsonPath: .spec.driver
name: Driver
type: string
- description: Name of the VolumeSnapshotClass to which this snapshot belongs.
jsonPath: .spec.volumeSnapshotClassName
name: VolumeSnapshotClass
type: string
- description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent
object is bound.
jsonPath: .spec.volumeSnapshotRef.name
name: VolumeSnapshot
type: string
- description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
jsonPath: .spec.volumeSnapshotRef.namespace
name: VolumeSnapshotNamespace
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
description: VolumeSnapshotContent represents the actual "on-disk" snapshot
object in the underlying storage system
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
spec:
description: spec defines properties of a VolumeSnapshotContent created
by the underlying storage system. Required.
properties:
deletionPolicy:
description: deletionPolicy determines whether this VolumeSnapshotContent
and its physical snapshot on the underlying storage system should
be deleted when its bound VolumeSnapshot is deleted. Supported values
are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent
and its physical snapshot on underlying storage system are kept.
"Delete" means that the VolumeSnapshotContent and its physical snapshot
on underlying storage system are deleted. For dynamically provisioned
snapshots, this field will automatically be filled in by the CSI
snapshotter sidecar with the "DeletionPolicy" field defined in the
corresponding VolumeSnapshotClass. For pre-existing snapshots, users
MUST specify this field when creating the VolumeSnapshotContent
object. Required.
enum:
- Delete
- Retain
type: string
driver:
description: driver is the name of the CSI driver used to create the
physical snapshot on the underlying storage system. This MUST be
the same as the name returned by the CSI GetPluginName() call for
that driver. Required.
type: string
source:
description: source specifies whether the snapshot is (or should be)
dynamically provisioned or already exists, and just requires a Kubernetes
object representation. This field is immutable after creation. Required.
properties:
snapshotHandle:
description: snapshotHandle specifies the CSI "snapshot_id" of
a pre-existing snapshot on the underlying storage system for
which a Kubernetes object representation was (or should be)
created. This field is immutable.
type: string
volumeHandle:
description: volumeHandle specifies the CSI "volume_id" of the
volume from which a snapshot should be dynamically taken.
This field is immutable.
type: string
type: object
oneOf:
- required: ["snapshotHandle"]
- required: ["volumeHandle"]
sourceVolumeMode:
description: SourceVolumeMode is the mode of the volume whose snapshot
is taken. Can be either “Filesystem” or “Block”. If not specified,
it indicates the source volume's mode is unknown. This field is
immutable. This field is an alpha field.
type: string
volumeSnapshotClassName:
description: name of the VolumeSnapshotClass from which this snapshot
was (or will be) created. Note that after provisioning, the VolumeSnapshotClass
may be deleted or recreated with a different set of values, and as
such, should not be referenced post-snapshot creation.
type: string
volumeSnapshotRef:
description: volumeSnapshotRef specifies the VolumeSnapshot object
to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName
field must reference this VolumeSnapshotContent's name for the
bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent
object, name and namespace of the VolumeSnapshot object MUST be
provided for binding to happen. This field is immutable after creation.
Required.
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
required:
- deletionPolicy
- driver
- source
- volumeSnapshotRef
type: object
status:
description: status represents the current information of a snapshot.
properties:
creationTime:
description: creationTime is the timestamp when the point-in-time
snapshot is taken by the underlying storage system. In dynamic snapshot
creation case, this field will be filled in by the CSI snapshotter
sidecar with the "creation_time" value returned from CSI "CreateSnapshot"
gRPC call. For a pre-existing snapshot, this field will be filled
with the "creation_time" value returned from the CSI "ListSnapshots"
gRPC call if the driver supports it. If not specified, it indicates
the creation time is unknown. The format of this field is a Unix
nanoseconds time encoded as an int64. On Unix, the command `date
+%s%N` returns the current time in nanoseconds since 1970-01-01
00:00:00 UTC.
format: int64
type: integer
error:
description: error is the last observed error during snapshot creation,
if any. Upon success after retry, this error field will be cleared.
properties:
message:
description: 'message is a string detailing the encountered error
during snapshot creation if specified. NOTE: message may be
logged, and it should not contain sensitive information.'
type: string
time:
description: time is the timestamp when the error was encountered.
format: date-time
type: string
type: object
readyToUse:
description: readyToUse indicates if a snapshot is ready to be used
to restore a volume. In dynamic snapshot creation case, this field
will be filled in by the CSI snapshotter sidecar with the "ready_to_use"
value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing
snapshot, this field will be filled with the "ready_to_use" value
returned from the CSI "ListSnapshots" gRPC call if the driver supports
it, otherwise, this field will be set to "True". If not specified,
it means the readiness of a snapshot is unknown.
type: boolean
restoreSize:
description: restoreSize represents the complete size of the snapshot
in bytes. In dynamic snapshot creation case, this field will be
filled in by the CSI snapshotter sidecar with the "size_bytes" value
returned from CSI "CreateSnapshot" gRPC call. For a pre-existing
snapshot, this field will be filled with the "size_bytes" value
returned from the CSI "ListSnapshots" gRPC call if the driver supports
it. When restoring a volume from this snapshot, the size of the
volume MUST NOT be smaller than the restoreSize if it is specified,
otherwise the restoration will fail. If not specified, it indicates
that the size is unknown.
format: int64
minimum: 0
type: integer
snapshotHandle:
description: snapshotHandle is the CSI "snapshot_id" of a snapshot
on the underlying storage system. If not specified, it indicates
that dynamic snapshot creation has either failed or it is still
in progress.
type: string
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- description: Indicates if the snapshot is ready to be used to restore a volume.
jsonPath: .status.readyToUse
name: ReadyToUse
type: boolean
- description: Represents the complete size of the snapshot in bytes
jsonPath: .status.restoreSize
name: RestoreSize
type: integer
- description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted.
jsonPath: .spec.deletionPolicy
name: DeletionPolicy
type: string
- description: Name of the CSI driver used to create the physical snapshot on the underlying storage system.
jsonPath: .spec.driver
name: Driver
type: string
- description: Name of the VolumeSnapshotClass to which this snapshot belongs.
jsonPath: .spec.volumeSnapshotClassName
name: VolumeSnapshotClass
type: string
- description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
jsonPath: .spec.volumeSnapshotRef.name
name: VolumeSnapshot
type: string
- description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
jsonPath: .spec.volumeSnapshotRef.namespace
name: VolumeSnapshotNamespace
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta1
# This indicates the v1beta1 version of the custom resource is deprecated.
# API requests to this version receive a warning in the server response.
deprecated: true
# This overrides the default warning returned to clients making v1beta1 API requests.
deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent"
schema:
openAPIV3Schema:
description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
spec:
description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required.
properties:
deletionPolicy:
description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required.
enum:
- Delete
- Retain
type: string
driver:
description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required.
type: string
source:
description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required.
properties:
snapshotHandle:
description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable.
type: string
volumeHandle:
description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken. This field is immutable.
type: string
type: object
volumeSnapshotClassName:
description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with a different set of values, and as such, should not be referenced post-snapshot creation.
type: string
volumeSnapshotRef:
description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required.
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
required:
- deletionPolicy
- driver
- source
- volumeSnapshotRef
type: object
status:
description: status represents the current information of a snapshot.
properties:
creationTime:
description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC.
format: int64
type: integer
error:
description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared.
properties:
message:
description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.'
type: string
time:
description: time is the timestamp when the error was encountered.
format: date-time
type: string
type: object
readyToUse:
description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown.
type: boolean
restoreSize:
description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown.
format: int64
minimum: 0
type: integer
snapshotHandle:
description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress.
type: string
type: object
required:
- spec
type: object
served: false
storage: false
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
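# Illustrative sketch (not part of the CRD): a pre-provisioned VolumeSnapshotContent
# bound to a VolumeSnapshot, per the v1 schema above; every name and the
# snapshotHandle are hypothetical. For dynamically created snapshots, the CSI
# snapshotter sidecar creates and fills in this object instead.
#
#   apiVersion: snapshot.storage.k8s.io/v1
#   kind: VolumeSnapshotContent
#   metadata:
#     name: data-snap-content
#   spec:
#     deletionPolicy: Retain
#     driver: example.csi.k8s.io
#     source:
#       snapshotHandle: snap-0123456789
#     volumeSnapshotRef:
#       name: data-snap
#       namespace: default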

(modified file)

@@ -3,13 +3,13 @@
     profile: server
     per_host: true
     template: |
-      {"CN":"{{.host.name}}","hosts":["127.0.0.1","{{.host.ip}}"],"key":{"algo":"ecdsa","size":256}}
+      {"CN":"{{.host.name}}","hosts":["127.0.0.1","{{.host.ip}}"{{ range .host.ips }},"{{.}}"{{ end }}],"key":{"algo":"ecdsa","size":256}}
   - name: etcd-peer
     ca: etcd
     profile: peer
     per_host: true
     template: |
-      {"CN":"{{.host.name}}","hosts":["127.0.0.1","{{.host.ip}}"],"key":{"algo":"ecdsa","size":256}}
+      {"CN":"{{.host.name}}","hosts":["127.0.0.1","{{.host.ip}}"{{ range .host.ips }},"{{.}}"{{ end }}],"key":{"algo":"ecdsa","size":256}}
   - name: etcd-client
     ca: etcd
     profile: client
@@ -27,6 +27,7 @@
       {{- if .vars.public_vip }}"{{.vars.public_vip}}",{{end}}
       {{- if .vars.dmz_vip }}"{{.vars.dmz_vip}}",{{end}}
       {{- if .vars.apiserver_vip }}"{{.vars.apiserver_vip}}",{{ end }}
+      {{- range .host.ips }}"{{.}}",{{ end }}
       "{{.host.ip}}"
       ],"key":{"algo":"ecdsa","size":521}}
   - name: cluster-client
@@ -44,3 +45,9 @@
     profile: client
     template: |
       {"CN":"proxy-client","hosts":["*"],"key":{"algo":"ecdsa","size":256}}
+  - name: node-bootstrap
+    ca: cluster
+    profile: bootstrap-client
+    template: |
+      {"CN":"system:node:${host_name}","names":[{"O":"system:nodes"}],
+      "hosts":[{{- range .host.ips }}"{{.}}",{{ end }}"${host_ip}"],"key":{"algo":"ecdsa","size":256}}

(modified file)

@@ -1,3 +1,5 @@
+#!include common
+#!include clusters/k8s
 domain: cluster.local
 subnets:
@@ -5,52 +7,12 @@ subnets:
   pods: 192.168.64.0/20
 addons:
-- essentials
+- novit
 vars:
-  kubernetes_version: v1.26.4
-  bootstrap_auths:
-  - name: "my-user"
-    sshKey: "ssh-ed25519 xxx my-user"
-  ssh_keys:
-  - "ssh-ed25519 xxx my-user"
-  devname_match: /dev/([shv]da|nmve[0-9]+n[0-9]+)
-  # to match a specific partition (here: 3):
-  #devname_match: /dev/([shv]da|nvme[0-9]+n[0-9]+p)3
-  vip_interface: main
-  public_vip: 172.17.1.138
-  netmask: 24
-  gateway: 172.17.1.8
-  dns:
-  #- 172.17.1.1
-  - 208.67.220.220
-  dls_base_url: http://172.17.1.8:7606
-  etcd:
-    image: quay.io/coreos/etcd
-    version: v3.5.9
-    cluster_state: new # set to existing to secure existing clusters
-  keepalived:
-    image: novitnc/keepalived
-    version: 2.0.19
-    garp_master_refresh: 60
-    router_id: 1
-  control_plane:
-    api_port: 6443
-    reserve_resources: true
-  iface: "(en|eth).*"
-  containerd_size: 50%FREE
-  local_storage_class: local
+  is_vmware: false
+  devname_match: /dev/([shv]da|nvme[0-9]+n[0-9]+)
+  #devname_match: /dev/[shv]da
   encrypt_disks: false
@@ -58,7 +20,68 @@ vars:
   ingress_controller: nginx
   kube_proxy: proxy
-  k8s_registry: registry.k8s.io
-  gcr_io: registry.k8s.io
-  kube_proxy_image: registry.k8s.io/kube-proxy
+  node_cidr_mask_size: 24
+  control_plane:
+    api_port: 6443
+    reserve_resources: false
+  etcd_image: quay.io/coreos/etcd
+  etcd_version: v3.5.21
+  etcd_cluster_state: new # set to new for new clusters (manually on hosts is fine)
+  etcd_split_events: false
+  keepalived_image: novitnc/keepalived
+  keepalived_version: 2.0.19
+  keepalived_garp_master_refresh: 60
   kernel_modules: []
+  gateway: 172.17.1.8
+  dls_base_url: http://172.17.1.8:7606
+  dnses:
+  - 1.1.1.1
+  - 8.8.8.8
+  netmask: 24
+  iface: "(en|eth).*"
+  ping_gateway: false
+  public_vip: 172.17.1.138
+  vip_interface: main
+  keepalived_router_id: 1
+  containerd_size: 50%FREE
+  enable_serial_console: false
+  enable_nfs: false
+  ntp_server_mode: false
+  no_proxy: 192.168.0.0/16,172.16.0.0/12,10.0.0.0/8
+  cloud_provider: none
+  docker_registries_mirror_cache:
+    enable: true
+    mb: 20000
+    port: 8585
+  docker_registries_mirror_repos:
+    registry.k8s.io: /https/registry.k8s.io/v2
+    docker.io: /https/registry-1.docker.io/v2
+    gcr.io: /https/gcr.io/v2
+    quay.io: /https/quay.io/v2
+  tick:
+    storageClass: local
+  elasticsearch:
+    # below is namespace of ES deployment
+    default:
+      enabled: false
+      endpoint: es6
+  kafka:
+    default:
+      enabled: false
+      service: kafka
+  local_storage_class: local

clusters/k8s.yaml (new file):

@@ -0,0 +1,2 @@
vars:
kubernetes_version: v1.32.9

common.yaml (new file):

@@ -0,0 +1,2 @@
vars:
master_group: "master"
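Assuming the #!include directives merge the named files' vars into the including
cluster config (which is how the new common.yaml and clusters/k8s.yaml are wired
in above), a cluster's effective vars start from:

    vars:
      master_group: "master"        # from common.yaml
      kubernetes_version: v1.32.9   # from clusters/k8s.yaml

so bumping Kubernetes across clusters is a one-line change in clusters/k8s.yaml.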

(modified file)

@@ -5,8 +5,14 @@ anti_phishing_code: "Direktil<3"
 modules: /modules.sqfs
 auths:
+- name: local
+  password: {{ password "root" "bootstrap" }}
 {{ .vars.bootstrap_auths |yaml }}
+ssh:
+  listen: "[::]:22"
+  user_ca: "/user_ca.pub"
 networks:
 - name: loopback
   interfaces: [ { var: iface, n: 1, regexps: [ "^lo$" ] } ]
@@ -15,27 +21,29 @@ networks:
     ip a add ::1/128 dev lo
     ip li set lo up
-    {{- if .vars.net_custom }}
+    {{ if .vars.net_custom }}
     {{ .vars.net_custom | indent " " }}
-    {{- else }}
+    {{ else }}
     ip link add name main type bond
-    ip addr add {{.host.ip}}/{{.vars.netmask}} dev main
+    {{ if not .vars.net_dhcp }}
+    ip addr add {{host_ip}}/{{.vars.netmask}} dev main
+    {{ end }}
     ip link set main up
-    {{- if .vars.gateway_mask }}
+    {{ if .vars.gateway_mask }}
     ip route add {{.vars.gateway}}/{{.vars.gateway_mask}} dev main
-    {{- end }}
+    {{ end }}
+    {{ if not .vars.net_dhcp }}
     ip route add default via {{.vars.gateway}}
+    {{ end }}
-    cat >>/etc/resolv.conf <<EOF
-    {{- range .vars.dns }}
-    nameserver {{.}}
-    {{- end }}
-    EOF
+    {{ if .vars.dns }}
+    echo "nameserver {{.vars.dns}}" >/etc/resolv.conf
+    {{ end }}
 - name: main
   interfaces:
   - var: ifaces
     n: -1 # grab all matches
     regexps:
     - {{ .vars.iface }}
   script: |
@@ -44,9 +52,37 @@
       ip link set $iface master main
       ip li set $iface up
     done
+    {{ if .vars.net_dhcp }}
+    udhcpc -i main
+    {{ end }}
     {{- range .vars.extra_routes }}
     ip route add {{.}}
     {{- end }}
+
+{{ if and .vars.dmz_ip .vars.dmz_netmask .vars.dmz_interface }}
+- interfaces:
+  - var: ifaces
+    regexps:
+    - {{ .vars.dmz_interface }}
+    n: 1
+  script: |
+    ip a add {{.vars.dmz_ip}}/{{.vars.dmz_netmask}} dev $iface
+    ip li set $iface up
+    {{ if .vars.dmz_net_custom }}
+    {{ .vars.dmz_net_custom | indent " " }}
+    {{ end }}
+    {{ end }}
+{{ end }}
+{{- with .vars.network_extra }}
+{{ . }}
+{{- end }}
+{{ if .vars.pre_lvm_crypt }}
+pre_lvm_crypt:
+{{ .vars.pre_lvm_crypt |yaml }}
+{{ end }}
 lvm:
 - vg: storage
   pvs:
@@ -64,22 +100,31 @@ lvm:
   - name: varlog
     extents: 10%VG
-{{ if .vars.is_master }}
+{{- if .vars.is_master }}
   - name: etcd
     extents: 10%VG
-{{ end }}
+{{- end }}
   - name: kubelet
     extents: 5%VG
+{{- if .vars.cri_o }}
+  - name: crio
+    extents: {{ .vars.containerd_size }}
+{{- else }}
   - name: containerd
     extents: {{ .vars.containerd_size }}
+{{- end }}
 crypt:
 {{- if .vars.encrypt_disks }}
 - dev: /dev/storage/bootstrap
 - dev: /dev/storage/varlog
 - dev: /dev/storage/kubelet
+{{- if .vars.cri_o }}
+- dev: /dev/storage/crio
+{{- else }}
 - dev: /dev/storage/containerd
+{{- end }}
 {{- if .vars.is_master }}
 - dev: /dev/storage/etcd
 {{- end }}
@@ -95,5 +140,7 @@ bootstrap:
 {{- end }}
 {{ if .vars.dls_base_url }}
 seed: {{ .vars.dls_base_url }}/hosts-by-token/{{ host_download_token }}/bootstrap.tar
+# TODO seed_sign_key: "..."
 {{ end }}
+# TODO load_and_close: true
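A note on the net_dhcp switch introduced above: a host opting in would set, e.g.
(hypothetical host-level override):

    vars:
      net_dhcp: true

which makes the rendered network script skip the static `ip addr add` and
default-route commands and run `udhcpc -i main` on the bonded interface instead.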

(deleted file)

@@ -1,346 +0,0 @@
root_user:
password_hash: ""
authorized_keys:
{{- range .vars.ssh_keys }}
- "{{ . }}"
{{- end }}
{{- if .vars.extra_ssh_keys }}
{{- range .vars.extra_ssh_keys }}
- "{{ . }}"
{{- end }}
{{- end }}
layers: # it's TOP to bottom
- kubernetes
- init
- modules
- system
{{ if .vars.modules -}}
modules:
{{- range .vars.modules }}
- {{ . }}
{{- end }}
{{- end }}
mounts:
- dev: /dev/{{ if .vars.encrypt_disks }}mapper{{ else }}storage{{ end }}/varlog
path: /var/log
- dev: /dev/{{ if .vars.encrypt_disks }}mapper{{ else }}storage{{ end }}/kubelet
path: /var/lib/kubelet
- dev: /dev/{{ if .vars.encrypt_disks }}mapper{{ else }}storage{{ end }}/containerd
path: /var/lib/containerd
{{ if .vars.is_master }}
- dev: /dev/{{ if .vars.encrypt_disks }}mapper{{ else }}storage{{ end }}/etcd
path: /var/lib/etcd
{{ end }}
files:
- path: /etc/machine-id
content: |
{{ machine_id }}
- path: /etc/rc.conf
content: |
rc_shell=/sbin/sulogin
rc_logger="YES"
#rc_log_path="/var/log/rc.log"
unicode="YES"
rc_tty_number=12
rc_cgroup_mode="legacy"
rc_cgroup_memory_use_hierarchy="YES"
rc_controller_cgroups="YES"
- path: /etc/hostname
content: "{{.host.name}}\n"
- path: /etc/hosts
content: |
127.0.0.1 localhost {{.host.name}}{{ if not .vars.public_vip }} kubernetes{{end}}
::1 localhost {{.host.name}}{{ if not .vars.public_vip }} kubernetes{{end}}
{{ if .vars.public_vip }}
{{ .vars.public_vip }} kubernetes
{{ end }}
{{ if .vars.extra_hosts }}
{{ range .vars.extra_hosts }}
{{ . }}
{{ end -}}
{{ end }}
- path: /etc/resolv.conf
content: |
{{- range .vars.dns }}
nameserver {{ . }}
{{- end }}
- path: /etc/sysctl.conf
content: |
fs.file-max = 20971520
fs.inotify.max_user_watches = 1048576
kernel.pid_max = 1048576
net.ipv4.ip_forward = 1
vm.max_map_count = 262144
net.ipv4.neigh.default.gc_thresh1 = 16384
net.ipv4.neigh.default.gc_thresh2 = 28672
net.ipv4.neigh.default.gc_thresh3 = 32768
# -------------------------------------------------------------------------
{{ ssh_host_keys "/etc/ssh" }}
# ------------------------------------------------------------------------
{{ if .vars.is_master }}
# certificates for etcd servers
{{ tls_dir "etcd-server" }}
{{ tls_dir "etcd-peer" }}
# certificates for etcd clients
{{ tls_dir "etcd-client" }}
# cluster certificates
{{ ca_dir "cluster" }}
{{ ca_dir "service-accounts" }}
{{ tls_dir "apiserver" }}
{{ tls_dir "kubelet-client" }}
{{ tls_dir "proxy-client" }}
{{ end }}
{{ tls_dir "cluster-client" }}
{{ if .vars.is_master -}}
- path: /etc/kubernetes/token-auth.csv
mode: 0600
content: |
{{ token "bootstrap" }},kubelet-bootstrap,10001,"system:bootstrappers"
{{ token "admin" }},admin-token,10002,"system:masters"
{{- end }}
# ------------------------------------------------------------------------
- path: /etc/chrony/chrony.conf
mode: 0644
content: |
{{ if .vars.ntp_servers -}}
{{ range .vars.ntp_servers -}}
server {{ . }} iburst
{{ end -}}
{{ else -}}
server 0.gentoo.pool.ntp.org iburst
server 1.gentoo.pool.ntp.org iburst
server 2.gentoo.pool.ntp.org iburst
server 3.gentoo.pool.ntp.org iburst
{{- end }}
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
# ------------------------------------------------------------------------
- path: /etc/direktil/services/k8s-local-volumes
mode: 0755
content: |
#! /bin/sh
# ---
# restart: 3
while true
do
for dev in /dev/storage/k8s-pv-*
do
[ -e $dev ] || continue
tgt=${dev/dev/mnt}
[ -e $tgt ] || {
mkdir -p $(dirname $tgt)
ln -s $dev $tgt
}
done
for dev in /dev/mapper/k8s-pv-*
do
[ -e $dev ] || continue
tgt=/mnt/storage/mapper__$(basename $dev)
[ -e $tgt ] || {
mkdir -p $(dirname $tgt)
ln -s $dev $tgt
}
done
sleep 10
done
# ------------------------------------------------------------------------
- path: /etc/direktil/services/containerd
mode: 0755
content: |
#! /bin/bash
# ---
# restart: 3
# provides:
# - k8s-runtime
set -ex
ulimit -n 1048576
ulimit -u unlimited
ulimit -c unlimited
{{ if .vars.proxy -}}
export HTTP_PROXY={{.vars.proxy}}
export HTTPS_PROXY="$HTTP_PROXY"
export NO_PROXY="192.168.0.0/16,172.16.0.0/12,10.0.0.0/8"
{{- end }}
exec /usr/bin/containerd \
--log-level info
# ------------------------------------------------------------------------
- path: /etc/direktil/services/kubelet
mode: 0755
content: |
#! /bin/sh
# ---
# restart: 3
# needs:
# - k8s-runtime
set -ex
ctr_sock="/run/containerd/containerd.sock"
echo "waiting for $ctr_sock"
while ! [ -e $ctr_sock ]; do sleep 1; done
#ulimit -n 1048576
mkdir -p /var/lib/kubelet/manifests
exec /usr/bin/kubelet \
--config=/etc/kubernetes/kubelet.yaml \
{{- if .vars.hostname_override }}
--hostname-override={{.vars.hostname_override}} \
{{- end }}
{{- range $k, $v := .labels }}
--node-labels={{ $k }}={{$v}} \
{{- end }}
--container-runtime-endpoint=unix://$ctr_sock \
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
--kubeconfig=/var/lib/kubelet/kubeconfig \
--hostname-override={{.host.name}} \
--node-ip={{.host.ip}}
# -------------------------------------------------------------------------
{{ $podPidsLimit := 4096 -}}
- path: /etc/kubernetes/kubelet.yaml
mode: 0600
content: |
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
staticPodPath: /var/lib/kubelet/manifests
makeIPTablesUtilChains: {{ eq .vars.kube_proxy "proxy" }}
clusterDomain: {{.cluster.domain}}
clusterDNS:
- {{.cluster.dns_svc_ip }}
podCIDR: {{.cluster.subnets.pods}}
address: 0.0.0.0
authentication:
x509:
clientCAFile: /etc/tls/cluster-client/ca.crt
anonymous:
enabled: false
maxPods: 220
serializeImagePulls: false
featureGates: {}
serverTLSBootstrap: true
rotateCertificates: true
podPidsLimit: {{ $podPidsLimit }}
containerLogMaxFiles: 2
containerLogMaxSize: 16Mi
# cgroups configuration
cgroupsPerQOS: true
cgroupDriver: cgroupfs
systemReservedCgroup: openrc
systemReserved:
cpu: "{{ .vars.system_reserved.cpu }}"
memory: "{{ .vars.system_reserved.memory }}"
kubeReservedCgroup: podruntime
kubeReserved:
cpu: "{{ .vars.kube_reserved.cpu }}"
memory: "{{ .vars.kube_reserved.memory }}"
#evictionHard:
# memory.available: 100Mi
- path: /etc/kubernetes/haproxy-api.cfg
content: |
frontend k8s-api
bind 127.0.0.1:6444
bind [::1]:6444
mode tcp
default_backend k8s-api
backend k8s-api
mode tcp
option tcp-check
balance random
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
{{- $apiPort := .vars.control_plane.api_port -}}
{{- range $i, $host := hosts_by_group "master" }}
server {{$host.name}}_0 {{$host.ip}}:{{ $apiPort }} check
{{- end }}
- path: /etc/kubernetes/bootstrap.kubeconfig
mode: 0600
content: |
apiVersion: v1
kind: Config
preferences: {}
current-context: local
clusters:
- cluster:
certificate-authority: /etc/tls/cluster-client/ca.crt
server: https://[::1]:6444
name: local
contexts:
- context:
cluster: local
user: kubelet-bootstrap
name: local
users:
- name: kubelet-bootstrap
user:
token: {{ token "bootstrap" }}
- path: /etc/kubernetes/control-plane/kubeconfig
mode: 0600
content: |
apiVersion: v1
kind: Config
preferences: {}
current-context: local
clusters:
- cluster:
certificate-authority: /etc/tls/cluster-client/ca.crt
server: https://[::1]:6444
name: local
contexts:
- context:
cluster: local
user: control-plane
name: local
users:
- name: control-plane
user:
token: {{ token "admin" }}
{{ static_pods_files "/etc/kubernetes/manifests.static" }}

configs/system.yaml (new file):

@@ -0,0 +1,833 @@
root_user:
password_hash: {{ password "root" "sha512crypt" }}
authorized_keys:
{{- range .vars.ssh_keys }}
- "{{ . }}"
{{- end }}
{{- if .vars.additional_ssh_keys }}
{{- range .vars.additional_ssh_keys }}
- "{{ . }}"
{{- end }}
{{- end }}
layers: # it's TOP to bottom
- kubernetes{{if .vars.cri_o}}-crio{{end}}
- init
- modules
- system
{{ if .vars.modules -}}
modules:
{{- range .vars.modules }}
- {{ . }}
{{- end }}
{{- end }}
mounts:
- dev: /dev/{{ if .vars.encrypt_disks }}mapper{{ else }}storage{{ end }}/varlog
path: /var/log
- dev: /dev/{{ if .vars.encrypt_disks }}mapper{{ else }}storage{{ end }}/kubelet
path: /var/lib/kubelet
{{- if .vars.cri_o }}
- dev: /dev/{{ if .vars.encrypt_disks }}mapper{{ else }}storage{{ end }}/crio
path: /var/lib/crio
{{- else }}
- dev: /dev/{{ if .vars.encrypt_disks }}mapper{{ else }}storage{{ end }}/containerd
path: /var/lib/containerd
{{- end }}
{{ if .vars.is_master }}
- dev: /dev/{{ if .vars.encrypt_disks }}mapper{{ else }}storage{{ end }}/etcd
path: /var/lib/etcd
{{ end }}
{{ if and .vars.novit .vars.novit.vpn .vars.novit.vpn.site_prefix .vars.novit.vpn.ip_suffix }}
vpns:
- name: wgnovit
port: 39519
ips:
- fd6e:7674:{{ .vars.novit.vpn.site_prefix}}:{{ .vars.novit.vpn.ip_suffix }}/32
peers:
- public_key: hVK5qiuJBqKtankcMI8MZtDNfI3h4U/nk2qMRfw35UE=
endpoint:
ip: 141.95.173.143
port: 39519
keepalive: 30s
allowed_ips:
- fd6e:7674::/80
{{ end }}
files:
{{- define "rc_dkl" }}
- path: "/etc/runlevels/default/dkl-{{.}}"
symlink: "../../init.d/dkl-{{.}}"
- path: "/etc/init.d/dkl-{{.}}"
mode: 0o755
content: |
#! /sbin/openrc-run
description="dkl: {{.}}"
supervisor=supervise-daemon
respawn_delay=8
respawn_max=0
command="/sbin/dkl logger /etc/direktil/services/{{.}}"
depend() { after local ; }
{{- end }}
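# Reference sketch (assumption, not emitted config): invoking the rc_dkl define
# above with the name "kubelet" expands to two file entries, a default-runlevel
# symlink plus an OpenRC supervise-daemon service that execs
# "/sbin/dkl logger /etc/direktil/services/kubelet":
#   - path: "/etc/runlevels/default/dkl-kubelet"
#     symlink: "../../init.d/dkl-kubelet"
#   - path: "/etc/init.d/dkl-kubelet"
#     content: the init script body from the define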
- path: /etc/systemd/system/dkl.service
content: |
[Service]
{{- if not .vars.disable_secure_routes }}
ExecStartPre=/etc/local.d/secure-routes.start
{{- end }}
ExecStartPre=/sbin/dkl dynlay kubernetes {{ .host.versions.kubernetes }}
ExecStart=/sbin/dkl init services
Restart=always
RestartSec=10
{{- if .vars.proxy }}
Environment=HTTP_PROXY={{.vars.proxy}} HTTPS_PROXY={{.vars.proxy}} NO_PROXY="{{.vars.no_proxy}}"
{{- end }}
[Unit]
After=network-online.target
[Install]
WantedBy=multi-user.target
- path: /etc/machine-id
content: |
{{ machine_id }}
- path: /etc/inittab
content: |
id:3:initdefault:
si::sysinit:/sbin/openrc sysinit
rc::bootwait:/sbin/openrc boot
l0u:0:wait:/sbin/telinit u
l0:0:wait:/sbin/openrc shutdown
l0s:0:wait:/sbin/halt -dhnp
l1:1:wait:/sbin/openrc single
l2:2:wait:/sbin/openrc nonetwork
l3:3:wait:/sbin/openrc default
l4:4:wait:/sbin/openrc default
l5:5:wait:/sbin/openrc default
l6u:6:wait:/sbin/telinit u
l6:6:wait:/sbin/openrc reboot
l6r:6:wait:/sbin/reboot -dkn
su0:S:wait:/sbin/openrc single
su1:S:wait:/sbin/sulogin
c1:12345:respawn:/sbin/agetty --noclear 38400 tty1 linux
c2:2345:respawn:/sbin/agetty 38400 tty2 linux
{{- if .vars.enable_serial_console }}
s0:12345:respawn:/sbin/agetty --noclear -L 115200 ttyS0 vt100
{{- end }}
ca:12345:ctrlaltdel:/sbin/shutdown -r now
- path: /etc/rc.conf
content: |
rc_shell=/sbin/sulogin
rc_logger="YES"
#rc_log_path="/var/log/rc.log"
unicode="YES"
rc_tty_number=12
rc_need="!net"
- path: /etc/conf.d/netmount
content: ""
- path: /etc/hostname
content: "{{host_name}}\n"
- path: /etc/hosts
content: |
127.0.0.1 localhost {{host_name}}{{ if not .vars.public_vip }} kubernetes{{end}}
::1 localhost {{host_name}}{{ if not .vars.public_vip }} kubernetes{{end}}
{{ if .vars.public_vip }}
{{ .vars.public_vip }} kubernetes
{{- else }}{{ range shuffled_hosts_by_group .vars.master_group }}
{{ .ip }} kubernetes
{{- end }}{{ end }}
{{ with .vars.additional_hosts }}{{- range . }}
{{ . }}
{{- end }}{{ end }}
- path: /etc/resolv.conf
content: |
{{ if .vars.dns -}}
nameserver {{ .vars.dns }}
{{ end -}}
{{ if .vars.dnses -}}
{{ range .vars.dnses }}
nameserver {{ . }}
{{ end -}}
{{- end }}
- path: /etc/sysctl.conf
content: |
fs.file-max = 20971520
fs.inotify.max_user_watches = 1048576
kernel.pid_max = 1048576
net.ipv4.ip_forward = 1
vm.max_map_count = 262144
net.ipv4.neigh.default.gc_thresh1 = 16384
net.ipv4.neigh.default.gc_thresh2 = 28672
net.ipv4.neigh.default.gc_thresh3 = 32768
{{ if .vars.enable_mtu_probing -}}
net.ipv4.tcp_mtu_probing = 2
{{- end }}
- path: /etc/udev/rules.d/50-io-scheduler.rules
content: |
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/scheduler}="bfq", ATTR{queue/nr_requests}="1024"
ACTION=="add|change", KERNEL=="nvme[0-9]n[0-9]", ATTR{queue/scheduler}="bfq", ATTR{queue/nr_requests}="2048"
{{ with .vars.additional_certs }}
- path: /usr/local/share/ca-certificates/novit.crt
content: |
{{ . | indent " " }}
{{ end }}
# -------------------------------------------------------------------------
{{ ssh_user_ca "/etc/ssh/user_ca.pub" }}
{{ ssh_host_keys "/etc/ssh" }}
- path: /etc/ssh/sshd_config
mode: 0o600
content: |
TrustedUserCAKeys /etc/ssh/user_ca.pub
Include "/etc/ssh/sshd_config.d/*.conf"
# ------------------------------------------------------------------------
{{ if .vars.is_master }}
# certificates for etcd servers
{{ tls_dir "etcd-server" }}
{{ tls_dir "etcd-peer" }}
# certificates for etcd clients
{{ tls_dir "etcd-client" }}
# cluster certificates
{{ ca_dir "cluster" }}
{{ ca_dir "service-accounts" }}
{{ tls_dir "apiserver" }}
{{ tls_dir "kubelet-client" }}
{{ tls_dir "proxy-client" }}
{{ end }}
{{ tls_dir "cluster-client" }}
{{ if .vars.is_master -}}
- path: /etc/kubernetes/token-auth.csv
mode: 0o600
content: |
{{ token "bootstrap" }},kubelet-bootstrap,10001,"system:bootstrappers"
{{ token "admin" }},admin-token,10002,"system:masters"
{{- end }}
# ------------------------------------------------------------------------
- path: /etc/runlevels/default/chrony
symlink: ../../init.d/chrony
- path: /etc/chrony/chrony.conf
mode: 0o644
content: |
{{ if .vars.ntp_servers -}}
{{ range .vars.ntp_servers -}}
server {{ . }} iburst
{{ end -}}
{{ else -}}
server 0.gentoo.pool.ntp.org iburst
server 1.gentoo.pool.ntp.org iburst
server 2.gentoo.pool.ntp.org iburst
server 3.gentoo.pool.ntp.org iburst
{{- end }}
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
{{ if .vars.ntp_server_mode -}}
allow all
{{- end }}
{{ if .vars.is_vmware -}}
# ------------------------------------------------------------------------
{{ template "rc_dkl" "vmtoolsd" }}
- path: /etc/direktil/services/vmtoolsd
mode: 0o755
content: |
#! /bin/sh
# ---
# restart: 3
set -ex
exec /usr/bin/vmtoolsd
{{- end }}
{{ if .vars.novit_host_token }}
# ------------------------------------------------------------------------
{{ template "rc_dkl" "novit-connect" }}
- path: /etc/direktil/services/novit-connect
mode: 0o755
content: |
#! /bin/sh
{{ if .vars.proxy -}}
export HTTP_PROXY={{.vars.proxy}}
export HTTPS_PROXY="$HTTP_PROXY"
export NO_PROXY="{{.vars.no_proxy}}"
{{- end }}
exec /bin/novit-connect -token {{.vars.novit_host_token}}
{{ end }}
# ------------------------------------------------------------------------
{{ template "rc_dkl" "k8s-local-volumes" }}
- path: /etc/direktil/services/k8s-local-volumes
mode: 0o755
content: |
#! /bin/sh
# ---
# restart: 3
while true
do
for dev in /dev/storage/k8s-pv-*
do
[ -e $dev ] || continue
tgt=${dev/dev/mnt}
[ -e $tgt ] || {
mkdir -p $(dirname $tgt)
ln -s $dev $tgt
}
done
for dev in /dev/k8s-pv/*
do
[ -e $dev ] || continue
tgt=/mnt/storage/k8s-pv__$(basename $dev)
[ -e $tgt ] || {
mkdir -p $(dirname $tgt)
ln -s $dev $tgt
}
done
for dev in /dev/mapper/k8s-pv-*
do
[ -e $dev ] || continue
tgt=/mnt/storage/mapper__$(basename $dev)
[ -e $tgt ] || {
mkdir -p $(dirname $tgt)
ln -s $dev $tgt
}
done
sleep 10
done
# ------------------------------------------------------------------------
{{- $cr := "containerd" }}{{ if .vars.cri_o }}{{ $cr = "crio" }}{{end}}
{{ template "rc_dkl" $cr }}
- path: /etc/direktil/services/{{$cr}}
mode: 0o755
content: |
#! /bin/bash
set -ex
cg=cpu,memory
cgcreate -g $cg:podruntime
cgclassify -g $cg:podruntime $$
ulimit -n 1048576
ulimit -u unlimited
ulimit -c unlimited
{{ if .vars.proxy -}}
export HTTP_PROXY={{.vars.proxy}}
export HTTPS_PROXY="$HTTP_PROXY"
export NO_PROXY="{{.vars.no_proxy}}"
{{- end }}
{{- if .vars.cri_o }}
exec /usr/bin/crio --root=/var/lib/crio
{{- else }}
exec /usr/bin/containerd --log-level info
{{- end }}
# ------------------------------------------------------------------------
{{ template "rc_dkl" "kubelet" }}
- path: /etc/direktil/services/kubelet
mode: 0o755
content: |
#! /bin/sh
set -ex
cg=cpu,memory
cgcreate -g $cg:podruntime
cgclassify -g $cg:podruntime $$
ctr_sock="{{if .vars.cri_o}}/run/crio/crio.sock{{else}}/run/containerd/containerd.sock{{end}}"
echo "waiting for $ctr_sock"
while ! [ -e $ctr_sock ]; do sleep 1; done
#ulimit -n 1048576
mkdir -p /var/lib/kubelet/manifests
exec /usr/bin/kubelet \
--config=/etc/kubernetes/kubelet.yaml \
{{- if .vars.hostname_override }}
--hostname-override={{.vars.hostname_override}} \
{{- end }}
{{- range $k, $v := .labels }}
--node-labels={{ $k }}={{$v}} \
{{- end }}
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
--kubeconfig=/var/lib/kubelet/kubeconfig \
--hostname-override={{host_name}} \
--node-ip={{ default .vars.node_ip host_ip}}
{{ if .vars.enable_nfs }}
# -------------------------------------------------------------------------
- path: /etc/runlevels/default/rpcbind
symlink: ../../init.d/rpcbind
- path: /etc/runlevels/default/rpc.statd
symlink: ../../init.d/rpc.statd
{{ end }}
{{ if .vars.enable_nfs }}
{{ range .vars.nfs }}
- path: /etc/runlevels/default/mount-nfs-{{ .name }}
symlink: ../../init.d/dkl-svc
{{ template "rc_dkl" (print "mount-nfs-" .name) }}
- path: /etc/direktil/services/mount-nfs-{{ .name }}
mode: 0o755
content: |
#! /bin/sh
# ---
# restart: 3
while true
do
findmnt {{ .dst }} >/dev/null || {
mkdir -p {{ .dst }}
mount -t nfs -o {{ .opts }} {{ .src }} {{ .dst }}
}
sleep 60
done
{{- end }}
{{- end }}
# -------------------------------------------------------------------------
- path: /etc/crictl.yaml
content: |
{{- if .vars.cri_o }}
runtime-endpoint: "unix:///var/run/crio/crio.sock"
{{- else }}
runtime-endpoint: "unix:///run/containerd/containerd.sock"
{{- end }}
{{- if .vars.cri_o }}
- path: /etc/crio/crio.conf.d/20-novit.conf
mode: 0o640
content: |
[crio.runtime]
cgroup_manager = "cgroupfs"
conmon_cgroup = "pod"
{{- else }}
- path: /etc/containerd/config.toml
mode: 0o600
content: |
version = 2
root = "/var/lib/containerd"
state = "/run/containerd"
plugin_dir = ""
disabled_plugins = []
required_plugins = []
oom_score = 0
[grpc]
address = "/run/containerd/containerd.sock"
tcp_address = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
[ttrpc]
address = ""
uid = 0
gid = 0
[debug]
address = ""
uid = 0
gid = 0
level = ""
[metrics]
address = ""
grpc_histogram = false
[cgroup]
path = ""
[timeouts]
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
pause_threshold = 0.02
deletion_threshold = 0
mutation_threshold = 100
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
disable_tcp_service = true
stream_server_address = "127.0.0.1"
stream_server_port = "0"
stream_idle_timeout = "4h0m0s"
enable_selinux = false
sandbox_image = "registry.k8s.io/pause:3.1"
stats_collect_period = 10
systemd_cgroup = false
enable_tls_streaming = false
max_container_log_line_size = 16384
disable_cgroup = false
disable_apparmor = false
restrict_oom_score_adj = false
max_concurrent_downloads = 3
disable_proc_mount = false
[plugins."io.containerd.grpc.v1.cri".containerd]
snapshotter = "overlayfs"
default_runtime_name = "runc"
no_pivot = false
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
runtime_type = ""
runtime_engine = ""
runtime_root = ""
privileged_without_host_devices = false
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
runtime_type = ""
runtime_engine = ""
runtime_root = ""
privileged_without_host_devices = false
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v1"
runtime_engine = ""
runtime_root = ""
privileged_without_host_devices = false
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
max_conf_num = 1
conf_template = ""
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
{{ $mirror_cache := .vars.docker_registries_mirror_cache }}
{{ $mirror_repos := .vars.docker_registries_mirror_repos }}
{{ if $mirror_cache.enable }}
{{- range $name, $path := $mirror_repos }}
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ $name }}"]
endpoint = ["http://127.0.0.1:8585{{ $path }}"{{ if $mirror_cache.remote }}, "{{ $mirror_cache.remote}}{{ $path }}"{{ end }}]
{{- end }}
{{- end }}
{{- range $name := .vars.http_registries }}
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ $name }}"]
endpoint = ["http://{{ $name }}"]
{{- end }}
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.runtime.v1.linux"]
shim = "containerd-shim"
runtime = "runc"
runtime_root = ""
no_shim = false
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.snapshotter.v1.devmapper"]
root_path = ""
pool_name = ""
base_image_size = ""
{{- end }}
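# The effective containerd configuration (builtin defaults merged with this
# file) can be checked on a node with the stock command:
#   containerd config dump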
# -------------------------------------------------------------------------
{{ $podPidsLimit := 4096 -}}
- path: /etc/kubernetes/kubelet.yaml
mode: 0o600
content: |
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
containerRuntimeEndpoint: "unix://{{if .vars.cri_o}}/run/crio/crio.sock{{else}}/run/containerd/containerd.sock{{end}}"
staticPodPath: /var/lib/kubelet/manifests
makeIPTablesUtilChains: {{ eq .vars.kube_proxy "proxy" }}
clusterDomain: {{.cluster.domain}}
clusterDNS:
- {{.cluster.dns_svc_ip }}
podCIDR: {{.cluster.subnets.pods}}
address: 0.0.0.0
authentication:
x509:
clientCAFile: /etc/tls/cluster-client/ca.crt
anonymous:
enabled: false
maxPods: {{.vars.kubelet.maxPods}}
serializeImagePulls: false
featureGates: {}
serverTLSBootstrap: true
rotateCertificates: true
podPidsLimit: {{ $podPidsLimit }}
containerLogMaxFiles: 2
containerLogMaxSize: 16Mi
# cgroups configuration
cgroupsPerQOS: true
cgroupDriver: cgroupfs
systemReservedCgroup: openrc
systemReserved:
cpu: "{{ .vars.system_reserved.cpu }}"
memory: "{{ .vars.system_reserved.memory }}"
kubeReservedCgroup: podruntime
kubeReserved:
cpu: "{{ .vars.kube_reserved.cpu }}"
memory: "{{ .vars.kube_reserved.memory }}"
#evictionHard:
# memory.available: 100Mi
{{ .vars.extra_kubelet_config | indent " " }}
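# Allocatable resources follow the usual kubelet formula:
#   allocatable = capacity - kubeReserved - systemReserved - evictionHard
# e.g. on a (hypothetical) 16Gi node with both memory reservations at 256Mi
# and the default memory.available eviction threshold of 100Mi, pods can
# request about 16384 - 256 - 256 - 100 = 15772Mi.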
- path: /etc/kubernetes/haproxy-api.cfg
content: |
defaults
mode tcp
timeout client 2s
timeout connect 5s
timeout server 2s
timeout tunnel 1m
frontend k8s-api
bind 127.0.0.1:6444
bind [::1]:6444
mode tcp
default_backend k8s-api
backend k8s-api
mode tcp
option tcp-check
balance random
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
{{- $apiPort := .vars.control_plane.api_port -}}
{{- range $i, $host := hosts_by_group .vars.master_group }}
server {{$host.name}}_0 {{$host.ip}}:{{ $apiPort }} check
{{- end }}
{{ if and .vars.docker_registries_mirror_cache.enable (not .vars.is_master) }}
frontend dkr-reg-mirror
bind 127.0.0.1:8585
bind [::1]:8585
mode tcp
default_backend dkr-reg-mirror
backend dkr-reg-mirror
mode tcp
option tcp-check
balance random
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
{{- range $i, $host := hosts_by_group .vars.master_group }}
server {{$host.name}}_0 {{$host.ip}}:8585 check
{{- end }}
{{- end }}
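# A quick check that the local load-balancer path works (the apiserver
# usually serves /version even to anonymous clients via the builtin
# system:public-info-viewer binding; -k skips CA verification for brevity):
#   curl -k https://127.0.0.1:6444/version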
{{ tls_dir "node-bootstrap" }}
- path: /etc/kubernetes/bootstrap.kubeconfig
mode: 0o600
content: |
apiVersion: v1
kind: Config
preferences: {}
current-context: local
clusters:
- cluster:
certificate-authority: /etc/tls/cluster-client/ca.crt
server: https://[::1]:6444
name: local
contexts:
- context:
cluster: local
user: kubelet-bootstrap
name: local
users:
- name: kubelet-bootstrap
user:
client-key: /etc/tls/node-bootstrap/tls.key
client-certificate: /etc/tls/node-bootstrap/tls.crt
- path: /etc/kubernetes/control-plane/kubeconfig
mode: 0o600
content: |
apiVersion: v1
kind: Config
preferences: {}
current-context: local
clusters:
- cluster:
certificate-authority: /etc/tls/cluster-client/ca.crt
server: https://[::1]:6444
name: local
contexts:
- context:
cluster: local
user: control-plane
name: local
users:
- name: control-plane
user:
token: {{ token "admin" }}
{{ if eq .vars.cloud_provider "vsphere" }}
- path: /etc/kubernetes/vsphere.conf
mode: 0o600
content: |
[Global]
vm-name = "{{host_name}}"
user = "{{.vars.vsphere.user}}"
password = "{{.vars.vsphere.password}}"
insecure-flag = "1"
datacenters = "{{.vars.vsphere.datacenter}}"
[Workspace]
server = "{{.vars.vsphere.server}}"
datacenter = "{{.vars.vsphere.datacenter}}"
folder = "{{.vars.vsphere.folder}}"
default-datastore = "{{.vars.vsphere.datastore}}"
[VirtualCenter "{{.vars.vsphere.server}}"]
[Disk]
scsicontrollertype = pvscsi
{{ end }}
{{ if .vars.is_master }}
{{ static_pods_files "/etc/kubernetes/manifests.bootstrap" }}
{{ else }}{{/* TODO merge with the bootstrap pod */}}
- path: /etc/kubernetes/manifests.bootstrap/api-haproxy.yaml
content: |
apiVersion: v1
kind: Pod
metadata:
namespace: kube-system
name: k8s-api-haproxy
labels:
component: k8s-api-haproxy
tier: control-plane
spec:
hostNetwork: true
priorityClassName: system-node-critical
automountServiceAccountToken: false
tolerations:
- key: node.kubernetes.io/not-ready
effect: NoSchedule
containers:
- name: api-haproxy
image: haproxy:2.4.25-alpine
resources:
requests:
cpu: 10m
memory: 16Mi
volumeMounts:
- name: config
mountPath: /usr/local/etc/haproxy/haproxy.cfg
readOnly: true
volumes:
- name: config
hostPath:
type: File
path: /etc/kubernetes/haproxy-api.cfg
{{ end }}
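# Note: manifests.bootstrap is not the kubelet's staticPodPath
# (/var/lib/kubelet/manifests); the cluster start scripts move these files
# into place, along the lines of:
#   mv /etc/kubernetes/manifests.bootstrap/* /var/lib/kubelet/manifests/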
- path: /etc/cron.monthly/clean-archives-logs
mode: 0o755
content: |
#! /bin/bash
find /var/log/archives/ -type f -mtime +20 -delete
- path: /etc/local.d/blockdevs.start
mode: 0o755
content: |
#! /bin/bash
for d in /sys/class/block/*/device; do
d=${d%/device}
[ $(<$d/queue/nr_requests) -ge 1024 ] || echo 1024 >$d/queue/nr_requests
[ $(<$d/queue/rotational) -eq 0 ] || echo kyber >$d/queue/scheduler
done
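# In short: every block device gets a queue depth of at least 1024 requests,
# and rotational disks (queue/rotational != 0) are switched to the kyber
# scheduler, while SSDs keep the scheduler chosen by the kernel.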
{{ if not .vars.disable_secure_routes }}
- path: /etc/local.d/secure-routes.start
mode: 0o755
content: |
#! /bin/bash
set -ex
if ip li add nvdummy type dummy
then
ip li set nvdummy up
ip route add {{.cluster.subnets.services}} dev nvdummy
fi
for prefix in 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 ; do
ip route list $prefix |grep -q . ||
ip route add unreachable $prefix
done
for prefix in fd00::/8 ; do
ip -6 route list $prefix |grep -q . ||
ip -6 route add unreachable $prefix
done
{{ end }}
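# The unreachable routes reject private-range traffic that has no more
# specific route, so it cannot leak through the default gateway; any pod,
# service or VPN route added later still wins, since a longer prefix always
# takes precedence.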
- path: /root/host-checks.sh
mode: 0o700
content: |
#! /bin/bash
{{ if .vars.is_master }}
echo "kubelet health:"
curl --cacert /etc/tls/kubelet-client/ca.crt \
--cert /etc/tls/kubelet-client/tls.crt \
--key /etc/tls/kubelet-client/tls.key \
https://{{host_name}}:10250/healthz
echo
echo "API health:"
curl --cacert /etc/tls/apiserver/ca.crt \
--cert /etc/tls/cluster-client/tls.crt \
--key /etc/tls/cluster-client/tls.key \
https://127.0.0.1:{{ .vars.control_plane.api_port }}/healthz
echo
{{ end }}

hosts.incl/aux.yaml (new file)

@ -0,0 +1,8 @@
#!include hosts.incl/common
labels:
"node-role.novit.nc/aux": "true"
group: aux
static_pods: node


@ -1,15 +1,18 @@
-#!include common
+#!include hosts.incl/layers
-bootstrap_config: node-bootstrap
+bootstrap_config: bootstrap
-config: node
+config: system
-kernel: 6.1.23
-initrd: 2.1.0
-versions:
-init: 2.0.2
-system: v23.15.0
-kubernetes: v1.26.4_containerd.1.6.20
vars:
+boot_v2: true
+bootstrap_auths:
+- name: "my-user"
+sshKey: "ssh-ed25519 xxx my-user"
+ssh_keys:
+- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILIomzqVAIqb7BedauhAo2VgbLqme5Jx/vjGUqZLoJqF gfenollar@novit.io"
is_master: false
is_master_active: false
@ -20,3 +23,13 @@ vars:
kube_reserved:
cpu: 200m
memory: 256Mi
+extra_routes: []
+kubelet:
+maxPods: 250
+extra_kubelet_config: ""
+cri_o: false

hosts.incl/layers.yaml (new file)

@ -0,0 +1,7 @@
kernel: 6.12.51
initrd: 2.4.3
versions:
init: 2.1.0
system: v25.40.0
kubernetes: v1.32.9_containerd.1.7.28
kubernetes-crio: v1.32.9_crio.1.32.9


@ -1,7 +1,7 @@
#!include hosts.incl/common
labels:
-"node-role.novit.io/master": "true"
+"node-role.novit.nc/master": "true"
group: master
static_pods: master


@ -1,7 +1,7 @@
#!include hosts.incl/common
labels:
-"node-role.novit.io/node": "true"
+"node-role.novit.nc/node": "true"
group: node
static_pods: node


@ -49,12 +49,6 @@ prereqs() {
source scripts/vars
}
-check_conf() {
-all_clusters=$(ls $ctxdir/clusters|wc -l)
-if [ "$all_clusters" != "1" ]; then
-perror "Those helper scripts are not capable of running several clusters at once, check your configuration. Aborting..."
-fi
-}
fresh_start() {
rm -rf $ctxdir/secrets


@ -4,7 +4,6 @@
prereqs_dls() {
command -v docker 1>/dev/null || perror "Docker is needed, please install it and run again."
systemctl is-active docker &>/dev/null || systemctl start docker
-docker pull $DLS_IMG
}
dir2config() {


@ -60,7 +60,6 @@ run_qemu() {
source $(dirname $0)/.common
check_root
prereqs
-check_conf
#fresh_start
trap clean SIGINT SIGTERM SIGKILL
declare -A hosts


@ -40,8 +40,8 @@ checkup() {
start_control_plane() {
for host in ${!hosts[*]}; do
sshcmd -q root@${hosts[$host]} << EOF
-if ls /etc/kubernetes/manifests.static/* &>/dev/null ; then
+if ls /etc/kubernetes/manifests.bootstrap/* &>/dev/null ; then
-mv /etc/kubernetes/manifests.static/* /var/lib/kubelet/manifests/
+mv /etc/kubernetes/manifests.bootstrap/* /var/lib/kubelet/manifests/
fi
EOF
done
@ -94,7 +94,6 @@ source $(dirname $0)/.common
prereqs
prereqs_control_plane
prereqs_addons
-check_conf
unlock_store
declare -A hosts && get_hosts
checkup


@ -29,6 +29,14 @@
"server auth", "server auth",
"client auth" "client auth"
] ]
},
"bootstrap-client": {
"expiry": "2h",
"usages": [
"signing",
"key encipherment",
"client auth"
]
} }
} }
} }


@ -3,6 +3,8 @@ kind: Pod
metadata:
namespace: kube-system
name: k8s-api-haproxy
+annotations:
+novit.io/bootstrap-prio: "200"
labels:
component: k8s-api-haproxy
tier: control-plane
@ -16,7 +18,7 @@ spec:
effect: NoSchedule
containers:
- name: api-haproxy
-image: haproxy:2.4.19-alpine
+image: haproxy:2.8.15-alpine
{{ if .vars.control_plane.reserve_resources }}
resources:
requests:


@ -18,11 +18,15 @@ spec:
effect: NoSchedule
containers:
- name: apiserver
-image: {{ .vars.k8s_registry}}/kube-apiserver:{{ .vars.kubernetes_version }}
+image: {{ .vars.gcr_io}}/kube-apiserver:{{ .vars.kubernetes_version }}
command:
- kube-apiserver
+- --advertise-address={{ .host.ip }}
- --secure-port={{ .vars.control_plane.api_port }}
-- --etcd-servers={{ range $i, $host := hosts_by_group "master" }}{{ if gt $i 0 }},{{end}}https://{{$host.ip}}:2379{{end}}
+- --etcd-servers={{ range $i, $host := shuffled_hosts_by_group .vars.master_group }}{{ if gt $i 0 }},{{end}}https://{{$host.ip}}:2379{{end}}
+{{- if .vars.etcd_split_events }}
+- --etcd-servers-overrides=/events#{{ range $i, $host := shuffled_hosts_by_group .vars.master_group }}{{ if gt $i 0 }},{{end}}https://{{$host.ip}}:2381{{end}}
+{{- end }}
- --etcd-cafile=/tls/etcd-client/ca.crt
- --etcd-keyfile=/tls/etcd-client/tls.key
- --etcd-certfile=/tls/etcd-client/tls.crt


@ -18,7 +18,7 @@ spec:
effect: NoSchedule
containers:
- name: controller-manager
-image: {{ .vars.k8s_registry}}/kube-controller-manager:{{ .vars.kubernetes_version }}
+image: {{ .vars.gcr_io}}/kube-controller-manager:{{ .vars.kubernetes_version }}
command:
- kube-controller-manager
- --cluster-signing-cert-file=/tls-ca/cluster/ca.crt
@ -28,7 +28,7 @@ spec:
- --kubeconfig=/run/k8s/kubeconfig
- --allocate-node-cidrs
- --cluster-cidr={{ .cluster.subnets.pods }}
-- --node-cidr-mask-size=24
+- --node-cidr-mask-size={{ .cluster.vars.node_cidr_mask_size }}
- --controllers=*,bootstrapsigner,tokencleaner
- --authentication-kubeconfig=/run/k8s/kubeconfig
- --authorization-kubeconfig=/run/k8s/kubeconfig


@ -15,12 +15,12 @@ spec:
priorityClassName: system-node-critical
containers:
- name: docker-registries-mirror
-image: mcluseau/docker-registries-mirror
+image: novit.tech/direktil/docker-registries-mirror
command:
- ash
- -c
- |
-ALL_HOSTS="{{ if .vars.docker_registries_mirror_cache.remote }}{{ .vars.docker_registries_mirror_cache.remote}}{{ end }}{{ range $i, $host := hosts_by_group "master" }} http://{{$host.ip}}:{{ $cache_port }}{{end}}" \
+ALL_HOSTS="{{ if .vars.docker_registries_mirror_cache.remote }}{{ .vars.docker_registries_mirror_cache.remote}}{{ end }}{{ range $i, $host := hosts_by_group .vars.master_group }} http://{{$host.ip}}:{{ $cache_port }}{{end}}" \
CURRENT_HOST="http://{{ .host.ip }}:{{ $cache_port }}" \
OTHER_HOSTS="$(echo ${ALL_HOSTS/${CURRENT_HOST}/})" \
sh -c '/bin/docker-registries-mirror -addr=:{{ $cache_port }} -cache-mib={{ .vars.docker_registries_mirror_cache.mb }} -peers=${OTHER_HOSTS/ /,}'


@ -0,0 +1,88 @@
{{ if .vars.etcd_split_events }}
apiVersion: v1
kind: Pod
metadata:
namespace: kube-system
name: k8s-etcd
annotations:
novit.io/bootstrap-prio: "300"
labels:
component: k8s-etcd
tier: control-plane
spec:
hostNetwork: true
dnsPolicy: Default
priorityClassName: system-cluster-critical
automountServiceAccountToken: false
tolerations:
- key: node.kubernetes.io/not-ready
effect: NoSchedule
containers:
- name: etcd
image: {{.vars.etcd_image}}:{{.vars.etcd_version}}
command:
- etcd
- --name={{ .host.name }}
- --data-dir=/var/lib/etcd
- --trusted-ca-file=/tls/etcd-server/ca.crt
- --key-file=/tls/etcd-server/tls.key
- --cert-file=/tls/etcd-server/tls.crt
- --client-cert-auth=true
- --listen-client-urls=https://127.0.0.1:2381,https://{{ .host.ip }}:2381
- --advertise-client-urls=https://{{ .host.ip }}:2381
- --listen-peer-urls=https://{{ .host.ip }}:2382
- --peer-trusted-ca-file=/tls/etcd-peer/ca.crt
- --peer-key-file=/tls/etcd-peer/tls.key
- --peer-cert-file=/tls/etcd-peer/tls.crt
- --peer-client-cert-auth=true
- --initial-advertise-peer-urls=https://{{ .host.ip }}:2382
env:
- name: ETCD_INITIAL_CLUSTER
value: {{ range $i, $host := hosts_by_group .vars.master_group }}{{ if gt $i 0 }},{{end}}{{$host.name}}=https://{{$host.ip}}:2382{{end}}
- name: ETCD_INITIAL_CLUSTER_STATE
value: existing
- name: ETCD_INITIAL_CLUSTER_TOKEN
value: '{{ token "etcd-events" }}'
- name: ETCDCTL_ENDPOINTS
value: {{ range $i, $host := hosts_by_group .vars.master_group }}{{ if gt $i 0 }},{{end}}https://{{$host.ip}}:2381{{end}}
- name: ETCDCTL_CACERT
value: /tls/etcd-peer/ca.crt
- name: ETCDCTL_CERT
value: /tls/etcd-peer/tls.crt
- name: ETCDCTL_KEY
value: /tls/etcd-peer/tls.key
{{ if .vars.control_plane.reserve_resources }}
resources:
requests:
cpu: 200m
memory: 1.2Gi
{{ end }}
volumeMounts:
- name: etc-certs
mountPath: /etc/ssl/certs
- name: tls-etcd-server
mountPath: /tls/etcd-server
- name: tls-etcd-peer
mountPath: /tls/etcd-peer
- name: k8s
mountPath: /etc/kubernetes
- name: data
mountPath: /var/lib/etcd
volumes:
- name: etc-certs
hostPath:
path: /etc/ssl/certs
- name: tls-etcd-server
hostPath:
path: /etc/tls/etcd-server
- name: tls-etcd-peer
hostPath:
path: /etc/tls/etcd-peer
- name: k8s
hostPath:
path: /etc/kubernetes
- name: data
hostPath:
path: /var/lib/etcd-events
{{ end }}


@ -18,7 +18,7 @@ spec:
effect: NoSchedule
containers:
- name: etcd
-image: {{.vars.etcd.image}}:{{.vars.etcd.version}}
+image: {{.vars.etcd_image}}:{{.vars.etcd_version}}
command:
- etcd
- --name={{ .host.name }}
@ -38,13 +38,13 @@ spec:
- --initial-advertise-peer-urls=https://{{ .host.ip }}:2380
env:
- name: ETCD_INITIAL_CLUSTER
-value: {{ range $i, $host := hosts_by_group "master" }}{{ if gt $i 0 }},{{end}}{{$host.name}}=https://{{$host.ip}}:2380{{end}}
+value: {{ range $i, $host := hosts_by_group .vars.master_group }}{{ if gt $i 0 }},{{end}}{{$host.name}}=https://{{$host.ip}}:2380{{end}}
- name: ETCD_INITIAL_CLUSTER_STATE
-value: {{ .vars.etcd.cluster_state }}
+value: {{ .vars.etcd_cluster_state }}
- name: ETCD_INITIAL_CLUSTER_TOKEN
value: '{{ token "etcd-initial-cluster" }}'
- name: ETCDCTL_ENDPOINTS
-value: {{ range $i, $host := hosts_by_group "master" }}{{ if gt $i 0 }},{{end}}https://{{$host.ip}}:2379{{end}}
+value: {{ range $i, $host := hosts_by_group .vars.master_group }}{{ if gt $i 0 }},{{end}}https://{{$host.ip}}:2379{{end}}
- name: ETCDCTL_CACERT
value: /tls/etcd-peer/ca.crt
- name: ETCDCTL_CERT


@ -19,28 +19,32 @@ spec:
effect: NoSchedule
containers:
- name: keepalived
-image: {{.vars.keepalived.image}}:{{.vars.keepalived.version}}
+image: {{.vars.keepalived_image}}:{{.vars.keepalived_version}}
env:
- name: KEEPALIVED_AUTH_PASSWORD
value: '{{ token "keepalived-vip" }}'
-{{- range $i, $host := hosts_by_group "master" }}
+{{- range $i, $host := hosts_by_group .vars.master_group }}
- name: KEEPALIVED_UNICAST_PEER_{{$i}}
value: {{ $host.ip }}
{{- end }}
- name: KEEPALIVED_VIRTUAL_IPADDRESS_0
value: {{.vars.public_vip}}/{{.vars.netmask}}
+{{- if .vars.dmz_vip }}
+- name: KEEPALIVED_VIRTUAL_IPADDRESS_1
+value: {{.vars.dmz_vip}}/{{.vars.dmz_netmask}}
+{{- end }}
- name: KEEPALIVED_INTERFACE
value: {{ .vars.vip_interface }}
-{{- if .vars.keepalived.router_id }}
+{{- if .vars.keepalived_router_id }}
- name: KEEPALIVED_VIRTUAL_ROUTER_ID
-value: "{{ .vars.keepalived.router_id }}"
+value: "{{ .vars.keepalived_router_id }}"
{{- end }}
- name: KEEPALIVED_KUBE_APISERVER_CHECK
value: "true"
- name: KUBE_APISERVER_ADDRESS
value: 127.0.0.1
- name: KEEPALIVED_GARP_MASTER_REFRESH
-value: "{{ .vars.keepalived.garp_master_refresh }}"
+value: "{{ .vars.keepalived_garp_master_refresh }}"
securityContext:
capabilities:
add:

static-pods/master/minio.yaml (new file)

@ -0,0 +1,53 @@
{{ if .vars.enable_minio }}
apiVersion: v1
kind: Pod
metadata:
name: minio
labels:
app: minio
spec:
hostNetwork: true
volumes:
- name: data
hostPath:
path: /mnt/storage/k8s-pv-backup
type: BlockDevice
- name: config
emptyDir: {}
containers:
- name: minio
image: minio/minio:RELEASE.2025-01-20T14-49-07Z
imagePullPolicy: IfNotPresent
args:
- server
- /data
- --config-dir=/config
env:
- name: MINIO_ACCESS_KEY
value: {{ .vars.minio_access_key }}
- name: MINIO_SECRET_KEY
value: {{ .vars.minio_secret_key }}
livenessProbe:
httpGet:
path: /minio/login
port: 9000
httpHeaders:
- name: User-Agent
value: Mozilla
readinessProbe:
failureThreshold: 3
httpGet:
path: /minio/login
port: 9000
httpHeaders:
- name: User-Agent
value: Mozilla
initialDelaySeconds: 15
ports:
- containerPort: 9000
volumeMounts:
- name: data
mountPath: "/data"
- name: config
mountPath: "/config"
{{ end }}


@ -18,7 +18,7 @@ spec:
effect: NoSchedule
containers:
- name: scheduler
-image: {{ .vars.k8s_registry}}/kube-scheduler:{{ .vars.kubernetes_version }}
+image: {{ .vars.gcr_io}}/kube-scheduler:{{ .vars.kubernetes_version }}
command:
- kube-scheduler
- --kubeconfig=/run/k8s/kubeconfig

static-pods/node/minio.yaml (new symbolic link)

@ -0,0 +1 @@
../master/minio.yaml