- Create the namespace
```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: cicd
```
- Deploy the nfs-server
```yaml
---
kind: Service
apiVersion: v1
metadata:
  namespace: cicd
  name: nfs-server
  labels:
    app: nfs-server
spec:
  type: ClusterIP # use "LoadBalancer" to get a public ip
  selector:
    app: nfs-server
  ports:
    - name: tcp-2049
      port: 2049
      protocol: TCP
    - name: udp-111
      port: 111
      protocol: UDP
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-server
  namespace: cicd
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-server
  template:
    metadata:
      name: nfs-server
      labels:
        app: nfs-server
    spec:
      nodeName: c3-master01 # adjust to your own environment
      nodeSelector:
        "kubernetes.io/os": linux
      containers:
        - name: nfs-server
          image: itsthenetwork/nfs-server-alpine:latest
          env:
            - name: SHARED_DIRECTORY
              value: "/exports"
          volumeMounts:
            - mountPath: /exports
              name: nfs-vol
          securityContext:
            privileged: true
          ports:
            - name: tcp-2049
              containerPort: 2049
              protocol: TCP
            - name: udp-111
              containerPort: 111
              protocol: UDP
      volumes:
        - name: nfs-vol
          hostPath:
            path: /data/nfs # modify this to specify another path to store nfs share data
            type: DirectoryOrCreate
```
- Deploy csi-nfs
rbac
```yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-nfs-controller-sa
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-external-provisioner-role
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-csi-provisioner-binding
subjects:
  - kind: ServiceAccount
    name: csi-nfs-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: nfs-external-provisioner-role
  apiGroup: rbac.authorization.k8s.io
```
csi-nfs-controller
```yaml
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: csi-nfs-controller
  namespace: kube-system
spec:
  replicas: 2
  selector:
    matchLabels:
      app: csi-nfs-controller
  template:
    metadata:
      labels:
        app: csi-nfs-controller
    spec:
      hostNetwork: true # controller also needs to mount nfs to create dir
      dnsPolicy: ClusterFirstWithHostNet
      serviceAccountName: csi-nfs-controller-sa
      nodeSelector:
        kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node
      priorityClassName: system-cluster-critical
      tolerations:
        - key: "node-role.kubernetes.io/master"
          operator: "Exists"
          effect: "NoSchedule"
        - key: "node-role.kubernetes.io/controlplane"
          operator: "Exists"
          effect: "NoSchedule"
      containers:
        - name: csi-provisioner
          image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
          args:
            - "-v=2"
            - "--csi-address=$(ADDRESS)"
            - "--leader-election"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
          resources:
            limits:
              memory: 400Mi
            requests:
              cpu: 10m
              memory: 20Mi
        - name: liveness-probe
          image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0
          args:
            - --csi-address=/csi/csi.sock
            - --probe-timeout=3s
            - --health-port=29652
            - --v=2
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          resources:
            limits:
              memory: 100Mi
            requests:
              cpu: 10m
              memory: 20Mi
        - name: nfs
          image: mcr.microsoft.com/k8s/csi/nfs-csi:v3.1.0
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          imagePullPolicy: IfNotPresent
          args:
            - "-v=5"
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
          env:
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
          ports:
            - containerPort: 29652
              name: healthz
              protocol: TCP
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 30
            timeoutSeconds: 10
            periodSeconds: 30
          volumeMounts:
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
            - mountPath: /csi
              name: socket-dir
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 10m
              memory: 20Mi
      volumes:
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: Directory
        - name: socket-dir
          emptyDir: {}
```
csi-nfs-driverinfo
```yaml
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: nfs.csi.k8s.io
spec:
  attachRequired: false
  volumeLifecycleModes:
    - Persistent
    - Ephemeral
```
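The `Ephemeral` mode declared above means the driver can also back CSI inline volumes, where a Pod mounts an NFS share directly without going through a PVC. A minimal sketch, assuming the nfs-server Service from the earlier step and the same `/exports` share that the StorageClass in the later step points at (the Pod and volume names here are illustrative, not part of the original manifests):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nfs-inline-demo   # hypothetical name, only for illustration
  namespace: cicd
spec:
  containers:
    - name: shell
      image: busybox:1.36
      command: ["sh", "-c", "ls /mnt/nfs && sleep 3600"]
      volumeMounts:
        - name: share
          mountPath: /mnt/nfs
  volumes:
    - name: share
      csi:
        driver: nfs.csi.k8s.io      # the CSIDriver registered above
        volumeAttributes:
          server: nfs-server.cicd   # in-cluster NFS Service from the earlier step
          share: /exports           # same share as the StorageClass uses
```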
csi-nfs-node
```yaml
---
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
# that are necessary to run CSI nodeplugin for nfs
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-nfs-node
  namespace: kube-system
spec:
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  selector:
    matchLabels:
      app: csi-nfs-node
  template:
    metadata:
      labels:
        app: csi-nfs-node
    spec:
      hostNetwork: true # original nfs connection would be broken without hostNetwork setting
      dnsPolicy: ClusterFirstWithHostNet
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
        - operator: "Exists"
      containers:
        - name: liveness-probe
          image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0
          args:
            - --csi-address=/csi/csi.sock
            - --probe-timeout=3s
            - --health-port=29653
            - --v=2
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          resources:
            limits:
              memory: 100Mi
            requests:
              cpu: 10m
              memory: 20Mi
        - name: node-driver-registrar
          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
          args:
            - --v=2
            - --csi-address=/csi/csi.sock
            - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
          livenessProbe:
            exec:
              command:
                - /csi-node-driver-registrar
                - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
                - --mode=kubelet-registration-probe
            initialDelaySeconds: 30
            timeoutSeconds: 15
          env:
            - name: DRIVER_REG_SOCK_PATH
              value: /var/lib/kubelet/plugins/csi-nfsplugin/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
          resources:
            limits:
              memory: 100Mi
            requests:
              cpu: 10m
              memory: 20Mi
        - name: nfs
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: mcr.microsoft.com/k8s/csi/nfs-csi:v3.1.0
          args:
            - "-v=5"
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
          env:
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
          ports:
            - containerPort: 29653
              name: healthz
              protocol: TCP
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 30
            timeoutSeconds: 10
            periodSeconds: 30
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
          resources:
            limits:
              memory: 300Mi
            requests:
              cpu: 10m
              memory: 20Mi
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/csi-nfsplugin
            type: DirectoryOrCreate
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: Directory
        - hostPath:
            path: /var/lib/kubelet/plugins_registry
            type: Directory
          name: registration-dir
```
- Create the StorageClass
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-csi
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: nfs.csi.k8s.io
parameters:
  server: nfs-server.cicd
  share: /exports
reclaimPolicy: Retain # reclaim policy: provisioned PVs are kept after their PVCs are deleted
volumeBindingMode: Immediate
```
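With `volumeBindingMode: Immediate`, a PV is provisioned as soon as a claim is created, so the nfs-server + csi-nfs + StorageClass chain can be checked with a small throwaway claim before creating the GitLab PVCs in the next step. A minimal sketch (the claim name is hypothetical; delete it once it reaches `Bound`):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-csi-test   # hypothetical claim, only for verification
  namespace: cicd
spec:
  storageClassName: nfs-csi
  accessModes:
    - ReadWriteMany    # NFS supports shared access; ReadWriteOnce works as well
  resources:
    requests:
      storage: 1Gi
```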
- Create the PVCs
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitlab-data-pvc
  namespace: cicd
spec:
  storageClassName: nfs-csi
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitlab-config-pvc
  namespace: cicd
spec:
  storageClassName: nfs-csi
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
```
- Create the Deployment
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gitlab
  namespace: cicd
spec:
  selector:
    matchLabels:
      app: gitlab
  replicas: 1
  template:
    metadata:
      labels:
        app: gitlab
    spec:
      containers:
        - name: gitlab
          image: gitlab/gitlab-ce:17.2.1-ce.0
          env:
            - name: GITLAB_SKIP_UNMIGRATED_DATA_CHECK
              value: "true"
            - name: GITLAB_OMNIBUS_CONFIG
              value: |
                external_url 'http://gitlab.example.com/'
                prometheus['enable'] = false
                alertmanager['enable'] = false
                gitlab_rails['time_zone'] = 'Asia/Shanghai'
                gitlab_rails['gitlab_email_enabled'] = false
                gitlab_rails['smtp_enable'] = false
                gitlab_rails['gravatar_plain_url'] = 'http://gravatar.loli.net/avatar/%{hash}?s=%{size}&d=identicon'
                gitlab_rails['gravatar_ssl_url'] = 'https://gravatar.loli.net/avatar/%{hash}?s=%{size}&d=identicon'
                nginx['worker_processes'] = 2
                postgresql['max_connections'] = 100
                postgresql['shared_buffers'] = "128MB"
          ports:
            - containerPort: 80
              name: http
            - containerPort: 443
              name: https
            - containerPort: 22
              name: ssh
          readinessProbe:
            exec:
              command: ["sh", "-c", "curl -s http://127.0.0.1/-/health"]
          livenessProbe:
            exec:
              command: ["sh", "-c", "curl -s http://127.0.0.1/-/health"]
            timeoutSeconds: 5
            failureThreshold: 3
            periodSeconds: 60
          startupProbe:
            exec:
              command: ["sh", "-c", "curl -s http://127.0.0.1/-/health"]
            failureThreshold: 20
            periodSeconds: 120
          resources:
            requests:
              memory: "4Gi"
              cpu: "2"
            limits:
              memory: "8Gi"
              cpu: "4"
          volumeMounts:
            - name: data
              mountPath: /var/opt/gitlab
            - name: config
              mountPath: /etc/gitlab
            - name: log
              mountPath: /var/log/gitlab
            - mountPath: /dev/shm
              name: cache-volume
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: gitlab-data-pvc
        - name: config
          persistentVolumeClaim:
            claimName: gitlab-config-pvc
        - name: log
          emptyDir: {}
        - name: cache-volume
          emptyDir:
            medium: Memory
            sizeLimit: 256Mi
```
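The Ingress in the next step routes traffic to a Service named `gitlab-svc` on port 80, which is not included in the manifests above. A minimal sketch of such a Service, assuming a plain ClusterIP in front of the `app: gitlab` Pods (the ssh port is optional and only useful if git-over-ssh is exposed separately later):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: gitlab-svc   # name expected by the Ingress below
  namespace: cicd
spec:
  type: ClusterIP
  selector:
    app: gitlab
  ports:
    - name: http
      port: 80
      targetPort: http
    - name: ssh       # optional: only needed if you later expose git-over-ssh
      port: 22
      targetPort: ssh
```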
- Create the Ingress
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: gitlab
  namespace: cicd
spec:
  ingressClassName: nginx
  rules:
    - host: gitlab.example.com
      http:
        paths:
          - backend:
              service:
                name: gitlab-svc
                port:
                  number: 80
            path: /
            pathType: Prefix
```