custom-hashicorp-vault-helm/templates/vault-statefulset.yaml
Alan Bridgeman ae8f4ad22b
All checks were successful
Deploy the Helm Chart / build (push) Successful in 11s
Updated chart so that it works better as a subchart, is more configurable, and is better documented
2025-12-19 17:11:14 -06:00

78 lines
No EOL
2.7 KiB
YAML

{{- /*
StatefulSet running a single HashiCorp Vault server.
Rendered only when chart-managed Vault is enabled AND the selected backend
type is "hashicorp".
*/ -}}
{{- if and (.Values.create.enabled) (eq .Values.type "hashicorp") -}}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ include "vault.fullname" . }}
  labels:
    app: {{ include "vault.fullname" . }}
spec:
  serviceName: {{ include "vault.fullname" . }}
  # Single replica: this chart does not configure HA / cluster join,
  # so scaling beyond 1 would produce independent, uninitialized Vaults.
  replicas: 1
  selector:
    matchLabels:
      app: {{ include "vault.fullname" . }}
  template:
    metadata:
      labels:
        app: {{ include "vault.fullname" . }}
    spec:
      # Because the Vault process runs with a non-root user inside the container,
      # we need to set the fsGroup to ensure that the mounted volumes have the
      # correct permissions.
      securityContext:
        fsGroup: 1000
      containers:
        - name: {{ include "vault.fullname" . }}
          image: "{{ .Values.create.image.repository }}:{{ .Values.create.image.tag }}"
          ports:
            # 8200: Vault API/UI; 8201: cluster (server-to-server) traffic.
            - containerPort: 8200
            - containerPort: 8201
            {{- if .Values.create.snapshotServer.enabled }}
            # Extra port for the optional snapshot server sidecar process.
            - containerPort: {{ .Values.create.snapshotServer.internalPort }}
            {{- end }}
          env:
            - name: VAULT_ADDR
              value: "http://0.0.0.0:8200"
            # Quote templated values so empty or boolean-looking strings still
            # render as valid YAML string scalars.
            - name: ROLE_ID_SECRET_NAME
              value: {{ .Values.create.appRole.roleIDSecretName | quote }}
            - name: SECRET_ID_SECRET_NAME
              value: {{ .Values.create.appRole.secretIDSecretName | quote }}
            {{- if .Values.create.snapshotServer.enabled }}
            - name: SNAPSHOT_SERVER_PORT
              value: {{ .Values.create.snapshotServer.internalPort | quote }}
            {{- end }}
          volumeMounts:
            - name: vault-data
              mountPath: /vault/data
            - name: vault-log
              mountPath: /vault/logs
            - name: vault-creds
              mountPath: /vault/creds
            - name: vault-role-vars
              mountPath: /role_vars
          # IPC_LOCK lets Vault mlock memory so secrets are never swapped to
          # disk. NOTE: the previous `capAdd:` key was Docker-Compose syntax
          # and is not a valid Kubernetes field; capabilities must be granted
          # via the container securityContext.
          securityContext:
            capabilities:
              add:
                - IPC_LOCK
      volumes:
        - name: vault-log
          emptyDir: {}
        # Credentials are persisted only when the snapshot server needs them
        # across restarts; otherwise a throwaway emptyDir suffices.
        - name: vault-creds
          {{- if .Values.create.snapshotServer.enabled }}
          persistentVolumeClaim:
            claimName: {{ include "vault.fullname" . }}-creds
          {{- else }}
          emptyDir: {}
          {{- end }}
        - name: vault-role-vars
          persistentVolumeClaim:
            claimName: {{ include "vault.fullname" . }}-role-vars
  # To have data consistency across pod restarts, we use a volume claim template.
  # We do this instead of a static PVC because we require less control and it's
  # easier to manage.
  volumeClaimTemplates:
    - metadata:
        name: vault-data
      spec:
        storageClassName: {{ .Values.create.vaultData.storageClassName }}
        # accessModes is a list in values.yaml; render it as proper block YAML
        # instead of relying on Go's default slice formatting.
        accessModes: {{- .Values.create.vaultData.accessModes | toYaml | nindent 10 }}
        resources:
          requests:
            storage: {{ .Values.create.vaultData.size }}
{{- end -}}