Updated chart so that it works better as a subchart, is more configurable, and is better documented

Alan Bridgeman 2025-12-19 17:11:14 -06:00
parent 82908c043c
commit ae8f4ad22b
11 changed files with 222 additions and 145 deletions
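The central change is that the template no longer reads its values from under a top-level `vault:` key (`.Values.vault.create.*` becomes `.Values.create.*`). When a chart is consumed as a subchart, Helm already scopes the parent's values by the dependency name, so the old layout would have forced a parent chart to nest everything twice (`vault.vault.create.*`). A parent chart declaring this chart as a dependency named `vault` could now configure it roughly like this (a hypothetical excerpt; only the key names come from the diff below, the values are illustrative):

vault:
  create:
    enabled: true
    image:
      repository: hashicorp/vault   # assumed image; not shown in this diff
      tag: "1.15"                   # illustrative tag
    snapshotServer:
      enabled: false
      internalPort: 8202            # illustrative port
    appRole:
      roleIDSecretName: USER_PASSWORD_DEFAULT_VAULT_ROLE_ID      # previously hardcoded in the template
      secretIDSecretName: USER_PASSWORD_DEFAULT_VAULT_SECRET_ID  # previously hardcoded in the template
    vaultData:
      storageClassName: rook-ceph-block   # previous hardcoded default
      accessModes: [ "ReadWriteOnce" ]    # previous hardcoded default
      size: 2Gi                           # previous hardcoded default
  type: hashicorp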


@@ -1,44 +1,44 @@
-{{- if and (.Values.vault.create.enabled) (eq .Values.vault.type "hashicorp") -}}
+{{- if and (.Values.create.enabled) (eq .Values.type "hashicorp") -}}
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
-  name: {{ .Release.Name }}-vault
+  name: {{ include "vault.fullname" . }}
   labels:
-    app: {{ .Release.Name }}-vault
+    app: {{ include "vault.fullname" . }}
 spec:
-  serviceName: {{ .Release.Name }}-vault
+  serviceName: {{ include "vault.fullname" . }}
   replicas: 1
   selector:
     matchLabels:
-      app: {{ .Release.Name }}-vault
+      app: {{ include "vault.fullname" . }}
   template:
     metadata:
       labels:
-        app: {{ .Release.Name }}-vault
+        app: {{ include "vault.fullname" . }}
     spec:
       # Because the Vault process runs with a non-root user inside the container,
       # we need to set the fsGroup to ensure that the mounted volumes have the correct permissions
       securityContext:
         fsGroup: 1000
       containers:
-        - name: {{ .Release.Name }}-vault
-          image: {{ .Values.vault.create.image.repository }}:{{ .Values.vault.create.image.tag }}
+        - name: {{ include "vault.fullname" . }}
+          image: {{ .Values.create.image.repository }}:{{ .Values.create.image.tag }}
           ports:
             - containerPort: 8200
             - containerPort: 8201
-            {{- if .Values.vault.create.snapshotServer.enabled }}
-            - containerPort: {{ .Values.vault.create.snapshotServer.internalPort }}
+            {{- if .Values.create.snapshotServer.enabled }}
+            - containerPort: {{ .Values.create.snapshotServer.internalPort }}
             {{- end }}
           env:
             - name: VAULT_ADDR
               value: http://0.0.0.0:8200
             - name: ROLE_ID_SECRET_NAME
-              value: USER_PASSWORD_DEFAULT_VAULT_ROLE_ID
+              value: {{ .Values.create.appRole.roleIDSecretName }}
             - name: SECRET_ID_SECRET_NAME
-              value: USER_PASSWORD_DEFAULT_VAULT_SECRET_ID
-            {{- if .Values.vault.create.snapshotServer.enabled }}
+              value: {{ .Values.create.appRole.secretIDSecretName }}
+            {{- if .Values.create.snapshotServer.enabled }}
            - name: SNAPSHOT_SERVER_PORT
-              value: {{ .Values.vault.create.snapshotServer.internalPort | quote }}
+              value: {{ .Values.create.snapshotServer.internalPort | quote }}
             {{- end }}
           volumeMounts:
             - name: vault-data
@@ -55,24 +55,24 @@ spec:
         - name: vault-log
           emptyDir: {}
         - name: vault-creds
-          {{- if .Values.vault.create.snapshotServer.enabled }}
+          {{- if .Values.create.snapshotServer.enabled }}
           persistentVolumeClaim:
-            claimName: {{ .Release.Name }}-vault-creds
+            claimName: {{ include "vault.fullname" . }}-creds
           {{- else }}
           emptyDir: {}
           {{- end }}
         - name: vault-role-vars
           persistentVolumeClaim:
-            claimName: {{ .Release.Name }}-vault-role-vars
+            claimName: {{ include "vault.fullname" . }}-role-vars
   # To have data consistency across pod restarts, we use a volume claim template
   # We do this instead of a static PVC because we require less control and it's easier to manage
   volumeClaimTemplates:
     - metadata:
         name: vault-data
       spec:
-        storageClassName: rook-ceph-block
-        accessModes: [ "ReadWriteOnce" ]
+        storageClassName: {{ .Values.create.vaultData.storageClassName }}
+        accessModes: {{ .Values.create.vaultData.accessModes }}
         resources:
           requests:
-            storage: 2Gi
+            storage: {{ .Values.create.vaultData.size }}
 {{- end -}}
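The naming changes assume a `vault.fullname` named template in `templates/_helpers.tpl`, replacing the hand-rolled `{{ .Release.Name }}-vault` names so that resource names stay consistent and overridable when the chart runs as a subchart. The helper itself is not part of this diff; a minimal sketch along the lines of the standard `helm create` scaffold would be:

{{/* Hypothetical helper; the chart's real definition may differ. */}}
{{- define "vault.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{/* Keep the old "<release>-vault" shape; Kubernetes names are capped at 63 characters */}}
{{- printf "%s-vault" .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}

With a definition like this, a release named `myapp` still yields `myapp-vault` by default, so the rename stays backward compatible unless `fullnameOverride` is set.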