Major change that brought the chart in line with others. Mainly by more heavily relying on subcharts and *.tpl files for code modularity and reusability (and consistency)
All checks were successful
Deploy the Helm Chart / build (push) Successful in 15s
All checks were successful
Deploy the Helm Chart / build (push) Successful in 15s
This commit is contained in:
parent
84a322eb28
commit
563a76b84e
34 changed files with 621 additions and 1103 deletions
61
templates/_sidecars.tpl
Normal file
61
templates/_sidecars.tpl
Normal file
|
|
@ -0,0 +1,61 @@
|
|||
{{- define "backupSidecar" -}}
|
||||
- name: {{ .Values.backupSidecar.name }}
|
||||
image: {{ .Values.backupSidecar.image.repository }}:{{ .Values.backupSidecar.image.tag }}
|
||||
imagePullPolicy: {{ .Values.backupSidecar.image.pullPolicy }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.backupSidecar.port }}
|
||||
env:
|
||||
# Release name (used to identify the service/release the backups came from in remote storage)
|
||||
- name: RELEASE_NAME
|
||||
value: {{ .Release.Name }}
|
||||
{{- include "db.envVars" . | nindent 2 -}}
|
||||
{{- if .Values.vault.create.snapshotServer.enabled }}
|
||||
- name: VAULT_NAME
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-pass-vault-secret
|
||||
key: vault-name
|
||||
- name: VAULT_SNAPSHOT_SERVER_PORT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-pass-vault-snapshot-config
|
||||
key: port
|
||||
{{- end }}
|
||||
{{- if eq .Values.nosql.type "mongodb" }}
|
||||
# NoSQL storage related environment variables
|
||||
# Note, we only worry about self-hosted options as cloud-based should have their own backups etc...
|
||||
- name: STORAGE_ACCOUNT_CONNECTION_STRING
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-mongo-credentials
|
||||
key: connection-string
|
||||
{{- end }}
|
||||
# Redis is used for BullMQ, which is how we schedule backups
|
||||
# We use this instead of, for instance cron jobs, as it lets us deal with failures
|
||||
{{- include "cache.envVars" . | nindent 2 }}
|
||||
resources:
|
||||
requests:
|
||||
cpu: {{ .Values.backupSidecar.resources.requests.cpu }}
|
||||
memory: {{ .Values.backupSidecar.resources.requests.memory }}
|
||||
ephemeral-storage: {{ .Values.backupSidecar.resources.requests.ephemeralStorage }}
|
||||
limits:
|
||||
cpu: {{ .Values.backupSidecar.resources.limits.cpu }}
|
||||
memory: {{ .Values.backupSidecar.resources.limits.memory }}
|
||||
ephemeral-storage: {{ .Values.backupSidecar.resources.limits.ephemeralStorage }}
|
||||
{{- if .Values.vault.create.snapshotServer.enabled }}
|
||||
volumeMounts:
|
||||
# Mount for a shared volume for Vault credentials
|
||||
# This is separate from the app's `role vars` volume because it includes other credentials
|
||||
# In particular, the unseal keys which we require when/if we restore from the backup
|
||||
# This volume is also read-only where the `role-vars` is read-write (see description below for why)
|
||||
- name: creds
|
||||
mountPath: /vault-creds
|
||||
readOnly: true
|
||||
# Mount for a shared volume for the Vault's role variables for the app
|
||||
# This is required by the backup sidecar because if a restart of the app occurs AFTER a vault has been reset (ex. vault using a different container instance),
|
||||
# despite the vault data being restored the app would receive incorrect credentials (because this is ONLY written during setup of the vault)
|
||||
# The backup sidecar mitigates this by doing it's own write (to overwrite) once it's done a restore
|
||||
- name: role-vars
|
||||
mountPath: /role_vars
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
Loading…
Add table
Add a link
Reference in a new issue