Initial code commit

Alan Bridgeman 2025-05-12 19:22:55 -05:00
parent 5b71cecab6
commit b3382d880b
37 changed files with 1778 additions and 1 deletion

.github/workflows/deploy.yml

@@ -0,0 +1,358 @@
name: Deploy the Helm Chart
on:
push:
branches:
- main
workflow_dispatch:
jobs:
# "Build" from the source code
build:
runs-on: self-hosted
outputs:
chart-name: ${{ steps.update-helm-repo.outputs.CHART_NAME }}
steps:
# Get the source code from the repository
- name: Checkout repository
uses: actions/checkout@v4
# "Build"/package the source code int the appropriate format (Helm chart etc...)
- name: Update Helm Repository (if needed)
id: update-helm-repo
run: |
# Parse the chart name from the Chart.yaml
CHART_NAME=$(yq '.name' Chart.yaml)
CHART_NAME=${CHART_NAME#\"} # Remove leading quote
CHART_NAME=${CHART_NAME%\"} # Remove trailing quote
echo "CHART_NAME=$CHART_NAME" >> $GITHUB_OUTPUT
echo "Chart Name: $CHART_NAME"
# Note, this depends on the [Harbor Helm Index](https://github.com/AlanBridgeman/harbor-helm-index) pretty heavily
# In particular, that tool allows us to treat what is an OCI registry as a Helm repository (which includes using `helm search repo`)
helm repo add BridgemanAccessible https://helm.bridgemanaccessible.ca
# Check if the chart is already in the repository or not
REMOTE_CHART_WORK_OUTPUT=$(helm search repo BridgemanAccessible/$CHART_NAME 2>/dev/null || echo "")
if [ -n "$REMOTE_CHART_WORK_OUTPUT" ]; then
# The chart is already in the repository, so we need to check if the version is the same or not
echo "Chart already exists in the repository. Checking version..."
IFS=' ' read -r -a REMOTE_VERSIONS <<< $(helm search repo BridgemanAccessible/$CHART_NAME --output json | jq '.[].version | @sh')
echo "Remote Chart Versions: ${REMOTE_VERSIONS[@]}"
else
# The chart is not in the repository, so we'll need to add it
echo "Chart not found in the repository. Adding it..."
# Set a blank value so that it WON'T match the local version
IFS=' ' read -r -a REMOTE_VERSIONS <<< ""
fi
# Just to keep things clean/safe/etc... remove the repo
helm repo remove BridgemanAccessible
# Get the local version from the Chart.yaml file
LOCAL_VERSION=$(yq '.version' Chart.yaml)
LOCAL_VERSION=${LOCAL_VERSION#\"} # Remove leading quote
LOCAL_VERSION=${LOCAL_VERSION%\"} # Remove trailing quote
echo "Local Chart Version: $LOCAL_VERSION"
has_match='false'
if [ ${#REMOTE_VERSIONS[@]} -gt 0 ]; then
# Loop through the remote tags and check if any of them match the local version
for REMOTE_VERSION in ${REMOTE_VERSIONS[@]}; do
REMOTE_VERSION=${REMOTE_VERSION#\'} # Remove leading quote
REMOTE_VERSION=${REMOTE_VERSION%\'} # Remove trailing quote
# Check if the remote tag is the same as the local tag
if [ "$REMOTE_VERSION" == "$LOCAL_VERSION" ]; then
echo "Remote version matches local version!"
has_match='true'
break
fi
done
fi
# If the versions match, we want to increment the chart's patch version
if [ "has_match" ]; then
echo "Versions match!"
# Increment the patch version of the local version (Ex. 1.0.0 -> 1.0.1)
IFS='.' read -r major minor patch <<< "$LOCAL_VERSION"
patch=$((patch + 1))
NEW_LOCAL_VERSION="$major.$minor.$patch"
echo "New Local Version: $NEW_LOCAL_VERSION"
echo "Committing new chart version change..."
sed -i "s|version: \"$LOCAL_VERSION\"|version: \"$NEW_LOCAL_VERSION\"|g" Chart.yaml
LOCAL_VERSION=$NEW_LOCAL_VERSION
# Update remote URL to use the GITHUB_TOKEN for authentication
git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git
# Setup git user details for committing the version change and tag
git config user.name "GitHub Actions"
git config user.email "actions@github.com"
# Commit the version change to the `Chart.yaml` file
git add Chart.yaml
git commit -m "[Github Actions] Update Helm chart version to $(yq -r '.version' Chart.yaml)"
# Push the changes to the repository
git push origin HEAD:main
else
echo "Versions do not match!"
fi
helm package .
helm push ./$CHART_NAME-$LOCAL_VERSION.tgz oci://${{ secrets.REPOSITORY_HOSTNAME }}/helm
# Deploy to our environment (staging cluster)
deploy:
runs-on: self-hosted
needs: build
outputs:
backup-created: ${{ steps.backup.outputs.created }}
restore-time: ${{ steps.deploy-helm-chart.outputs.RESTORE_TIME }}
env:
NAMESPACE: ciam-service-dashboard
RELEASE_NAME: services
CHART_NAME: ${{ needs.build.outputs.chart-name }}
steps:
# Check if it's already deployed
# This determines if we do the backup and takedown before deploying the new version
- name: Already Deployed Check
id: already-deployed-check
run: |
# Check if the Helm chart is already deployed
if helm status -n ${{ env.NAMESPACE }} ${{ env.RELEASE_NAME }}; then
echo "Already deployed"
echo "already-deployed=true" >> $GITHUB_OUTPUT
else
echo "Not deployed"
echo "already-deployed=false" >> $GITHUB_OUTPUT
fi
# Take a backup of the current state of the resources
- name: Backup Data
id: backup
if: steps.already-deployed-check.outputs.already-deployed == 'true'
run: |
# -- Setup Backup Sidecar metadata variables --
# Variable for the image name (so that there can't be typos etc...)
BACKUP_SIDECAR_IMAGE_NAME="backup-sidecar"
# Get the available tags from the image registry
IFS=' ' read -r -a BACKUP_SIDECAR_TAGS <<< $(skopeo list-tags docker://${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME | jq -r '.Tags | @sh')
# Get the latest tag from the list of tags
LATEST_BACKUP_SIDECAR_TAG=$(echo ${BACKUP_SIDECAR_TAGS[@]} | tr ' ' '\n' | sort -V | tail -n 1)
LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG#\'} # Remove leading quote
LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG%\'} # Remove trailing quote
echo "Latest Backup Sidecar Tag: $LATEST_BACKUP_SIDECAR_TAG"
# Get/Generate the "full" image name (including the tag) for the backup sidecar
FULL_BACKUP_SIDECAR_IMAGE_NAME="${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME:$LATEST_BACKUP_SIDECAR_TAG"
echo "Looking for backup sidecar: $FULL_BACKUP_SIDECAR_IMAGE_NAME"
# -- END: Setup Backup Sidecar metadata variables --
# -- Create a backup --
# Get the name of the running main pod (the one that has the backup sidecar container in it)
RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" '.items[] | select(.spec.containers[].image == $img) | .metadata.name')
echo "Running Main Pod: $RUNNING_MAIN_POD"
# Make the request to create the backups
CREATE_BACKUP_OUTPUT=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.RELEASE_NAME }} -- /bin/bash -c "curl -X PUT -sSL http://localhost:4000/backup -o backup-output.txt && cat backup-output.txt")
echo "Create Backup Output: $CREATE_BACKUP_OUTPUT"
# Parse the output to check if the backup was created successfully
BACKUP_OUTPUT_MESSAGE=$(echo $CREATE_BACKUP_OUTPUT | jq -r '.message')
if [ "$BACKUP_OUTPUT_MESSAGE" == "Backup created successfully" ]; then
echo "Backup created successfully."
echo "created=true" >> $GITHUB_OUTPUT
else
echo "Error creating backup: $BACKUP_OUTPUT_MESSAGE"
exit 1
fi
# -- END: Create a backup --
# Because we've taken a backup of its current state and it's easier to start fresh etc...
# We delete all the existing resources (including the Helm chart) before (re-)deploying the new one
- name: Remove old resources
if: steps.backup.outputs.created == 'true'
run: |
# Use a custom script (found in [Helpful Scripts]()) to uninstall the Helm chart and delete any associated resources
k8s-delete \
--namespace ${{ env.NAMESPACE }} \
--release-name ${{ env.RELEASE_NAME }} \
--rwx-volumes vault-role-vars
# Deploy the resources to the cluster
- name: Deploy Helm Chart
id: deploy-helm-chart
run: |
FILLED_VALUES_FILE="values.filled.yaml"
# Download a filled version of the `values.yaml` file from a secure location
DOWNLOAD_FILE=$(curl -sSL https://secure-storage.bridgemanaccessible.ca/services-dashboard/filled-values?token=${{ secrets.SECURE_STORAGE_TOKEN }} -o $FILLED_VALUES_FILE || echo "Failed to download filled values file.")
if [ "$DOWNLOAD_FILE" == "Failed to download filled values file." ]; then
echo "Error: $DOWNLOAD_FILE"
exit 1
fi
if [ -n "${{ steps.backup.outputs.created }}" ]; then
CURR_DATETIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
echo "Setting restoreFromBackup to: $CURR_DATETIME"
# If the backup was created successfully, we want to set the `restoreFromBackup` variable to true in the filled values file
sed -i "s|restoreFromBackup: \"\"|restoreFromBackup: \"$CURR_DATETIME\"|g" $FILLED_VALUES_FILE
echo "RESTORE_TIME=$CURR_DATETIME" >> $GITHUB_OUTPUT
fi
# Parse the chart name from the Chart.yaml
CHART_NAME=${{ env.CHART_NAME }}
echo "Chart Name: $CHART_NAME"
# Can run `k8s-deploy --help` if you want to see all the options available
k8s-deploy \
--namespace ${{ env.NAMESPACE }} \
--release-name ${{ env.RELEASE_NAME }} \
--filled-values-file $FILLED_VALUES_FILE \
--chart-name $CHART_NAME \
--rwx-volumes vault-role-vars
# Once deployed, we want to restore it to its previous state (if applicable)
restore:
runs-on: self-hosted
needs: deploy
if: needs.deploy.outputs.backup-created == 'true'
env:
NAMESPACE: ciam-service-dashboard
CONTAINER_NAME: services
RESTORE_TIME: ${{ needs.deploy.outputs.restore-time }}
SITE_HOSTNAME: services.bridgemanaccessible.ca
steps:
# Restore the data from the backup
- name: Restore data
run: |
# -- Setup Backup Sidecar metadata variables --
# Variable for the image name (so that there can't be typos etc...)
BACKUP_SIDECAR_IMAGE_NAME="backup-sidecar"
# Get the available tags from the image registry
IFS=' ' read -r -a BACKUP_SIDECAR_TAGS <<< $(skopeo list-tags docker://${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME | jq -r '.Tags | @sh')
# Get the latest tag from the list of tags
LATEST_BACKUP_SIDECAR_TAG=$(echo ${BACKUP_SIDECAR_TAGS[@]} | tr ' ' '\n' | sort -V | tail -n 1)
LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG#\'} # Remove leading quote
LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG%\'} # Remove trailing quote
echo "Latest Backup Sidecar Tag: $LATEST_BACKUP_SIDECAR_TAG"
# Get/Generate the "full" image name (including the tag) for the backup sidecar
FULL_BACKUP_SIDECAR_IMAGE_NAME="${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME:$LATEST_BACKUP_SIDECAR_TAG"
echo "Looking for backup sidecar: $FULL_BACKUP_SIDECAR_IMAGE_NAME"
# -- END: Setup Backup Sidecar metadata variables --
# Get the name of the running main pod (the one that has the backup sidecar container in it)
RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" '.items[] | select(.spec.containers[].image == $img) | .metadata.name')
echo "Running Main Pod: $RUNNING_MAIN_POD"
# Variables for retry logic
MAX_RETRIES=5
RETRY_INTERVAL=30
RETRY_COUNT=0
# Because Kubernetes (or at least our staging cluster) can be a bit temperamental,
# We allow for "resetting" a few times.
# By resetting, we mean re-detecting the main running pod etc...
MAX_RESETS=3
RESET_COUNT=0
POD_STATUS=$(kubectl -n ${{ env.NAMESPACE }} get pod $RUNNING_MAIN_POD -o json | jq -r '.status.phase')
echo "Pod Status: $POD_STATUS"
# Retry (wait) a few times if the pod isn't running yet
while [ "$POD_STATUS" != "Running" ] && [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
echo "Pod is not running yet (Current Status: $POD_STATUS). Waiting for $RETRY_INTERVAL seconds (attempt $((RETRY_COUNT + 1))/$MAX_RETRIES)..."
sleep $RETRY_INTERVAL
# Get the current status of the pod
POD_STATUS=$(kubectl -n ${{ env.NAMESPACE }} get pod $RUNNING_MAIN_POD -o json | jq -r '.status.phase')
# Increment the retry count
RETRY_COUNT=$((RETRY_COUNT + 1))
# If the pod is in a failed state, we want to "reset"
# Though, we only want to reset a few times (to avoid infinite loops if something else is wrong etc...)
if [[ "$POD_STATUS" == "Failed" ]] && [ $RESET_COUNT -lt $MAX_RESETS ]; then
echo "Pod is in a failed state. Resetting..."
# Reset the pod and increment the reset count
RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" --arg prev "$RUNNING_MAIN_POD" '.items[] | select(.spec.containers[].image == $img and .metadata.name != $prev) | .metadata.name')
echo "Running Main Pod Reset: $RUNNING_MAIN_POD"
# Reset the retry count
RETRY_COUNT=0
echo "Retry Count Reset"
RESET_COUNT=$((RESET_COUNT + 1))
fi
done
if [ "$POD_STATUS" != "Running" ]; then
echo "Error: Pod did not start running after $((RETRY_COUNT * RETRY_INTERVAL)) seconds."
exit 1
fi
LATEST_BACKUPS=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.CONTAINER_NAME }} -- /bin/bash -c "curl -sSL http://localhost:4000/backups/latest -o latest-backup.json && cat latest-backup.json")
echo "Latest Backups: $LATEST_BACKUPS"
LATEST_DB_BACKUP=$(echo $LATEST_BACKUPS | jq -r '.backups.db')
echo "Latest DB Backup: $LATEST_DB_BACKUP"
LATEST_VAULT_BACKUP=$(echo $LATEST_BACKUPS | jq -r '.backups.vault')
echo "Latest Vault Backup: $LATEST_VAULT_BACKUP"
echo "Restoring Vault Backup: $LATEST_VAULT_BACKUP at restore time: ${{ env.RESTORE_TIME }}"
restore_output=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.CONTAINER_NAME }} -- /bin/bash -c "curl -s -X POST -H 'Content-Type: application/json' -d '{ \"restoreTime\": \"${{ env.RESTORE_TIME }}\" }' http://localhost:4000/restore/latest -o restore-output.txt && cat restore-output.txt")
echo "Restore Output: $restore_output"
# Verify that the site starts up after the restore
- name: Verify Restore
run: |
SITE_REQUEST_STATUS_CODE=$(curl -sSL https://$SITE_HOSTNAME -w '%{http_code}' -o /dev/null)
MAX_RETRIES=5
RETRY_INTERVAL=30
RETRY_COUNT=0
# Retry (wait) a few times if the site isn't up yet
while [ "$SITE_REQUEST_STATUS_CODE" != "200" ] && [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
echo "Site is not up yet (Current Status Code: $SITE_REQUEST_STATUS_CODE). Waiting for $RETRY_INTERVAL seconds (attempt $((RETRY_COUNT + 1))/$MAX_RETRIES)..."
sleep $RETRY_INTERVAL
# Get the current status of the site
SITE_REQUEST_STATUS_CODE=$(curl -sSL https://$SITE_HOSTNAME -w '%{http_code}' -o /dev/null)
# Increment the retry count
RETRY_COUNT=$((RETRY_COUNT + 1))
done
if [ "$SITE_REQUEST_STATUS_CODE" != "200" ]; then
echo "Error: Site did not start up after $((RETRY_COUNT * RETRY_INTERVAL)) seconds."
exit 1
fi
echo "Site is up and running (Status Code: $SITE_REQUEST_STATUS_CODE)."

.gitignore

@@ -0,0 +1,9 @@
# Exclude the filled values.yaml file
values.filled.yaml
# Exclude any packaged charts
services-dashboard-*.tgz
# Random other stuff
templates.old
input.json

.helmignore

@@ -0,0 +1,21 @@
# Ignore the ignore file
.helmignore
# Ignore the Helm chart's packaged tarball
*.tgz
# Ignore git related files
.git
.gitignore
# Ignore github automation files
.github
# Ignore inputs file
input.json
# Ignore the filled in values file
values.filled.yaml
# Ignore .old (backup) files
*.old*

Chart.yaml

@@ -0,0 +1,11 @@
apiVersion: v1
appVersion: "1.0.298"
description: A Helm chart for deploying Bridgeman Accessible's Services Dashboard
home: https://github.com/Bridgeman-Accessible/services.bridgemanaccessible.ca-helm
maintainers:
- email: info@bridgemanaccessible.ca
name: Bridgeman Accessible
name: services-dashboard
sources:
- https://github.com/Bridgeman-Accessible/services.bridgemanaccessible.ca
version: "1.5.1"

README.md

@@ -1,2 +1,20 @@
# services.bridgemanaccessible.ca-helm
# Services Dashboard Helm Chart
A Helm chart for deploying Bridgeman Accessible's Services Dashboard
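The chart itself is published to an OCI registry by the workflow below, so a manual install is also possible. A minimal sketch (assuming Helm 3.8+, a filled `values.filled.yaml`, and that `<registry-hostname>` stands in for the registry referenced by the workflow's `REPOSITORY_HOSTNAME` secret):

```bash
# Install/upgrade the chart directly from the OCI registry the CI/CD workflow pushes to
helm upgrade --install services \
  oci://<registry-hostname>/helm/services-dashboard \
  --namespace ciam-service-dashboard \
  --create-namespace \
  -f values.filled.yaml
```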
## Automation (GitHub Actions)
This repository is set up with CI/CD automation that allows for easy deployment when new code is pushed to the repository.
This workflow can be broken down into a few key actions:
1. Build a new version of the Helm Chart
2. If already deployed, backup old data
3. Redeploy the Helm Chart (by deleting and then re-creating)
4. Restore the data using the created backup, if applicable
### New Version
If the chart version matches a version already in the remote registry/repository, the patch number is automatically incremented.
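Roughly, the workflow does this by comparing the version in `Chart.yaml` against what `helm search repo` reports and bumping the patch component. A condensed sketch of that logic (simplified from the "Update Helm Repository (if needed)" step in the workflow; it assumes the `BridgemanAccessible` repo has already been added):

```bash
LOCAL_VERSION=$(yq -r '.version' Chart.yaml)
REMOTE_VERSIONS=$(helm search repo BridgemanAccessible/services-dashboard --output json | jq -r '.[].version')

# If the local version already exists remotely, bump the patch component (e.g. 1.5.1 -> 1.5.2)
if echo "$REMOTE_VERSIONS" | grep -qx "$LOCAL_VERSION"; then
  IFS='.' read -r major minor patch <<< "$LOCAL_VERSION"
  NEW_VERSION="$major.$minor.$((patch + 1))"
  sed -i "s|version: \"$LOCAL_VERSION\"|version: \"$NEW_VERSION\"|" Chart.yaml
fi
```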
### Creating backup of current status
Note that there is an inherent assumption that the pod is currently up and running (stable).
### Restoring data from backup
This can fail if the redeployment doesn't go smoothly, though it tries to accommodate common issues.
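The backup and restore steps both drive the backup sidecar's HTTP API (port 4000) from inside the main pod via `kubectl exec`. A condensed sketch of the calls the workflow makes (pod name and timestamp are placeholders; the endpoints are the ones used in `deploy.yml`):

```bash
NS=ciam-service-dashboard

# Create a backup of the current state (done before the old release is removed)
kubectl -n $NS exec <main-pod> -c services -- \
  curl -sSL -X PUT http://localhost:4000/backup

# Look up the most recent backups (database and Vault)
kubectl -n $NS exec <main-pod> -c services -- \
  curl -sSL http://localhost:4000/backups/latest

# Restore the latest backup, passing the restore timestamp recorded at deploy time
kubectl -n $NS exec <main-pod> -c services -- \
  curl -sSL -X POST -H 'Content-Type: application/json' \
    -d '{ "restoreTime": "2025-01-01T00:00:00Z" }' \
    http://localhost:4000/restore/latest
```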

@@ -0,0 +1,9 @@
{{- if and (.Values.comms.email.enabled) (eq .Values.comms.email.type "acs") -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-email-secret
type: Opaque
data:
connection-string: {{ .Values.comms.email.connectionString | b64enc }}
{{- end -}}

@@ -0,0 +1,12 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-cache-configmap
namespace: {{ .Release.Namespace }}
data:
{{- if and (eq .Values.cache.type "redis") (.Values.cache.create) }}
hostname: {{ .Release.Name }}-redis
{{- else }}
hostname: {{ .Values.cache.hostname }}
{{- end }}
port: {{ .Values.cache.port | quote }}

@@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-cache-credentials
type: Opaque
data:
password: {{ .Values.cache.password | b64enc }}

templates/database.yaml

@@ -0,0 +1,37 @@
{{- if and (eq .Values.database.type "postgres") (.Values.database.create) -}}
apiVersion: postgresql.org/v1
kind: PostgresDatabase
metadata:
name: {{ .Release.Name }}-db
namespace: {{ .Release.Namespace }}
spec:
dbName:
envFrom:
configMapKeyRef:
- name: {{ .Release.Name }}-db-credentials
namespace: postgres-controller
key: db-name
dbRoleName:
envFrom:
configMapKeyRef:
- name: {{ .Release.Name }}-db-credentials
namespace: postgres-controller
key: db-user
dbRolePassword:
envFrom:
secretKeyRef:
- name: {{ .Release.Name }}-db-password
namespace: postgres-controller
key: password
# Because we've adopted a "throw away"/"ephemeral"/"container-esque" approach to our database, we want it to be dropped/deleted when everything else is deleted.
# This is because we re-create it and restore from a backup on every deploy.
# Which helps keep the data current and reinforces the utility of the backup and restore systems.
onDeletion:
# Whether to drop the database when the resource is deleted
dropDB: true
# Whether to drop the role when the resource is deleted
dropRole: true
{{- if .Values.database.instance_id }}
dbInstanceId: {{ .Values.database.instance_id }}
{{- end }}
{{- end -}}

@@ -0,0 +1,16 @@
{{- if and (eq .Values.database.type "postgres") (.Values.database.create) -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-db-credentials
namespace: postgres-controller
data:
db-host: {{ .Values.database.host }}
db-name: {{ .Values.database.name }}
db-user: {{ .Values.database.user }}
{{- if .Values.database.port }}
db-port: {{ .Values.database.port | quote }}
{{- else }}
db-port: "5432"
{{- end }}
{{- end -}}

@@ -0,0 +1,13 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-db-credentials
data:
db-host: {{ .Values.database.host }}
db-name: {{ .Values.database.name }}
db-user: {{ .Values.database.user }}
{{- if .Values.database.port }}
db-port: {{ .Values.database.port | quote }}
{{- else }}
db-port: "5432"
{{- end }}

@@ -0,0 +1,10 @@
{{- if and (eq .Values.database.type "postgres") (.Values.database.create) -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-db-password
namespace: postgres-controller
type: Opaque
data:
password: {{ .Values.database.password | b64enc }}
{{- end -}}

@@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-db-password
type: Opaque
data:
password: {{ .Values.database.password | b64enc }}

templates/deployment.yaml

@@ -0,0 +1,418 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}
labels:
app: {{ .Release.Name }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ .Release.Name }}
spec:
containers:
- name: {{ .Release.Name }}
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: {{ .Values.container.port }}
env:
- name: NODE_ENV
value: {{ .Values.container.env }}
- name: PORT
value: "{{ .Values.container.port }}"
- name: DOMAIN
value: "bridgemanaccessible.ca"
- name: ACCOUNTS_DEV_PORT
value: "9010"
- name: ACCOUNTS_SUBDOMAIN_PREFIX
value: "account"
# OAuth Implementation Stuff
- name: BASE_APP_URL
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-oauth-credentials
key: base-app-url
- name: APP_ABBRV
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-oauth-credentials
key: app-abbreviation
- name: APP_NAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-oauth-credentials
key: app-name
- name: SERVICE_NAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-oauth-credentials
key: service-name
- name: DEV_PORT
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-oauth-credentials
key: dev-port
# Database credentials
- name: DB_HOST
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-host
- name: DB_NAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-name
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-db-password
key: password
- name: DB_PORT
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-port
- name: DB_USER
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-user
# NoSQL Credentials
{{- if eq .Values.nosql.type "mongodb" }}
- name: STORAGE_ACCOUNT_CONNECTION_STRING
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-mongo-credentials
key: connection-string
{{- else if eq .Values.nosql.type "azure" }}
- name: STORAGE_ACCOUNT_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-azure-tables-credentials
key: key
- name: STORAGE_ACCOUNT_NAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-azure-tables-config
key: name
{{- end }}
# NoSQL Grouping Names
- name: ACCESS_PROPERTIES_STORAGE_TABLE_NAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-nosql-grouping
key: access-properties
- name: LOCALE_PROPERTIES_STORAGE_TABLE_NAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-nosql-grouping
key: locales
- name: ORDER_PROPERTIES_STORAGE_TABLE_NAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-nosql-grouping
key: order-properties
- name: PRICE_PROPERTIES_STORAGE_TABLE_NAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-nosql-grouping
key: price-properties
- name: SERVICE_CATEGORY_PROPERTIES_STORAGE_TABLE_NAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-nosql-grouping
key: service-category-properties
- name: SERVICE_PROPERTIES_STORAGE_TABLE_NAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-nosql-grouping
key: service-properties
# -- Secrets Vault (Hashicorp Vault OR Azure Key Vault) --
{{- if .Values.vault.enabled }}
{{- if eq .Values.vault.type "azure" }}
- name: KEYVAULT_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-vault-secret
key: client-id
- name: KEYVAULT_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-vault-secret
key: client-secret
- name: KEYVAULT_NAME
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-vault-secret
key: name
- name: KEYVAULT_TENANT_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-vault-secret
key: tenant-id
{{- else if eq .Values.vault.type "hashicorp" }}
- name: VAULT_NAME
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-vault-secret
key: vault-name
- name: VAULT_PORT
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-vault-secret
key: vault-port
{{- end }}
{{- end }}
# Caching Server Variables
- name: CACHE_HOSTNAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-cache-configmap
key: hostname
- name: CACHE_PORT
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-cache-configmap
key: port
- name: CACHE_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-cache-credentials
key: password
# Email (Azure Communication Services API credentials, etc...)
{{- if and (.Values.comms.email.enabled) (eq .Values.comms.email.type "acs") }}
- name: EMAIL_CONNECTION_STRING
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-email-secret
key: connection-string
{{- end }}
# Third-Party Integrations
{{- if .Values.thirdParty.stripe.enabled }}
- name: STRIPE_PUBLIC_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-stripe-secret
key: public-key
- name: STRIPE_SECRET_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-stripe-secret
key: secret-key
- name: STRIPE_TEST_PUBLIC_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-stripe-secret
key: test-public-key
- name: STRIPE_TEST_SECRET_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-stripe-secret
key: test-secret-key
{{- end }}
{{- if .Values.thirdParty.moneris.enabled }}
- name: MONERIS_MERCHANT_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: merchant-id
- name: MONERIS_STORE_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: store-id
- name: MONERIS_HT_PROFILE_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: ht-profile-id
- name: MONERIS_APP_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: app-id
- name: MONERIS_APP_SECRET
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: app-secret
- name: MONERIS_TEST_MERCHANT_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-merchant-id
- name: MONERIS_TEST_STORE_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-store-id
- name: MONERIS_TEST_HT_PROFILE_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-ht-profile-id
- name: MONERIS_TEST_APP_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-app-id
- name: MONERIS_TEST_APP_SECRET
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-app-secret
{{- end }}
# Logging Sidecar related environment variables
{{- if .Values.loggingSidecar.enabled }}
- name: LOGGING_SIDE_CAR_PORT
value: {{ .Values.loggingSidecar.port | quote }}
{{- end }}
# Due to subtleties related to how the entrypoint script detects how/when to proceed
# This environment variable indicates if the entrypoint should wait for a restore to complete
{{- if ne .Values.container.restoreFromBackup "" }}
- name: RESTORE_FROM_BACKUP
value: {{ .Values.container.restoreFromBackup | quote }}
{{- end }}
{{- if .Values.vault.create.enabled }}
volumeMounts:
# Mount for a shared volume for app's vault credentials
# Note, this is required because the credentials are created as part of the vault's setup and not known ahead of time
# This seems a better approach than passing, as an example, a preset root token as an environment variable for a few reasons:
# 1. This way even if the app's credentials become compromised they're more limited in scope/permissions
# 2. This creates a semi-unintentional wait lock for the app until the vault is ready (and the credentials are created)
- name: role-vars
mountPath: /role_vars
readOnly: true
{{- end }}
# Logging sidecar for sending logs to a log aggregator
{{- if .Values.loggingSidecar.enabled }}
- name: {{ .Values.loggingSidecar.name }}
image: {{ .Values.loggingSidecar.image.repository }}:{{ .Values.loggingSidecar.image.tag }}
imagePullPolicy: {{ .Values.loggingSidecar.image.pullPolicy }}
ports:
- containerPort: {{ .Values.loggingSidecar.port }}
env:
- name: PORT
value: {{ .Values.loggingSidecar.port | quote }}
# Log aggregator (OpenObserve) auth variables
- name: LOGGER_AUTH_USERNAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-logging-sidecar-credentials
key: username
- name: LOGGER_AUTH_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-logging-sidecar-password
key: password
{{- end }}
# Backup sidecar for backing up service data
{{- if .Values.backupSidecar.enabled }}
- name: {{ .Values.backupSidecar.name }}
image: {{ .Values.backupSidecar.image.repository }}:{{ .Values.backupSidecar.image.tag }}
imagePullPolicy: {{ .Values.backupSidecar.image.pullPolicy }}
ports:
- containerPort: {{ .Values.backupSidecar.port }}
env:
# Release name (used to identify the service/release the backups came from in remote storage)
- name: RELEASE_NAME
value: {{ .Release.Name }}
# Database related environment variables
- name: DB_HOST
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-host
- name: DB_NAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-name
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-db-password
key: password
- name: DB_PORT
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-port
- name: DB_USER
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-user
{{- if .Values.vault.create.snapshotServer.enabled }}
- name: VAULT_NAME
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-vault-secret
key: vault-name
- name: VAULT_SNAPSHOT_SERVER_PORT
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-vault-snapshot-config
key: port
{{- end }}
# NoSQL storage related environment variables
# Note, we only worry about self-hosted options as cloud-based should have their own backups etc...
{{- if eq .Values.nosql.type "mongodb" }}
- name: STORAGE_ACCOUNT_CONNECTION_STRING
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-mongo-credentials
key: connection-string
{{- end }}
# Redis related environment variables
# Redis is used for BullMQ, which is how we schedule backups
# We use this instead of, for instance, cron jobs, as it lets us deal with failures
- name: REDIS_HOSTNAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-cache-configmap
key: hostname
- name: REDIS_PORT
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-cache-configmap
key: port
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-cache-credentials
key: password
{{- if .Values.vault.create.snapshotServer.enabled }}
volumeMounts:
# Mount for a shared volume for Vault credentials
# This is separate from the app's `role-vars` volume because it includes other credentials
# In particular, the unseal keys which we require when/if we restore from the backup
# This volume is also read-only where the `role-vars` is read-write (see description below for why)
- name: creds
mountPath: /vault-creds
readOnly: true
# Mount for a shared volume for the Vault's role variables for the app
# This is required by the backup sidecar because if a restart of the app occurs AFTER a vault has been reset (ex. vault using a different container instance),
# despite the vault data being restored the app would receive incorrect credentials (because this is ONLY written during setup of the vault)
# The backup sidecar mitigates this by doing its own write (to overwrite) once it's done a restore
- name: role-vars
mountPath: /role_vars
{{- end }}
{{- end }}
{{- if .Values.vault.create.enabled }}
volumes:
- name: role-vars
persistentVolumeClaim:
claimName: {{ .Release.Name }}-vault-role-vars
{{- if .Values.vault.create.snapshotServer.enabled }}
- name: creds
persistentVolumeClaim:
claimName: {{ .Release.Name }}-vault-creds
{{- end }}
{{- end }}

templates/ingress.yaml

@@ -0,0 +1,21 @@
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ .Release.Name }}
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
rules:
- host: {{ .Values.ingress.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ .Release.Name }}
port:
number: 80
ingressClassName: {{ .Values.ingress.class }}
{{- end -}}

@@ -0,0 +1,10 @@
{{- if .Values.loggingSidecar.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-logging-sidecar-credentials
labels:
app: {{ .Values.loggingSidecar.name }}
data:
username: {{ .Values.loggingSidecar.auth.username | quote }}
{{- end -}}

@@ -0,0 +1,11 @@
{{- if .Values.loggingSidecar.enabled -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-logging-sidecar-password
labels:
app: {{ .Values.loggingSidecar.name }}
type: Opaque
data:
password: {{ .Values.loggingSidecar.auth.password | b64enc | quote }}
{{- end -}}

@@ -0,0 +1,20 @@
{{- if .Values.thirdParty.moneris.enabled -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-moneris-secret
labels:
app: {{ .Release.Name }}
type: Opaque
data:
merchant-id: {{ .Values.thirdParty.moneris.merchantId | b64enc }}
store-id: {{ .Values.thirdParty.moneris.storeId | b64enc }}
ht-profile-id: {{ .Values.thirdParty.moneris.htProfileId | b64enc }}
app-id: {{ .Values.thirdParty.moneris.appId | b64enc }}
app-secret: {{ .Values.thirdParty.moneris.appSecret | b64enc }}
test-merchant-id: {{ .Values.thirdParty.moneris.testMerchantId | b64enc }}
test-store-id: {{ .Values.thirdParty.moneris.testStoreId | b64enc }}
test-ht-profile-id: {{ .Values.thirdParty.moneris.testHtProfileId | b64enc }}
test-app-id: {{ .Values.thirdParty.moneris.testAppId | b64enc }}
test-app-secret: {{ .Values.thirdParty.moneris.testAppSecret | b64enc }}
{{- end -}}

@@ -0,0 +1,15 @@
{{- if eq .Values.nosql.type "mongodb" -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-mongo-credentials
type: Opaque
data:
user: {{ .Values.nosql.user | b64enc }}
password: {{ .Values.nosql.password | b64enc }}
{{- if and (.Values.nosql.connectionString) (not .Values.nosql.create) }}
connection-string: {{ .Values.nosql.connectionString | b64enc }}
{{- else if .Values.nosql.create }}
connection-string: {{ printf "mongodb://%s:%s@%s-mongo-svc.%s.svc.cluster.local:27017/%s?replicaSet=%s-mongo" .Values.nosql.user .Values.nosql.password .Release.Name .Release.Namespace .Values.nosql.name .Release.Name | b64enc }}
{{- end }}
{{- end -}}

@@ -0,0 +1,16 @@
{{- if and (eq .Values.nosql.type "mongodb") (.Values.nosql.create) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: mongodb-database
namespace: {{ .Release.Namespace }}
subjects:
- kind: ServiceAccount
name: mongodb-database
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: mongodb-database
apiGroup: rbac.authorization.k8s.io
{{- end -}}

@@ -0,0 +1,22 @@
{{- if and (eq .Values.nosql.type "mongodb") (.Values.nosql.create) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: mongodb-database
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- apiGroups:
- ""
resources:
- pods
verbs:
- patch
- delete
- get
{{- end -}}

@@ -0,0 +1,7 @@
{{- if and (eq .Values.nosql.type "mongodb") (.Values.nosql.create) -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: mongodb-database
namespace: {{ .Release.Namespace }}
{{- end -}}

templates/mongo.yaml

@@ -0,0 +1,31 @@
{{- if and (eq .Values.nosql.type "mongodb") (.Values.nosql.create) -}}
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
name: {{ .Release.Name }}-mongo
namespace: {{ .Release.Namespace }}
spec:
members: {{ .Values.nosql.replicaCount }}
type: ReplicaSet
version: 4.4.0
security:
authentication:
ignoreUnknownUsers: true
modes:
- SCRAM
tls:
enabled: {{ .Values.nosql.tls.enabled }}
readinessProbe:
initialDelaySeconds: 30
periodSeconds: 10
users:
- name: {{ .Values.nosql.user }}
db: {{ .Values.nosql.name }}
passwordSecretRef:
name: {{ .Release.Name }}-mongo-credentials
key: password
roles:
- name: readWrite
db: {{ .Values.nosql.name }}
scramCredentialsSecretName: {{ .Release.Name }}-mongo-scram
{{- end -}}

@@ -0,0 +1,13 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-nosql-grouping
labels:
app: {{ .Release.Name }}
data:
access-properties: {{ .Values.nosql.grouping.accessProperties }}
locales: {{ .Values.nosql.grouping.locales }}
order-properties: {{ .Values.nosql.grouping.orderProperties }}
price-properties: {{ .Values.nosql.grouping.priceProperties }}
service-category-properties: {{ .Values.nosql.grouping.serviceCategoryProperties }}
service-properties: {{ .Values.nosql.grouping.serviceProperties }}

@@ -0,0 +1,12 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-oauth-credentials
labels:
app: {{ .Release.Name }}
data:
base-app-url: {{ .Values.oauth.baseAppUrl }}
app-abbreviation: {{ .Values.oauth.appAbbreviation }}
app-name: {{ .Values.oauth.appName }}
service-name: {{ .Values.oauth.serviceName }}
dev-port: {{ .Values.oauth.devPort | quote }}

@@ -0,0 +1,49 @@
{{- if and (eq .Values.cache.type "redis") (.Values.cache.create) -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-redis
labels:
app: redis
spec:
replicas: {{ .Values.cache.replicaCount }}
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- name: redis
image: {{ .Values.cache.image.repository | default "bitnami/redis" }}:{{ .Values.cache.image.tag | default "7.0.5" }}
ports:
- containerPort: {{ .Values.cache.port }}
{{- if .Values.cache.tls.enabled }}
- containerPort: {{ .Values.cache.tls.port }}
{{- end }}
env:
- name: ALLOW_EMPTY_PASSWORD
value: "false"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-cache-credentials
key: password
- name: REDIS_DISABLE_COMMANDS
value: "FLUSHDB,FLUSHALL"
# TLS configuration
#- name: REDIS_TLS_ENABLED
# value: "{{ .Values.cache.tls.enabled }}"
#- name: REDIS_TLS_AUTH_CLIENTS
# value: "yes"
#- name: REDIS_TLS_PORT_NUMBER
# value: "{{ .Values.cache.tls.port }}"
volumeMounts:
- name: redis-data
mountPath: /bitnami/redis
volumes:
- name: redis-data
emptyDir: {}
{{- end -}}

@@ -0,0 +1,15 @@
{{- if and (eq .Values.cache.type "redis") (.Values.cache.create) -}}
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-redis
labels:
app: redis
spec:
ports:
- port: {{ .Values.cache.port }}
targetPort: {{ .Values.cache.port }}
selector:
app: redis
type: ClusterIP
{{- end -}}

templates/service.yaml

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
spec:
selector:
app: {{ .Release.Name }}
ports:
- protocol: TCP
port: 80
targetPort: {{ .Values.container.port }}

@@ -0,0 +1,14 @@
{{- if .Values.thirdParty.stripe.enabled -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-stripe-secret
labels:
app: {{ .Release.Name }}
type: Opaque
data:
public-key: {{ .Values.thirdParty.stripe.publicKey | b64enc }}
secret-key: {{ .Values.thirdParty.stripe.secretKey | b64enc }}
test-public-key: {{ .Values.thirdParty.stripe.testPublicKey | b64enc }}
test-secret-key: {{ .Values.thirdParty.stripe.testSecretKey | b64enc }}
{{- end -}}

@@ -0,0 +1,15 @@
{{- if .Values.vault.create.enabled -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Release.Name }}-vault-creds
labels:
app: {{ .Release.Name }}-vault
spec:
storageClassName: {{ .Values.vault.create.storage.storageClass }}
accessModes:
- ReadWriteMany
resources:
requests:
storage: {{ .Values.vault.create.storage.size }}
{{- end -}}

@@ -0,0 +1,66 @@
{{- if and (.Values.vault.create.enabled) (eq .Values.vault.type "hashicorp") -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-vault
labels:
app: {{ .Release.Name }}-vault
spec:
replicas: 1
selector:
matchLabels:
app: {{ .Release.Name }}-vault
template:
metadata:
labels:
app: {{ .Release.Name }}-vault
spec:
containers:
- name: {{ .Release.Name }}-vault
image: {{ .Values.vault.create.image.repository }}:{{ .Values.vault.create.image.tag }}
ports:
- containerPort: 8200
- containerPort: 8201
{{- if .Values.vault.create.snapshotServer.enabled }}
- containerPort: {{ .Values.vault.create.snapshotServer.internalPort }}
{{- end }}
env:
- name: VAULT_ADDR
value: http://0.0.0.0:8200
- name: POLICY_CAPABILITIES
value: {{ .Values.vault.create.policyCapabilities | join "," }}
- name: ROLE_ID_SECRET_NAME
value: VAULT_ROLE_ID
- name: SECRET_ID_SECRET_NAME
value: VAULT_SECRET_ID
{{- if .Values.vault.create.snapshotServer.enabled }}
- name: SNAPSHOT_SERVER_PORT
value: {{ .Values.vault.create.snapshotServer.internalPort | quote }}
{{- end }}
volumeMounts:
- name: vault-data
mountPath: /vault/data
- name: vault-log
mountPath: /vault/logs
- name: vault-creds
mountPath: /vault/creds
- name: vault-role-vars
mountPath: /role_vars
securityContext:
capabilities:
add:
- IPC_LOCK
volumes:
- name: vault-data
emptyDir: {}
- name: vault-log
emptyDir: {}
- name: vault-creds
{{- if .Values.vault.create.snapshotServer.enabled }}
persistentVolumeClaim:
claimName: {{ .Release.Name }}-vault-creds
{{- else }}
emptyDir: {}
{{- end }}
- name: vault-role-vars
persistentVolumeClaim:
claimName: {{ .Release.Name }}-vault-role-vars
{{- end -}}

@@ -0,0 +1,24 @@
{{- if and (.Values.vault.enabled) (eq .Values.vault.type "hashicorp") -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-vault-secret
type: Opaque
data:
{{- if .Values.vault.create.enabled }}
# Because we create the Hashicorp Vault instance as part of the Helm chart,
# we can use the name of the created resource (utilizing k8s built-in container connections)
# to connect to the Vault instance without having to hard-code the Vault name.
vault-name: {{ printf "%s-vault" .Release.Name | b64enc }}
# Because we create the Hashicorp Vault instance as part of the Helm chart,
# We know the port that the Vault instance is running on.
vault-port: {{ printf "%d" 80 | b64enc }}
{{- else }}
# Because the Vault wasn't created as part of the Helm chart,
# we need the deployer to specify the name of the Vault instance to connect to.
vault-name: {{ .Values.vault.vaultName | b64enc }}
# Because the Vault wasn't created as part of the Helm chart,
# we need the deployer to specify the port that the Vault instance is running on.
vault-port: {{ .Values.vault.vaultPort | b64enc }}
{{- end }}
{{- end -}}

@@ -0,0 +1,21 @@
{{- if .Values.vault.create.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ .Release.Name }}-vault-ingress
labels:
app: {{ .Release.Name }}-vault
spec:
ingressClassName: nginx
rules:
- host: {{ .Values.vault.create.ingress.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ .Release.Name }}-vault
port:
number: 80
{{- end -}}

@@ -0,0 +1,15 @@
{{- if .Values.vault.create.enabled -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Release.Name }}-vault-role-vars
labels:
app: {{ .Release.Name }}-vault
spec:
storageClassName: {{ .Values.vault.create.storage.storageClass }}
accessModes:
- ReadWriteMany
resources:
requests:
storage: {{ .Values.vault.create.storage.size }}
{{- end -}}

@@ -0,0 +1,22 @@
{{- if .Values.vault.create.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-vault
labels:
app: {{ .Release.Name }}-vault
spec:
selector:
app: {{ .Release.Name }}-vault
ports:
{{- if .Values.vault.create.snapshotServer.enabled }}
- name: custom-snapshot-server
protocol: TCP
port: {{ .Values.vault.create.snapshotServer.externalPort }}
targetPort: {{ .Values.vault.create.snapshotServer.internalPort }}
{{- end }}
- name: http
protocol: TCP
port: 80
targetPort: 8200
{{- end -}}

@@ -0,0 +1,8 @@
{{- if .Values.vault.create.snapshotServer.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-vault-snapshot-config
data:
port: {{ .Values.vault.create.snapshotServer.externalPort | quote }}
{{- end -}}

values.yaml

@@ -0,0 +1,393 @@
# The number of instances (replicas) of the app to run
replicaCount: 1
image:
# The repository of the image to use for the app
# Should be in the format `<Image Repository (Ex. containers.example.com)>/<Image Name (Ex. app)>`
repository: "<Image Repository>/<Image Name>"
# The specific image tag to use. Using some kind of versioning tag scheme is recommended, as it makes it easier to update the container without a full redeploy.
# Ex. v1.0.0
tag: "v1.0.0"
# How often the image should be pulled. The possible values are "Always", "Never", and "IfNotPresent"
# It's recommended for production to use "IfNotPresent" to avoid pulling the image every time the pod starts
# Though, for development, "Always" is recommended to ensure the latest changes are being tested
pullPolicy: "IfNotPresent"
container:
# The port that the container listens on (Ex. 8080)
port: 8080
# The environment that the container is running in (Ex. development, production, etc...)
# This is used for the NODE_ENV environment variable
env: "production"
# The restore timestamp; when set (non-empty), the entrypoint script waits for the restore to complete before starting
restoreFromBackup: ""
ingress:
# We want an ingress resource if we are deploying to a cluster that has an ingress controller/load balancer
# This includes most public cloud providers like EKS, GKE, and AKS
enabled: true
# The DNS Name (Ex. app.example.com) where the app will be accessible
host: "<hostname>"
# The class of the ingress controller that is being used (defaulted here to an NGINX ingress controller as it's popular for Kubernetes clusters)
class: nginx
# Configuration for using OAuth within the app
oauth:
baseAppUrl: "<Base App URL>"
appAbbreviation: "<App Abbreviation>"
appName: "<App Name>"
serviceName: "<Service Name>"
devPort: "<Dev Port>"
# Configuration for the relational database
database:
# The type of the relational database that is used.
#
# The following table lists the possible values for this field:
#
# | Value | Description |
# | ---------- | ------------------------------------------ |
# | `postgres` | Uses PostgreSQL as the relational database |
#
# Note, for use of `postgres`, it uses a [`postgres-controller` CRD](https://github.com/AlanBridgeman/postgres-controller) to create the database
#
type: "postgres"
# If set to `true`, the database will be created as part of the deployment
# This uses the [`postgres-controller` CRD](https://github.com/AlanBridgeman/postgres-controller) to create the database
create: false
# The host that the database is located on
host: "<DB Host>"
# The name of the database to be used
name: "<DB Name>"
# The user that is used to access the database
user: "<DB User>"
# The password that is used to access the database
password: "<DB Password>"
# The port that the database listens on
#port: 5432
# Allows for distinguishing between multiple database instances/servers
#instance_id: ""
# Configurations for the secrets vault
vault:
# If a secrets vault should be used
# That is, if a dedicated software for secret management should be used
# This should virtually always be true if storing any kind of sensitive information as it's the most secure option
enabled: true
# The type of secrets vault to use.
#
# Vaults
# ------
# The following table lists the supported vault types:
#
# | Type | Description | Current Status | Required Fields |
# | ----------- | -------------------- | -------------- | --------------------------------------------------- |
# | `hashicorp` | Uses Hashicorp Vault | Implemented | `vaultName` (if `create` not true) |
# | `azure` | Uses Azure Key Vault | Implemented | `vaultName`, `clientId`, `clientSecret`, `tenantId` |
#
type: "<Vault Type>"
# Configurations to create a Hashicorp Vault instance as part of the Helm chart
#
# THIS IS ONLY RELEVANT IF `type` IS SET TO `hashicorp`
create:
# If a Hashicorp Vault instance should be created as part of the Helm chart
enabled: <true/false>
# Configurations for the image to use if creating the Hashicorp Vault instance
# as part of the Helm chart
image:
# The repository of the image to use
repository: <image repository>
# The tag of the image to use
tag: <image tag>
# Configurations for the ingress of the created Hashicorp Vault instance
ingress:
# If an ingress should be created for the created Hashicorp Vault instance
enabled: <true/false>
# The host of the ingress for the created Hashicorp Vault instance
host: <DNS Name for vault>
# The custom Hashicorp Vault image includes a "snapshot server"
# This allows manual backups ("snapshots") to be triggered, retrieved, and restored via HTTP
snapshotServer:
# If the snapshot server should be enabled/running
enabled: true
# The external port that is opened via the service resource
externalPort: 81
# The internal port that the snapshot server listens on
internalPort: 8300
# Configurations for the storage of the created Hashicorp Vault instance
storage:
# The storage class to use for the created Hashicorp Vault instance's Persistent Volume Claim
storageClass: <Storage Class>
# The size of the created Hashicorp Vault instance's Persistent Volume Claim
size: <Storage Size>
# The capabilities of the policy to create for the app
policyCapabilities:
- read
- create
- update
# The name of the vault instance to connect to
#
# This is relevant if type is set to `hashicorp` or `azure`
# Note, if `create` is true this is ignored
#
# For `hashicorp`, this is generally the hostname of the Hashicorp Vault instance to connect to
# For `azure`, this is the name of the Azure Key Vault instance to connect to
#vaultName: "<Vault Name>"
# The port of the vault instance to connect to
#
# ONLY RELEVANT IF `type` IS SET TO `hashicorp` AND `create` IS NOT TRUE
#vaultPort: <Vault Port>
# The client ID of the Azure Key Vault instance
#
# ONLY RELEVANT IF `type` IS SET TO `azure`
#client-id: <Azure Key Vault Client ID>
# The client secret of the Azure Key Vault instance
#
# ONLY RELEVANT IF `type` IS SET TO `azure`
#client-secret: <Azure Key Vault Client Secret>
# The tenant ID of the Azure Key Vault instance
#
# ONLY RELEVANT IF `type` IS SET TO `azure`
#tenant-id: <Azure Key Vault Tenant ID>
# Configuration for the NoSQL database
# Within the parlance of the system these are often called "properties" databases (and store less structured data)
nosql:
# Determines the type of NoSQL storage that is used
#
# The following table lists the possible values for this field:
#
# | Value | Description |
# | --------- | ------------------------------------------------------------------------------------------ |
# | `mongodb` | Uses MongoDB as the NoSQL database for the default account properties database |
# | `azure` | Uses Azure Table Storage as the NoSQL database for the default account properties database |
#
type: <NoSQL Storage Type>
# Whether to create a resource as part of the deployment process
# ONLY relevant if `type` is set to `mongodb`
# This uses the [MongoDBCommunity CRD](https://github.com/mongodb/mongodb-kubernetes-operator) to create the resource
create: false
# The number of replicas/members as part of the Mongo deployment
# See the `member` parameter of the [MongoDBCommunity CRD](https://github.com/mongodb/mongodb-kubernetes-operator) for more information
# ONLY relevant if `type` is set to `mongodb` and `create` is set to `true`
#replicaCount: <Number of replicas>
# The TLS configuration for the connection to the NoSQL database
# ONLY relevant if `type` is set to `mongodb` and `create` is set to `true`
tls:
# Whether to use TLS for the connection to the NoSQL database
enabled: <true/false>
# The connection string used to access the NoSQL database
# ONLY relevant if `type` is set to `mongodb` and `create` is set to `false`
# Should be in the following format: `mongodb://<hostname>:<port>`
#connectionString: "mongodb://mongo.example.com:27017"
# The key used to access the NoSQL database
# ONLY relevant if `type` is set to `azure`
#key: ""
# The name of the NoSQL database
name: "<NoSQL DB Name>"
# The username used to access the NoSQL database
# ONLY relevant if `type` is set to `mongodb`
user: "<MongoDB User>"
# The password used to access the NoSQL database
# ONLY relevant if `type` is set to `mongodb`
password: "<MongoDB Password>"
# Configurable NoSQL information groupings
# For Azure Table Storage these are table names
# For MongoDB these are collection names
grouping:
<Table Intermediate Name>: "<NoSQL Table Name>"
# Configuration for cache server
cache:
type: "redis"
# Whether to create a Redis instance/resource as part of the deployment process
create: false
# The image to use for the Redis instance
# ONLY relevant if `create` is set to `true`
image: {}
# The number of replicas of the Redis instance
# ONLY relevant if `create` is set to `true`
#replicaCount: <Number of replicas (Ex. 1)>
# Hostname of the Redis server
# ONLY relevant if `create` is set to `false`
#hostname: "<Redis Host Name>"
# The password to use for the Redis server
password: "<Redis Password>"
# The port of the Redis server
port: "<Redis Port>"
# Redis TLS Configurations
tls:
# If TLS is enabled for the Redis instance
enabled: false
# The port of the Redis instance for TLS
# ONLY relevant if `tls.enabled` is set to `true`
#port: "<TLS Port (Ex. 6380)>"
# Configurations for communication services/relays
comms:
# Configurations for Email
email:
# If emailing is enabled
#
# Note, if `true` then the `type` must be set (along with any required fields for that type)
#
# Please see the notes on the `type` property below for more information.
# Including required fields for each type
enabled: true
# The type/provider to use for sending emails
#
# The following types are supported:
#
# | Type | Description | Current Status | Required Fields |
# | --------- | --------------------------------------- | -------------- | --------------------------------------- |
# | `acs` | Uses Azure Communication Services (ACS) | Implemented | `connectionString` |
# | `sendgrid`| Uses SendGrid | To-Do | `apiKey` |
# | `smtp` | Uses SMTP | To-Do | `host`, `port`, `username`, `password` |
#
type: 'acs'
# The connection string for the Azure Communication Services (ACS) Email service
#
# THIS IS REQUIRED IF `type` IS SET TO `acs`. Ignored otherwise.
connectionString: "<ACS Connection String>"
# Configurations for integration with third-party services
thirdParty:
# Configurations for the Stripe integration
stripe:
# If the integration is enabled
enabled: true
# The public key (live) of the Stripe integration
publicKey: <Stripe Live Public Key>
# The secret key (live) of the Stripe integration
secretKey: <Stripe Live Secret Key>
# The public key (test) of the Stripe integration
testPublicKey: <Stripe Test Public Key>
# The secret key (test) of the Stripe integration
testSecretKey: <Stripe Test Secret Key>
# Configurations for the Moneris integration
moneris:
# If the integration is enabled
enabled: true
# The Merchant ID (production) for the Moneris integration
merchantId: <Moneris Merchant ID>
# The Store ID (production) for the Moneris integration
storeId: <Moneris Store ID>
# The Hosted Tokenization Profile ID (production) for the Moneris iframe
htProfileId: <Moneris Hosted Tokenization Profile ID>
# The Application/client ID (production) for the Moneris API
appId: <Moneris Application ID>
# The Application/client Secret (production) for the Moneris API
appSecret: <Moneris App Secret>
# The Merchant ID (test) for the Moneris integration
testMerchantId: <Moneris Test Merchant ID>
# The Store ID (test) for the Moneris integration
testStoreId: <Moneris Test/QA Store ID>
# The Hosted Tokenization Profile ID (test) for the Moneris iframe
testHtProfileId: <Moneris Test/QA Hosted Tokenization Profile ID>
# The Application/client ID (test) for the Moneris API
testAppId: <Moneris Sandbox/Test Application ID>
# The Application/client Secret (test) for the Moneris API
testAppSecret: <Moneris Sandbox/Test App Secret>
# Configuration for the logging sidecar
loggingSidecar:
# If the logging sidecar should be used (enabled)
enabled: true
# The image used for the logging sidecar
image:
repository: "<repository>/<Logging Sidecar Image Name>"
tag: v1.0.0
pullPolicy: IfNotPresent
# The name of the logging sidecar container
name: logging-sidecar
# The port that the logging sidecar listens on
port: 3000
# Log aggregator authentication details
auth:
username: "<Log Aggregator (OpenObserve) Username>"
password: "<Log Aggregator (OpenObserve) Password>"
# Configuration for the backup sidecar
backupSidecar:
# If the backup sidecar should be used (enabled)
enabled: true
# The image used for the backup sidecar
image:
repository: "<repository>/<Backup Sidecar Image Name>"
tag: v1.0.0
pullPolicy: IfNotPresent
# The name of the backup sidecar container
name: backup-sidecar
# The port that the backup sidecar listens on
port: 3001