Major change that brings the chart in line with the others, mainly by relying more heavily on subcharts and *.tpl files for code modularity, reusability, and consistency.
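For reference, the *.tpl files this message refers to use Helm's named-template mechanism: a block is declared once with define and rendered wherever it's needed with include. A minimal sketch (the template and value names here are illustrative, not taken from this chart):

    {{- /* templates/_helpers.tpl: declare the reusable block once */ -}}
    {{- define "example.envVars" -}}
    - name: EXAMPLE_HOST
      valueFrom:
        configMapKeyRef:
          name: {{ .Release.Name }}-example-config
          key: hostname
    {{- end -}}

    {{- /* templates/deployment.yaml: render it, matching the surrounding YAML depth */ -}}
    env:
      {{- include "example.envVars" . | nindent 2 }}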
All checks were successful
Deploy the Helm Chart / build (push) Successful in 15s

This commit is contained in:
Alan Bridgeman 2025-12-24 19:07:22 -06:00
parent 84a322eb28
commit 563a76b84e
34 changed files with 621 additions and 1103 deletions

View file

@@ -34,12 +34,17 @@ jobs:
       helm repo add BridgemanAccessible https://helm.bridgemanaccessible.ca
       # Check if the chart is already in the repository or not
-      REMOTE_CHART_WORK_OUTPUT=$(helm search repo BridgemanAccessible/$CHART_NAME 2>/dev/null || echo "")
-      if [ -n "$REMOTE_CHART_WORK_OUTPUT" ]; then
+      SEARCH_JSON=$(helm search repo BridgemanAccessible/$CHART_NAME --output json)
+      # Parse the JSON to see if our specific chart name exists in the results
+      IS_FOUND=$(echo "$SEARCH_JSON" | jq -r ".[] | select(.name == \"BridgemanAccessible/$CHART_NAME\") | .name")
+      if [ -n "$IS_FOUND" ]; then
         # The chart is already in the repository, so we need to check if the version is the same or not
         echo "Chart already exists in the repository. Checking version..."
-        IFS=' ' read -r -a REMOTE_VERSIONS <<< $(helm search repo BridgemanAccessible/$CHART_NAME --output json | jq '.[].version | @sh')
+        # Parse the versions from the ALREADY fetched JSON
+        IFS=' ' read -r -a REMOTE_VERSIONS <<< $(echo "$SEARCH_JSON" | jq -r ".[] | select(.name == \"BridgemanAccessible/$CHART_NAME\") | .version")
         echo "Remote Chart Versions: ${REMOTE_VERSIONS[@]}"
       else
@@ -47,7 +52,7 @@ jobs:
         echo "Chart not found in the repository. Adding it..."
         # Set a blank value so that it WON'T match the local version
-        IFS=' ' read -r -a REMOTE_VERSIONS <<< ""
+        REMOTE_VERSIONS=()
       fi
       # Just to keep things clean/safe/etc... remove the repo
@@ -76,7 +81,7 @@ jobs:
       fi
       # If the versions match, we want to increment the chart's patch version
-      if [ "has_match" ]; then
+      if [ "$has_match" == "true" ]; then
         echo "Versions match!"
         # Increment the patch version of the local version (Ex. 1.0.0 -> 1.0.1)
@@ -108,251 +113,258 @@ jobs:
       echo "Versions do not match!"
       fi
+      # Download Helm dependencies
+      helm dependency build .
+      # Verify the chart is valid
+      helm lint
+      # Package and push the Helm chart to the OCI registry
       helm package .
       helm push ./$CHART_NAME-$LOCAL_VERSION.tgz oci://${{ secrets.REPOSITORY_HOSTNAME }}/helm
# Deploy to our environment (staging cluster)
deploy: #deploy:
runs-on: self-hosted # runs-on: self-hosted
needs: build # needs: build
outputs: # outputs:
backup-created: ${{ steps.backup.outputs.created }} # backup-created: ${{ steps.backup.outputs.created }}
restore-time: ${{ steps.deploy-helm-chart.outputs.RESTORE_TIME }} # restore-time: ${{ steps.deploy-helm-chart.outputs.RESTORE_TIME }}
env: # env:
NAMESPACE: ciam-services-dashboard # NAMESPACE: ciam-services-dashboard
RELEASE_NAME: services # RELEASE_NAME: services
CHART_NAME: ${{ needs.build.outputs.chart-name }} # CHART_NAME: ${{ needs.build.outputs.chart-name }}
steps: # steps:
# Check if it's already deployed # # Check if it's already deployed
# This determines if we do the backup and takedown before deploying the new version # # This determines if we do the backup and takedown before deploying the new version
- name: Already Deployed Check # - name: Already Deployed Check
id: already-deployed-check # id: already-deployed-check
run: | # run: |
# Check if the Helm chart is already deployed # # Check if the Helm chart is already deployed
if helm status -n ${{ env.NAMESPACE }} ${{ env.RELEASE_NAME }}; then # if helm status -n ${{ env.NAMESPACE }} ${{ env.RELEASE_NAME }}; then
echo "Already deployed" # echo "Already deployed"
echo "already-deployed=true" >> $GITHUB_OUTPUT # echo "already-deployed=true" >> $GITHUB_OUTPUT
else # else
echo "Not deployed" # echo "Not deployed"
echo "already-deployed=false" >> $GITHUB_OUTPUT # echo "already-deployed=false" >> $GITHUB_OUTPUT
fi # fi
#
# Take a backup of the current state of the resources # # Take a backup of the current state of the resources
- name: Backup Data # - name: Backup Data
id: backup # id: backup
if: steps.already-deployed-check.outputs.already-deployed == 'true' # if: steps.already-deployed-check.outputs.already-deployed == 'true'
run: | # run: |
# -- Setup Backup Sidecar metadata variables -- # # -- Setup Backup Sidecar metadata variables --
#
# Variable for the image name (so that there can't be typos etc...) # # Variable for the image name (so that there can't be typos etc...)
BACKUP_SIDECAR_IMAGE_NAME="backup-sidecar" # BACKUP_SIDECAR_IMAGE_NAME="backup-sidecar"
#
# Get the available tags from the image registry # # Get the available tags from the image registry
IFS=' ' read -r -a BACKUP_SIDECAR_TAGS <<< $(skopeo list-tags docker://${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME | jq -r '.Tags | @sh') # IFS=' ' read -r -a BACKUP_SIDECAR_TAGS <<< $(skopeo list-tags docker://${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME | jq -r '.Tags | @sh')
#
# Get the latest tag from the list of tags # # Get the latest tag from the list of tags
LATEST_BACKUP_SIDECAR_TAG=$(echo ${BACKUP_SIDECAR_TAGS[@]} | tr ' ' '\n' | sort -V | tail -n 1) # LATEST_BACKUP_SIDECAR_TAG=$(echo ${BACKUP_SIDECAR_TAGS[@]} | tr ' ' '\n' | sort -V | tail -n 1)
LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG#\'} # Remove leading quote # LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG#\'} # Remove leading quote
LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG%\'} # Remove trailing quote # LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG%\'} # Remove trailing quote
echo "Latest Backup Sidecar Tag: $LATEST_BACKUP_SIDECAR_TAG" # echo "Latest Backup Sidecar Tag: $LATEST_BACKUP_SIDECAR_TAG"
#
# Get/Generate the "full" image name (including the tag) for the backup sidecar # # Get/Generate the "full" image name (including the tag) for the backup sidecar
FULL_BACKUP_SIDECAR_IMAGE_NAME="${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME:$LATEST_BACKUP_SIDECAR_TAG" # FULL_BACKUP_SIDECAR_IMAGE_NAME="${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME:$LATEST_BACKUP_SIDECAR_TAG"
echo "Looking for backup sidecar: $FULL_BACKUP_SIDECAR_IMAGE_NAME" # echo "Looking for backup sidecar: $FULL_BACKUP_SIDECAR_IMAGE_NAME"
#
# -- END: Setup Backup Sidecar metadata variables -- # # -- END: Setup Backup Sidecar metadata variables --
#
# -- Create a backup -- # # -- Create a backup --
#
# Get the name of the running main pod (the one that has the backup sidecar container in it) # # Get the name of the running main pod (the one that has the backup sidecar container in it)
RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" '.items[] | select(.spec.containers[].image == $img) | .metadata.name') # RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" '.items[] | select(.spec.containers[].image == $img) | .metadata.name')
echo "Running Main Pod: $RUNNING_MAIN_POD" # echo "Running Main Pod: $RUNNING_MAIN_POD"
#
# Make the request to create the backups # # Make the request to create the backups
CREATE_BACKUP_OUTPUT=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.RELEASE_NAME }} -- /bin/bash -c "curl -X PUT -sSL http://localhost:4000/backup -o backup-output.txt && cat backup-output.txt") # CREATE_BACKUP_OUTPUT=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.RELEASE_NAME }} -- /bin/bash -c "curl -X PUT -sSL http://localhost:4000/backup -o backup-output.txt && cat backup-output.txt")
echo "Create Backup Output: $CREATE_BACKUP_OUTPUT" # echo "Create Backup Output: $CREATE_BACKUP_OUTPUT"
#
# Parse the output to check if the backup was created successfully # # Parse the output to check if the backup was created successfully
BACKUP_OUTPUT_MESSAGE=$(echo $CREATE_BACKUP_OUTPUT | jq -r '.message') # BACKUP_OUTPUT_MESSAGE=$(echo $CREATE_BACKUP_OUTPUT | jq -r '.message')
if [ "$BACKUP_OUTPUT_MESSAGE" == "Backup created successfully" ]; then # if [ "$BACKUP_OUTPUT_MESSAGE" == "Backup created successfully" ]; then
echo "Backup created successfully." # echo "Backup created successfully."
echo "created=true" >> $GITHUB_OUTPUT # echo "created=true" >> $GITHUB_OUTPUT
else # else
echo "Error creating backup: $BACKUP_OUTPUT_MESSAGE" # echo "Error creating backup: $BACKUP_OUTPUT_MESSAGE"
exit 1 # exit 1
fi # fi
#
# -- END: Create a backup -- # # -- END: Create a backup --
#
# Because we've taken a backup of its current state and it's easier to start fresh etc... # # Because we've taken a backup of its current state and it's easier to start fresh etc...
# We delete all the existing resources (including the Helm chart) before (re-)deploying the new one # # We delete all the existing resources (including the Helm chart) before (re-)deploying the new one
- name: Remove old resources # - name: Remove old resources
if: steps.backup.outputs.created == 'true' # if: steps.backup.outputs.created == 'true'
run: | # run: |
# Use a custom script (found in [Helpful Scripts]()) to uninstall the Helm chart and delete any associated resources # # Use a custom script (found in [Helpful Scripts]()) to uninstall the Helm chart and delete any associated resources
k8s-delete \ # k8s-delete \
--namespace ${{ env.NAMESPACE }} \ # --namespace ${{ env.NAMESPACE }} \
--release-name ${{ env.RELEASE_NAME }} \ # --release-name ${{ env.RELEASE_NAME }} \
--rwx-volumes services-vault-role-vars # --rwx-volumes services-vault-role-vars
#
# Deploy the resources to the cluster # # Deploy the resources to the cluster
- name: Deploy Helm Chart # - name: Deploy Helm Chart
id: deploy-helm-chart # id: deploy-helm-chart
run: | # run: |
FILLED_VALUES_FILE="values.filled.yaml" # FILLED_VALUES_FILE="values.filled.yaml"
#
# Download a filled version of the `values.yaml` file from a secure location # # Download a filled version of the `values.yaml` file from a secure location
DOWNLOAD_FILE=$(curl -sSL https://secure-storage.bridgemanaccessible.ca/services-dashboard/values.filled.yaml?token=${{ secrets.SECURE_STORAGE_TOKEN }} -o $FILLED_VALUES_FILE || echo "Failed to download filled values file.") # DOWNLOAD_FILE=$(curl -sSL https://secure-storage.bridgemanaccessible.ca/services-dashboard/values.filled.yaml?token=${{ secrets.SECURE_STORAGE_TOKEN }} -o $FILLED_VALUES_FILE || echo "Failed to download filled values file.")
if [ "$DOWNLOAD_FILE" == "Failed to download filled values file." ]; then # if [ "$DOWNLOAD_FILE" == "Failed to download filled values file." ]; then
echo "Error: $DOWNLOAD_FILE" # echo "Error: $DOWNLOAD_FILE"
exit 1 # exit 1
fi # fi
#
if [ -n "${{ steps.backup.outputs.created }}" ]; then # if [ -n "${{ steps.backup.outputs.created }}" ]; then
CURR_DATETIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # CURR_DATETIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
#
echo "Setting restoreFromBackup to: $CURR_DATETIME" # echo "Setting restoreFromBackup to: $CURR_DATETIME"
#
# If the backup was created successfully, we want to set the `restoreFromBackup` variable to true in the filled values file # # If the backup was created successfully, we want to set the `restoreFromBackup` variable to true in the filled values file
sed -i "s|restoreFromBackup: \"\"|restoreFromBackup: \"$CURR_DATETIME\"|g" $FILLED_VALUES_FILE # sed -i "s|restoreFromBackup: \"\"|restoreFromBackup: \"$CURR_DATETIME\"|g" $FILLED_VALUES_FILE
#
echo "RESTORE_TIME=$CURR_DATETIME" >> $GITHUB_OUTPUT # echo "RESTORE_TIME=$CURR_DATETIME" >> $GITHUB_OUTPUT
fi # fi
#
# Parse the chart name from the Chart.yaml # # Parse the chart name from the Chart.yaml
CHART_NAME=${{ env.CHART_NAME }} # CHART_NAME=${{ env.CHART_NAME }}
echo "Chart Name: $CHART_NAME" # echo "Chart Name: $CHART_NAME"
#
# Can run `k8s-deploy --help` if you want to see all the options available # # Can run `k8s-deploy --help` if you want to see all the options available
k8s-deploy \ # k8s-deploy \
--namespace ${{ env.NAMESPACE }} \ # --namespace ${{ env.NAMESPACE }} \
--release-name ${{ env.RELEASE_NAME }} \ # --release-name ${{ env.RELEASE_NAME }} \
--filled-values-file $FILLED_VALUES_FILE \ # --filled-values-file $FILLED_VALUES_FILE \
--chart-name $CHART_NAME \ # --chart-name $CHART_NAME \
--rwx-volumes services-vault-role-vars # --rwx-volumes services-vault-role-vars
# Once deployed, we want to restore it to its previous state (if applicable) # Once deployed, we want to restore it to its previous state (if applicable)
restore: #restore:
runs-on: self-hosted # runs-on: self-hosted
needs: deploy # needs: deploy
if: needs.deploy.outputs.backup-created == 'true' # if: needs.deploy.outputs.backup-created == 'true'
env: # env:
NAMESPACE: ciam-services-dashboard # NAMESPACE: ciam-services-dashboard
CONTAINER_NAME: services # CONTAINER_NAME: services
RESTORE_TIME: ${{ needs.deploy.outputs.restore-time }} # RESTORE_TIME: ${{ needs.deploy.outputs.restore-time }}
SITE_HOSTNAME: services.bridgemanaccessible.ca # SITE_HOSTNAME: services.bridgemanaccessible.ca
steps: # steps:
# Restore the data from the backup # # Restore the data from the backup
- name: Restore data # - name: Restore data
run: | # run: |
# -- Setup Backup Sidecar metadata variables -- # # -- Setup Backup Sidecar metadata variables --
#
# Variable for the image name (so that there can't be typos etc...) # # Variable for the image name (so that there can't be typos etc...)
BACKUP_SIDECAR_IMAGE_NAME="backup-sidecar" # BACKUP_SIDECAR_IMAGE_NAME="backup-sidecar"
#
# Get the available tags from the image registry # # Get the available tags from the image registry
IFS=' ' read -r -a BACKUP_SIDECAR_TAGS <<< $(skopeo list-tags docker://${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME | jq -r '.Tags | @sh') # IFS=' ' read -r -a BACKUP_SIDECAR_TAGS <<< $(skopeo list-tags docker://${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME | jq -r '.Tags | @sh')
#
# Get the latest tag from the list of tags # # Get the latest tag from the list of tags
LATEST_BACKUP_SIDECAR_TAG=$(echo ${BACKUP_SIDECAR_TAGS[@]} | tr ' ' '\n' | sort -V | tail -n 1) # LATEST_BACKUP_SIDECAR_TAG=$(echo ${BACKUP_SIDECAR_TAGS[@]} | tr ' ' '\n' | sort -V | tail -n 1)
LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG#\'} # Remove leading quote # LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG#\'} # Remove leading quote
LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG%\'} # Remove trailing quote # LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG%\'} # Remove trailing quote
echo "Latest Backup Sidecar Tag: $LATEST_BACKUP_SIDECAR_TAG" # echo "Latest Backup Sidecar Tag: $LATEST_BACKUP_SIDECAR_TAG"
#
# Get/Generate the "full" image name (including the tag) for the backup sidecar # # Get/Generate the "full" image name (including the tag) for the backup sidecar
FULL_BACKUP_SIDECAR_IMAGE_NAME="${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME:$LATEST_BACKUP_SIDECAR_TAG" # FULL_BACKUP_SIDECAR_IMAGE_NAME="${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME:$LATEST_BACKUP_SIDECAR_TAG"
echo "Looking for backup sidecar: $FULL_BACKUP_SIDECAR_IMAGE_NAME" # echo "Looking for backup sidecar: $FULL_BACKUP_SIDECAR_IMAGE_NAME"
#
# -- END: Setup Backup Sidecar metadata variables -- # # -- END: Setup Backup Sidecar metadata variables --
#
# Get the name of the running main pod (the one that has the backup sidecar container in it) # # Get the name of the running main pod (the one that has the backup sidecar container in it)
RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" '.items[] | select(.spec.containers[].image == $img) | .metadata.name') # RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" '.items[] | select(.spec.containers[].image == $img) | .metadata.name')
echo "Running Main Pod: $RUNNING_MAIN_POD" # echo "Running Main Pod: $RUNNING_MAIN_POD"
#
# Variables for retry logic # # Variables for retry logic
MAX_RETRIES=5 # MAX_RETRIES=5
RETRY_INTERVAL=30 # RETRY_INTERVAL=30
RETRY_COUNT=0 # RETRY_COUNT=0
#
# Because Kubernetes (or at least our staging cluster) can be a bit temperamental, # # Because Kubernetes (or at least our staging cluster) can be a bit temperamental,
# We allow for "resetting" a few times. # # We allow for "resetting" a few times.
# By resetting, we mean re-detecting the main running pod etc... # # By resetting, we mean re-detecting the main running pod etc...
MAX_RESETS=3 # MAX_RESETS=3
RESET_COUNT=0 # RESET_COUNT=0
#
POD_STATUS=$(kubectl -n ${{ env.NAMESPACE }} get pod $RUNNING_MAIN_POD -o json | jq -r '.status.phase') # POD_STATUS=$(kubectl -n ${{ env.NAMESPACE }} get pod $RUNNING_MAIN_POD -o json | jq -r '.status.phase')
echo "Pod Status: $POD_STATUS" # echo "Pod Status: $POD_STATUS"
#
# Retry (wait) a few times if the pod isn't running yet # # Retry (wait) a few times if the pod isn't running yet
while [ "$POD_STATUS" != "Running" ] && [ $RETRY_COUNT -lt $MAX_RETRIES ]; do # while [ "$POD_STATUS" != "Running" ] && [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
echo "Pod is not running yet (Current Status: $POD_STATUS). Waiting for $RETRY_INTERVAL seconds (attempt $((RETRY_COUNT + 1))/$MAX_RETRIES)..." # echo "Pod is not running yet (Current Status: $POD_STATUS). Waiting for $RETRY_INTERVAL seconds (attempt $((RETRY_COUNT + 1))/$MAX_RETRIES)..."
sleep $RETRY_INTERVAL # sleep $RETRY_INTERVAL
#
# Get the current status of the pod # # Get the current status of the pod
POD_STATUS=$(kubectl -n ciam-account-dashboard get pod $RUNNING_MAIN_POD -o json | jq -r '.status.phase') # POD_STATUS=$(kubectl -n ciam-account-dashboard get pod $RUNNING_MAIN_POD -o json | jq -r '.status.phase')
#
# Increment the retry count # # Increment the retry count
RETRY_COUNT=$((RETRY_COUNT + 1)) # RETRY_COUNT=$((RETRY_COUNT + 1))
#
# If the pod is in a failed state, we want to "reset" # # If the pod is in a failed state, we want to "reset"
# Though, we only want to reset a few times (to avoid infinite loops if something else is wrong etc...) # # Though, we only want to reset a few times (to avoid infinite loops if something else is wrong etc...)
if [[ "$POD_STATUS" == "Failed" ]] && [ $RESET_COUNT -lt $MAX_RESETS ]; then # if [[ "$POD_STATUS" == "Failed" ]] && [ $RESET_COUNT -lt $MAX_RESETS ]; then
echo "Pod is in a failed state. Resetting..." # echo "Pod is in a failed state. Resetting..."
#
# Reset the pod and increment the reset count # # Reset the pod and increment the reset count
RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" --arg prev "$RUNNING_MAIN_POD" '.items[] | select(.spec.containers[].image == $img and .metadata.name != $prev) | .metadata.name') # RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" --arg prev "$RUNNING_MAIN_POD" '.items[] | select(.spec.containers[].image == $img and .metadata.name != $prev) | .metadata.name')
echo "Running Main Pod Reset: $RUNNING_MAIN_POD" # echo "Running Main Pod Reset: $RUNNING_MAIN_POD"
#
# Reset the retry count # # Reset the retry count
RETRY_COUNT=0 # RETRY_COUNT=0
echo "Retry Count Reset" # echo "Retry Count Reset"
#
RESET_COUNT=$((RESET_COUNT + 1)) # RESET_COUNT=$((RESET_COUNT + 1))
fi # fi
done # done
#
if [ "$POD_STATUS" != "Running" ]; then # if [ "$POD_STATUS" != "Running" ]; then
echo "Error: Pod did not start running after $((RETRY_COUNT * RETRY_INTERVAL)) seconds." # echo "Error: Pod did not start running after $((RETRY_COUNT * RETRY_INTERVAL)) seconds."
exit 1 # exit 1
fi # fi
#
LATEST_BACKUPS=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.CONTAINER_NAME }} -- /bin/bash -c "curl -sSL http://localhost:4000/backups/latest -o latest-backup.json && cat latest-backup.json") # LATEST_BACKUPS=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.CONTAINER_NAME }} -- /bin/bash -c "curl -sSL http://localhost:4000/backups/latest -o latest-backup.json && cat latest-backup.json")
echo "Latest Backups: $LATEST_BACKUPS" # echo "Latest Backups: $LATEST_BACKUPS"
#
LATEST_DB_BACKUP=$(echo $LATEST_BACKUPS | jq -r '.backups.db') # LATEST_DB_BACKUP=$(echo $LATEST_BACKUPS | jq -r '.backups.db')
echo "Latest DB Backup: $LATEST_DB_BACKUP" # echo "Latest DB Backup: $LATEST_DB_BACKUP"
#
LATEST_VAULT_BACKUP=$(echo $LATEST_BACKUPS | jq -r '.backups.vault') # LATEST_VAULT_BACKUP=$(echo $LATEST_BACKUPS | jq -r '.backups.vault')
echo "Latest Vault Backup: $LATEST_VAULT_BACKUP" # echo "Latest Vault Backup: $LATEST_VAULT_BACKUP"
#
echo "Restoring Vault Backup: $LATEST_VAULT_BACKUP at restore time: ${{ env.RESTORE_TIME }}" # echo "Restoring Vault Backup: $LATEST_VAULT_BACKUP at restore time: ${{ env.RESTORE_TIME }}"
restore_output=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.CONTAINER_NAME }} -- /bin/bash -c "curl -s -X POST -H 'Content-Type: application/json' -d '{ \"restoreTime\": \"${{ env.RESTORE_TIME }}\" }' http://localhost:4000/restore/latest -o restore-output.txt && cat restore-output.txt") # restore_output=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.CONTAINER_NAME }} -- /bin/bash -c "curl -s -X POST -H 'Content-Type: application/json' -d '{ \"restoreTime\": \"${{ env.RESTORE_TIME }}\" }' http://localhost:4000/restore/latest -o restore-output.txt && cat restore-output.txt")
echo "Restore Output: $restore_output" # echo "Restore Output: $restore_output"
#
# Verify that the site starts up after the restore # # Verify that the site starts up after the restore
- name: Verify Restore # - name: Verify Restore
run: | # run: |
SITE_REQUEST_STATUS_CODE=$(curl -sSL https://${{ env.SITE_HOSTNAME }} -w '%{http_code}' -o /dev/null) # SITE_REQUEST_STATUS_CODE=$(curl -sSL https://${{ env.SITE_HOSTNAME }} -w '%{http_code}' -o /dev/null)
#
MAX_RETRIES=5 # MAX_RETRIES=5
RETRY_INTERVAL=30 # RETRY_INTERVAL=30
RETRY_COUNT=0 # RETRY_COUNT=0
#
# Retry (wait) a few times if the site isn't up yet # # Retry (wait) a few times if the site isn't up yet
while [ "$SITE_REQUEST_STATUS_CODE" != "200" ] && [ $RETRY_COUNT -lt $MAX_RETRIES ]; do # while [ "$SITE_REQUEST_STATUS_CODE" != "200" ] && [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
echo "Site is not up yet (Current Status Code: $SITE_REQUEST_STATUS_CODE). Waiting for $RETRY_INTERVAL seconds (attempt $((RETRY_COUNT + 1))/$MAX_RETRIES)..." # echo "Site is not up yet (Current Status Code: $SITE_REQUEST_STATUS_CODE). Waiting for $RETRY_INTERVAL seconds (attempt $((RETRY_COUNT + 1))/$MAX_RETRIES)..."
sleep $RETRY_INTERVAL # sleep $RETRY_INTERVAL
#
# Get the current status of the site # # Get the current status of the site
SITE_REQUEST_STATUS_CODE=$(curl -sSL http://${{ env.SITE_HOSTNAME }} -w '%{http_code}' -o /dev/null) # SITE_REQUEST_STATUS_CODE=$(curl -sSL http://${{ env.SITE_HOSTNAME }} -w '%{http_code}' -o /dev/null)
#
# Increment the retry count # # Increment the retry count
RETRY_COUNT=$((RETRY_COUNT + 1)) # RETRY_COUNT=$((RETRY_COUNT + 1))
done # done
#
if [ "$SITE_REQUEST_STATUS_CODE" != "200" ]; then # if [ "$SITE_REQUEST_STATUS_CODE" != "200" ]; then
echo "Error: Site did not start up after $((RETRY_COUNT * RETRY_INTERVAL)) seconds." # echo "Error: Site did not start up after $((RETRY_COUNT * RETRY_INTERVAL)) seconds."
exit 1 # exit 1
fi # fi
#
echo "Site is up and running (Status Code: $SITE_REQUEST_STATUS_CODE)." # echo "Site is up and running (Status Code: $SITE_REQUEST_STATUS_CODE)."

7 .gitignore vendored
View file

@@ -2,8 +2,11 @@
 values.filled.yaml
 # Exclude any packaged charts
-services-dashboard-*.tgz
+ba-services-*.tgz
+# Ignore Helm dependencies themselves
+charts/
 # Random other stuff
-templates.old
+*.old*
 input.json

View file

@@ -2,14 +2,15 @@
 .helmignore
 # Ignore the Helm chart's packaged tarball
-*.tgz
+ba-services-*.tgz
 # Ignore git related files
 .git
 .gitignore
-# Ignore github automation files
+# Ignore Github automation files
 .github
+.forgejo
 # Ignore inputs file
 input.json
@@ -17,5 +18,9 @@ input.json
 # Ignore the filled in values file
 values.filled.yaml
+# Ignore the lock file for dependencies
+requirements.lock
+Chart.lock
 # Ignore .old (backup) files
 *.old*

18 Chart.lock Normal file
View file

@@ -0,0 +1,18 @@
dependencies:
- name: ba-custom-hashicorp-vault
repository: https://helm.bridgemanaccessible.ca/
version: 1.0.6
- name: nosql-deploy
repository: https://helm.bridgemanaccessible.ca/
version: 1.0.3
- name: db-deploy
repository: https://helm.bridgemanaccessible.ca/
version: 1.0.2
- name: cache-deploy
repository: https://helm.bridgemanaccessible.ca/
version: 1.0.7
- name: ba-logging-sidecar
repository: https://helm.bridgemanaccessible.ca/
version: 1.0.2
digest: sha256:5e94a036b1b834edb75e456b4d2bb0b9242d8f65b8f4c6d58062004234a02005
generated: "2025-12-24T10:43:44.4164896-06:00"
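Chart.lock records the exact resolved version of every dependency plus a digest over the set, which is what makes the CI build reproducible. The two related commands behave differently (both are standard Helm):

    # Re-download charts/ exactly as pinned by this lock file (what the build job above runs)
    helm dependency build .

    # Re-resolve the constraints in Chart.yaml and rewrite Chart.lock
    helm dependency update .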

View file

@@ -1,11 +1,37 @@
-apiVersion: v1
+apiVersion: v2
 appVersion: "1.0.298"
 description: A Helm chart for deploying Bridgeman Accessible's Services Dashboard
 home: https://github.com/Bridgeman-Accessible/services.bridgemanaccessible.ca-helm
 maintainers:
 - email: info@bridgemanaccessible.ca
   name: Bridgeman Accessible
-name: services-dashboard
+name: ba-services
 sources:
 - https://github.com/Bridgeman-Accessible/services.bridgemanaccessible.ca
-version: "1.5.10"
+version: "2.0.0"
+dependencies:
+  - name: ba-custom-hashicorp-vault
+    version: 1.0.6
+    repository: "https://helm.bridgemanaccessible.ca/"
+    alias: vault
+    condition: vault.enabled
+  - name: nosql-deploy
+    version: 1.0.3
+    repository: "https://helm.bridgemanaccessible.ca/"
+    alias: nosql
+    condition: nosql.enabled
+  - name: db-deploy
+    version: 1.0.2
+    repository: "https://helm.bridgemanaccessible.ca/"
+    alias: database
+    condition: database.enabled
+  - name: cache-deploy
+    version: 1.0.7
+    repository: "https://helm.bridgemanaccessible.ca/"
+    alias: cache
+    condition: cache.enabled
+  - name: ba-logging-sidecar
+    version: 1.0.2
+    repository: "https://helm.bridgemanaccessible.ca/"
+    alias: loggingSidecar
+    condition: loggingSidecar.enabled
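With apiVersion v2, dependencies live in Chart.yaml itself, and each alias/condition pair lets a deployer address and toggle a subchart from values. A sketch of the corresponding values.yaml fragment (only the condition paths below are taken from the dependencies above; the real defaults live in the chart's values.yaml):

    vault:
      enabled: true
    nosql:
      enabled: true
    database:
      enabled: true
    cache:
      enabled: true
    loggingSidecar:
      enabled: true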

61 templates/_sidecars.tpl Normal file
View file

@@ -0,0 +1,61 @@
{{- define "backupSidecar" -}}
- name: {{ .Values.backupSidecar.name }}
image: {{ .Values.backupSidecar.image.repository }}:{{ .Values.backupSidecar.image.tag }}
imagePullPolicy: {{ .Values.backupSidecar.image.pullPolicy }}
ports:
- containerPort: {{ .Values.backupSidecar.port }}
env:
# Release name (used to identify the service/release the backups came from in remote storage)
- name: RELEASE_NAME
value: {{ .Release.Name }}
{{- include "db.envVars" . | nindent 2 -}}
{{- if .Values.vault.create.snapshotServer.enabled }}
- name: VAULT_NAME
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-pass-vault-secret
key: vault-name
- name: VAULT_SNAPSHOT_SERVER_PORT
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-pass-vault-snapshot-config
key: port
{{- end }}
{{- if eq .Values.nosql.type "mongodb" }}
# NoSQL storage related environment variables
# Note, we only worry about self-hosted options as cloud-based should have their own backups etc...
- name: STORAGE_ACCOUNT_CONNECTION_STRING
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-mongo-credentials
key: connection-string
{{- end }}
# Redis is used for BullMQ, which is how we schedule backups
# We use this instead of, for instance cron jobs, as it lets us deal with failures
{{- include "cache.envVars" . | nindent 2 }}
resources:
requests:
cpu: {{ .Values.backupSidecar.resources.requests.cpu }}
memory: {{ .Values.backupSidecar.resources.requests.memory }}
ephemeral-storage: {{ .Values.backupSidecar.resources.requests.ephemeralStorage }}
limits:
cpu: {{ .Values.backupSidecar.resources.limits.cpu }}
memory: {{ .Values.backupSidecar.resources.limits.memory }}
ephemeral-storage: {{ .Values.backupSidecar.resources.limits.ephemeralStorage }}
{{- if .Values.vault.create.snapshotServer.enabled }}
volumeMounts:
# Mount for a shared volume for Vault credentials
# This is separate from the app's `role vars` volume because it includes other credentials
# In particular, the unseal keys which we require when/if we restore from the backup
# This volume is also read-only where the `role-vars` is read-write (see description below for why)
- name: creds
mountPath: /vault-creds
readOnly: true
# Mount for a shared volume for the Vault's role variables for the app
# This is required by the backup sidecar because if a restart of the app occurs AFTER a vault has been reset (ex. vault using a different container instance),
# despite the vault data being restored, the app would receive incorrect credentials (because this is ONLY written during setup of the vault)
# The backup sidecar mitigates this by doing its own write (to overwrite) once it's done a restore
- name: role-vars
mountPath: /role_vars
{{- end }}
{{- end -}}
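Because a define renders as flat text, indentation is applied at the call site: include pipes the block through nindent, whose argument has to match the YAML depth where the block lands. The deployment later in this diff consumes this template roughly like so (the conditional and the nindent value mirror the actual call site):

    containers:
      - name: {{ .Release.Name }}
        # ...main application container...
      {{- if .Values.backupSidecar.enabled }}
      {{- /* This injects the YAML defined in the `_sidecars.tpl` file */ -}}
      {{ include "backupSidecar" . | nindent 6 }}
      {{- end }}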

77
templates/_thirdParty.tpl Normal file
View file

@@ -0,0 +1,77 @@
{{- define "stripe.envVars" -}}
# Stripe Environment Variables
- name: STRIPE_PUBLIC_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-stripe-secret
key: live-public-key
- name: STRIPE_SECRET_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-stripe-secret
key: live-secret-key
- name: STRIPE_TEST_PUBLIC_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-stripe-secret
key: test-public-key
- name: STRIPE_TEST_SECRET_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-stripe-secret
key: test-secret-key
{{- end -}}
{{- define "moneris.envVars" -}}
# Moneris Environment Variables
- name: MONERIS_MERCHANT_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: merchant-id
- name: MONERIS_STORE_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: store-id
- name: MONERIS_HT_PROFILE_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: ht-profile-id
- name: MONERIS_APP_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: app-id
- name: MONERIS_APP_SECRET
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: app-secret
- name: MONERIS_TEST_MERCHANT_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-merchant-id
- name: MONERIS_TEST_STORE_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-store-id
- name: MONERIS_TEST_HT_PROFILE_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-ht-profile-id
- name: MONERIS_TEST_APP_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-app-id
- name: MONERIS_TEST_APP_SECRET
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-app-secret
{{- end -}}
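The deployment consumes these the same way, from inside its env list; the guard-plus-include keeps all the payment-provider secret wiring out of deployment.yaml (mirroring the call sites later in this diff, where env entries sit at a nesting depth of 8):

    env:
      # ...application variables...
      {{- if .Values.thirdParty.stripe.enabled }}
      {{ include "stripe.envVars" . | nindent 8 }}
      {{- end }}
      {{- if .Values.thirdParty.moneris.enabled }}
      {{ include "moneris.envVars" . | nindent 8 }}
      {{- end }}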

View file

@@ -1,12 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-cache-configmap
namespace: {{ .Release.Namespace }}
data:
{{- if and (eq .Values.cache.type "redis") (.Values.cache.create) }}
hostname: {{ .Release.Name }}-redis
{{- else }}
hostname: {{ .Values.cache.hostname }}
{{- end }}
port: {{ .Values.cache.port | quote }}

View file

@@ -1,7 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-cache-credentials
type: Opaque
data:
password: {{ .Values.cache.password | b64enc }}

View file

@@ -1,37 +0,0 @@
{{- if and (eq .Values.database.type "postgres") (.Values.database.create) -}}
apiVersion: postgresql.org/v1
kind: PostgresDatabase
metadata:
name: {{ .Release.Name }}-db
namespace: {{ .Release.Namespace }}
spec:
dbName:
envFrom:
configMapKeyRef:
- name: {{ .Release.Name }}-db-credentials
namespace: postgres-controller
key: db-name
dbRoleName:
envFrom:
configMapKeyRef:
- name: {{ .Release.Name }}-db-credentials
namespace: postgres-controller
key: db-user
dbRolePassword:
envFrom:
secretKeyRef:
- name: {{ .Release.Name }}-db-password
namespace: postgres-controller
key: password
# Because we've adopted a "throw away"/"ephemeral"/"container-esque" approach to our database, we want it to be dropped/deleted when everything else is deleted.
# This is because we re-create it and restore from a backup on every deploy.
# Which helps keep the data current and reinforces the utility of the backup and restore systems.
onDeletion:
# Whether to drop the database when the resource is deleted
dropDB: true
# Whether to drop the role when the resource is deleted
dropRole: true
{{- if .Values.database.instance_id }}
dbInstanceId: {{ .Values.database.instance_id }}
{{- end }}
{{- end -}}

View file

@@ -1,16 +0,0 @@
{{- if and (eq .Values.database.type "postgres") (.Values.database.create) -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-db-credentials
namespace: postgres-controller
data:
db-host: {{ .Values.database.host }}
db-name: {{ .Values.database.name }}
db-user: {{ .Values.database.user }}
{{- if .Values.database.port }}
db-port: {{ .Values.database.port | quote }}
{{- else }}
db-port: "5432"
{{- end }}
{{- end -}}

View file

@@ -1,13 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-db-credentials
data:
db-host: {{ .Values.database.host }}
db-name: {{ .Values.database.name }}
db-user: {{ .Values.database.user }}
{{- if .Values.database.port }}
db-port: {{ .Values.database.port | quote }}
{{- else }}
db-port: "5432"
{{- end }}

View file

@@ -1,10 +0,0 @@
{{- if and (eq .Values.database.type "postgres") (.Values.database.create) -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-db-password
namespace: postgres-controller
type: Opaque
data:
password: {{ .Values.database.password | b64enc }}
{{- end -}}

View file

@@ -1,7 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-db-password
type: Opaque
data:
password: {{ .Values.database.password | b64enc }}

View file

@@ -5,7 +5,7 @@ metadata:
   labels:
     app: {{ .Release.Name }}
 spec:
-  replicas: {{ .Values.replicaCount }}
+  replicas: {{ .Values.app.replicaCount }}
   selector:
     matchLabels:
       app: {{ .Release.Name }}
@@ -16,15 +16,15 @@ spec:
     spec:
       containers:
       - name: {{ .Release.Name }}
-        image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
-        imagePullPolicy: {{ .Values.image.pullPolicy }}
+        image: {{ .Values.app.image.repository }}:{{ .Values.app.image.tag }}
+        imagePullPolicy: {{ .Values.app.image.pullPolicy }}
         ports:
-        - containerPort: {{ .Values.container.port }}
+        - containerPort: {{ .Values.app.container.port }}
         env:
         - name: NODE_ENV
-          value: {{ .Values.container.env }}
+          value: {{ .Values.app.container.env }}
         - name: PORT
-          value: "{{ .Values.container.port }}"
+          value: "{{ .Values.app.container.port }}"
        - name: DOMAIN
          value: "bridgemanaccessible.ca"
        - name: ACCOUNTS_DEV_PORT
@@ -57,51 +57,9 @@
configMapKeyRef: configMapKeyRef:
name: {{ .Release.Name }}-oauth-credentials name: {{ .Release.Name }}-oauth-credentials
key: dev-port key: dev-port
# Database credentials {{- include "db.envVars" . | nindent 8 }}
- name: DB_HOST {{- include "cache.envVars" . | nindent 8 }}
valueFrom: {{- include "nosql.envVars" . | nindent 8 }}
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-host
- name: DB_NAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-name
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-db-password
key: password
- name: DB_PORT
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-port
- name: DB_USER
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-user
# NoSQL Credentials
{{- if eq .Values.nosql.type "mongodb" }}
- name: STORAGE_ACCOUNT_CONNECTION_STRING
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-mongo-credentials
key: connection-string
{{- else if eq .Values.nosql.type "azure" }}
- name: STORAGE_ACCOUNT_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-azure-tables-credentials
key: key
- name: STORAGE_ACCOUNT_NAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-azure-tables-config
key: name
{{- end }}
# NoSQL Grouping Names # NoSQL Grouping Names
- name: ACCESS_PROPERTIES_STORAGE_TABLE_NAME - name: ACCESS_PROPERTIES_STORAGE_TABLE_NAME
valueFrom: valueFrom:
@@ -169,22 +127,6 @@
key: vault-port key: vault-port
{{- end }} {{- end }}
{{- end }} {{- end }}
# Caching Server Variables
- name: CACHE_HOSTNAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-cache-configmap
key: hostname
- name: CACHE_PORT
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-cache-configmap
key: port
- name: CACHE_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-cache-credentials
key: password
# Email (Azure Communication Services API credentials, etc...) # Email (Azure Communication Services API credentials, etc...)
{{- if and (.Values.comms.email.enabled) (eq .Values.comms.email.type "acs") }} {{- if and (.Values.comms.email.enabled) (eq .Values.comms.email.type "acs") }}
- name: EMAIL_CONNECTION_STRING - name: EMAIL_CONNECTION_STRING
@@ -195,91 +137,23 @@
{{- end }} {{- end }}
# Third-Party Integrations # Third-Party Integrations
{{- if .Values.thirdParty.stripe.enabled }} {{- if .Values.thirdParty.stripe.enabled }}
- name: STRIPE_PUBLIC_KEY {{- /* This injects the YAML defined in the `_thirdParty.tpl` file */ -}}
valueFrom: {{ include "stripe.envVars" . | nindent 8 }}
secretKeyRef:
name: {{ .Release.Name }}-stripe-secret
key: public-key
- name: STRIPE_SECRET_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-stripe-secret
key: secret-key
- name: STRIPE_TEST_PUBLIC_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-stripe-secret
key: test-public-key
- name: STRIPE_TEST_SECRET_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-stripe-secret
key: test-secret-key
{{- end }} {{- end }}
{{- if .Values.thirdParty.moneris.enabled }} {{- if .Values.thirdParty.moneris.enabled }}
- name: MONERIS_MERCHANT_ID {{- /* This injects the YAML defined in the `_thirdParty.tpl` file */ -}}
valueFrom: {{ include "moneris.envVars" . | nindent 8 }}
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: merchant-id
- name: MONERIS_STORE_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: store-id
- name: MONERIS_HT_PROFILE_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: ht-profile-id
- name: MONERIS_APP_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: app-id
- name: MONERIS_APP_SECRET
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: app-secret
- name: MONERIS_TEST_MERCHANT_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-merchant-id
- name: MONERIS_TEST_STORE_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-store-id
- name: MONERIS_TEST_HT_PROFILE_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-ht-profile-id
- name: MONERIS_TEST_APP_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-app-id
- name: MONERIS_TEST_APP_SECRET
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-moneris-secret
key: test-app-secret
{{- end }} {{- end }}
- name: INITIAL_USER_ID - name: INITIAL_USER_ID
value: {{ .Values.initialUserID | quote }} value: {{ .Values.initialUserID | quote }}
# Logging Sidecar related environment variables
{{- if .Values.loggingSidecar.enabled }} {{- if .Values.loggingSidecar.enabled }}
- name: LOGGING_SIDE_CAR_PORT {{- include "logging.envVars" . | nindent 8 }}
value: {{ .Values.loggingSidecar.port | quote }}
{{- end }} {{- end }}
{{- if ne .Values.app.restoreFromBackup "" }}
# Due to subtleties related to how the entrypoint script detects how/when to proceed # Due to subtleties related to how the entrypoint script detects how/when to proceed
# This environment variable indicates if the entrypoint should wait for a restore to complete # This environment variable indicates if the entrypoint should wait for a restore to complete
{{- if ne .Values.container.restoreFromBackup "" }}
- name: RESTORE_FROM_BACKUP - name: RESTORE_FROM_BACKUP
value: {{ .Values.container.restoreFromBackup | quote }} value: {{ .Values.app.restoreFromBackup | quote }}
{{- end }} {{- end }}
{{- if .Values.vault.create.enabled }} {{- if .Values.vault.create.enabled }}
volumeMounts: volumeMounts:
@@ -292,120 +166,23 @@
mountPath: /role_vars mountPath: /role_vars
readOnly: true readOnly: true
{{- end }} {{- end }}
resources:
requests:
cpu: {{ .Values.app.resources.requests.cpu }}
memory: {{ .Values.app.resources.requests.memory }}
ephemeral-storage: {{ .Values.app.resources.requests.ephemeralStorage }}
limits:
cpu: {{ .Values.app.resources.limits.cpu }}
memory: {{ .Values.app.resources.limits.memory }}
ephemeral-storage: {{ .Values.app.resources.limits.ephemeralStorage }}
# Logging sidecar for sending logs to a log aggregator # Logging sidecar for sending logs to a log aggregator
{{- if .Values.loggingSidecar.enabled }} {{- if .Values.loggingSidecar.enabled }}
- name: {{ .Values.loggingSidecar.name }} {{ include "logging.sidecar" . | nindent 6 }}
image: {{ .Values.loggingSidecar.image.repository }}:{{ .Values.loggingSidecar.image.tag }}
imagePullPolicy: {{ .Values.loggingSidecar.image.pullPolicy }}
ports:
- containerPort: {{ .Values.loggingSidecar.port }}
env:
- name: PORT
value: {{ .Values.loggingSidecar.port | quote }}
# Log aggregator (OpenObserve) auth variables
- name: LOGGER_AUTH_USERNAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-logging-sidecar-credentials
key: username
- name: LOGGER_AUTH_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-logging-sidecar-password
key: password
{{- end }} {{- end }}
# Backup sidecar for backing up service data # Backup sidecar for backing up service data
{{- if .Values.backupSidecar.enabled }} {{- if .Values.backupSidecar.enabled }}
- name: {{ .Values.backupSidecar.name }} {{- /* This injects the YAML defined in the `_sidecars.tpl` file */ -}}
image: {{ .Values.backupSidecar.image.repository }}:{{ .Values.backupSidecar.image.tag }} {{ include "backupSidecar" . | nindent 6 }}
imagePullPolicy: {{ .Values.backupSidecar.image.pullPolicy }}
ports:
- containerPort: {{ .Values.backupSidecar.port }}
env:
# Release name (used to identify the service/release the backups came from in remote storage)
- name: RELEASE_NAME
value: {{ .Release.Name }}
# Database related environment variables
- name: DB_HOST
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-host
- name: DB_NAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-name
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-db-password
key: password
- name: DB_PORT
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-port
- name: DB_USER
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-db-credentials
key: db-user
{{- if .Values.vault.create.snapshotServer.enabled }}
- name: VAULT_NAME
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-vault-secret
key: vault-name
- name: VAULT_SNAPSHOT_SERVER_PORT
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-vault-snapshot-config
key: port
{{- end }}
# NoSQL storage related environment variables
# Note, we only worry about self-hosted options as cloud-based should have their own backups etc...
{{- if eq .Values.nosql.type "mongodb" }}
- name: STORAGE_ACCOUNT_CONNECTION_STRING
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-mongo-credentials
key: connection-string
{{- end }}
# Redis related environment variables
# Redis is used for BullMQ, which is how we schedule backups
# We use this instead of, for instance cron jobs, as it lets us deal with failures
- name: REDIS_HOSTNAME
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-cache-configmap
key: hostname
- name: REDIS_PORT
valueFrom:
configMapKeyRef:
name: {{ .Release.Name }}-cache-configmap
key: port
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-cache-credentials
key: password
{{- if .Values.vault.create.snapshotServer.enabled }}
volumeMounts:
# Mount for a shared volume for Vault credentials
# This is separate from the app's `role vars` volume because it includes other credentials
# In particular, the unseal keys which we require when/if we restore from the backup
# This volume is also read-only where the `role-vars` is read-write (see description below for why)
- name: creds
mountPath: /vault-creds
readOnly: true
# Mount for a shared volume for the Vault's role variables for the app
# This is required by the backup sidecar because if a restart of the app occurs AFTER a vault has been reset (ex. vault using a different container instance),
# despite the vault data being restored the app would receive incorrect credentials (because this is ONLY written during setup of the vault)
# The backup sidecar mitigates this by doing it's own write (to overwrite) once it's done a restore
- name: role-vars
mountPath: /role_vars
{{- end }}
{{- end }} {{- end }}
{{- if .Values.vault.create.enabled }} {{- if .Values.vault.create.enabled }}
volumes: volumes:

View file

@@ -1,10 +0,0 @@
{{- if .Values.loggingSidecar.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-logging-sidecar-credentials
labels:
app: {{ .Values.loggingSidecar.name }}
data:
username: {{ .Values.loggingSidecar.auth.username | quote }}
{{- end -}}

View file

@@ -1,11 +0,0 @@
{{- if .Values.loggingSidecar.enabled -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-logging-sidecar-password
labels:
app: {{ .Values.loggingSidecar.name }}
type: opaque
data:
password: {{ .Values.loggingSidecar.auth.password | b64enc | quote }}
{{- end -}}

View file

@@ -1,15 +0,0 @@
{{- if eq .Values.nosql.type "mongodb" -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-mongo-credentials
type: Opaque
data:
user: {{ .Values.nosql.user | b64enc }}
password: {{ .Values.nosql.password | b64enc }}
{{- if and (.Values.nosql.connectionString) (not .values.nosql.create) }}
connection-string: {{ .Values.nosql.connectionString | b64enc }}
{{- else if .Values.nosql.create }}
connection-string: {{ printf "mongodb://%s:%s@%s-mongo-svc.%s.svc.cluster.local:27017/%s?replicaSet=%s-mongo" .Values.nosql.user .Values.nosql.password .Release.Name .Release.Namespace .Values.nosql.name .Release.Name | b64enc }}
{{- end }}
{{- end -}}

View file

@@ -1,16 +0,0 @@
{{- if and (eq .Values.nosql.type "mongodb") (.Values.nosql.create) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: mongodb-database
namespace: {{ .Release.Namespace }}
subjects:
- kind: ServiceAccount
name: mongodb-database
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: mongodb-database
namespace: {{ .Release.Namespace }}
apiGroup: rbac.authorization.k8s.io
{{- end -}}

View file

@@ -1,22 +0,0 @@
{{- if and (eq .Values.nosql.type "mongodb") (.Values.nosql.create) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: mongodb-database
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- apiGroups:
- ""
resources:
- pods
verbs:
- patch
- delete
- get
{{- end -}}

View file

@@ -1,7 +0,0 @@
{{- if and (eq .Values.nosql.type "mongodb") (.Values.nosql.create) -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: mongodb-database
namespace: {{ .Release.Namespace }}
{{- end -}}

View file

@@ -1,31 +0,0 @@
{{- if and (eq .Values.nosql.type "mongodb") (.Values.nosql.create) -}}
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
name: {{ .Release.Name }}-mongo
namespace: {{ .Release.Namespace }}
spec:
members: {{ .Values.nosql.replicaCount }}
type: ReplicaSet
version: 4.4.0
security:
authentication:
ignoreUnknownUsers: true
modes:
- SCRAM
tls:
enabled: {{ .Values.nosql.tls.enabled }}
readinessProbe:
initialDelaySeconds: 30
periodSeconds: 10
users:
- name: {{ .Values.nosql.user }}
db: {{ .Values.nosql.name }}
passwordSecretRef:
name: {{ .Release.Name }}-mongo-credentials
key: password
roles:
- name: readWrite
db: {{ .Values.nosql.name }}
scramCredentialsSecretName: {{ .Release.Name }}-mongo-scram
{{- end -}}

View file

@@ -1,13 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-nosql-grouping
labels:
app: {{ .Release.Name }}
data:
access-properties: {{ .Values.nosql.grouping.accessProperties }}
locales: {{ .Values.nosql.grouping.locales }}
order-properties: {{ .Values.nosql.grouping.orderProperties }}
price-properties: {{ .Values.nosql.grouping.priceProperties }}
service-category-properties: {{ .Values.nosql.grouping.serviceCategoryProperties }}
service-properties: {{ .Values.nosql.grouping.serviceProperties }}

View file

@@ -1,49 +0,0 @@
{{- if and (eq .Values.cache.type "redis") (.Values.cache.create) -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-redis
labels:
app: redis
spec:
replicas: {{ .Values.cache.replicaCount }}
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- name: redis
image: {{ .Values.cache.image.repository | default "bitnami/redis" }}:{{ .Values.cache.image.tag | default "7.0.5" }}
ports:
- containerPort: {{ .Values.cache.port }}
{{- if .Values.cache.tls.enabled }}
- containerPort: {{ .Values.cache.tls.port }}
{{- end }}
env:
- name: ALLOW_EMPTY_PASSWORD
value: "false"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-cache-credentials
key: password
- name: REDIS_DISABLE_COMMANDS
value: "FLUSHDB,FLUSHALL"
# TLS configuration
#- name: REDIS_TLS_ENABLED
# value: "{{ .Values.cache.tls.enabled }}"
#- name: REDIS_TLS_AUTH_CLIENTS
# value: "yes"
#- name: REDIS_TLS_PORT_NUMBER
# value: "{{ .Values.cache.tls.port }}"
volumeMounts:
- name: redis-data
mountPath: /bitnami/redis
volumes:
- name: redis-data
emptyDir: {}
{{- end -}}

View file

@@ -1,15 +0,0 @@
{{- if and (eq .Values.cache.type "redis") (.Values.cache.create) -}}
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-redis
labels:
app: redis
spec:
ports:
- port: {{ .Values.cache.port }}
targetPort: {{ .Values.cache.port }}
selector:
app: redis
type: ClusterIP
{{- end -}}

View file

@@ -8,4 +8,4 @@ spec:
   ports:
     - protocol: TCP
       port: 80
-      targetPort: {{ .Values.container.port }}
+      targetPort: {{ .Values.app.container.port }}

View file

@@ -1,15 +0,0 @@
{{- if .Values.vault.create.enabled -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Release.Name }}-vault-creds
labels:
app: {{ .Release.Name }}-vault
spec:
storageClassName: {{ .Values.vault.create.storage.storageClass }}
accessModes:
- ReadWriteMany
resources:
requests:
storage: {{ .Values.vault.create.storage.size }}
{{- end -}}

View file

@@ -1,66 +0,0 @@
{{- if and (.Values.vault.create.enabled) (eq .Values.vault.type "hashicorp") -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-vault
labels:
app: {{ .Release.Name }}-vault
spec:
replicas: 1
selector:
matchLabels:
app: {{ .Release.Name }}-vault
template:
metadata:
labels:
app: {{ .Release.Name }}-vault
spec:
containers:
- name: {{ .Release.Name }}-vault
image: {{ .Values.vault.create.image.repository }}:{{ .Values.vault.create.image.tag }}
ports:
- containerPort: 8200
- containerPort: 8201
{{- if .Values.vault.create.snapshotServer.enabled }}
- containerPort: {{ .Values.vault.create.snapshotServer.internalPort }}
{{- end }}
env:
- name: VAULT_ADDR
value: http://0.0.0.0:8200
- name: POLICY_CAPABILITIES
value: {{ .Values.vault.create.policyCapabilities | join "," }}
- name: ROLE_ID_SECRET_NAME
value: VAULT_ROLE_ID
- name: SECRET_ID_SECRET_NAME
value: VAULT_SECRET_ID
{{- if .Values.vault.create.snapshotServer.enabled }}
- name: SNAPSHOT_SERVER_PORT
value: {{ .Values.vault.create.snapshotServer.internalPort | quote }}
{{- end }}
volumeMounts:
- name: vault-data
mountPath: /vault/data
- name: vault-log
mountPath: /vault/logs
- name: vault-creds
mountPath: /vault/creds
- name: vault-role-vars
mountPath: /role_vars
capAdd:
- IPC_LOCK
volumes:
- name: vault-data
emptyDir: {}
- name: vault-log
emptyDir: {}
- name: vault-creds
{{- if .Values.vault.create.snapshotServer.enabled }}
persistentVolumeClaim:
claimName: {{ .Release.Name }}-vault-creds
{{- else }}
emptyDir: {}
{{- end }}
- name: vault-role-vars
persistentVolumeClaim:
claimName: {{ .Release.Name }}-vault-role-vars
{{- end -}}

View file

@@ -1,24 +0,0 @@
{{- if and (.Values.vault.enabled) (eq .Values.vault.type "hashicorp") -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-vault-secret
type: opaque
data:
{{- if .Values.vault.create.enabled }}
# Because we create the Hashicorp Vault instance as part of the Helm chart,
# we can use the name of the created resource (utilizing k8s built-in container connections)
# to connect to the Vault instance without having to hard-code the Vault name.
vault-name: {{ printf "%s-vault" .Release.Name | b64enc }}
# Because we create the Hashicorp Vault instance as part of the Helm chart,
# We know the port that the Vault instance is running on.
vault-port: {{ printf "%d" 80 | b64enc }}
{{- else }}
# Because the Vault wasn't created as part of the Helm chart,
# we need the deployer to specify the name of the Vault instance to connect to.
vault-name: {{ .Values.vault.vaultName | b64enc }}
# Because the Vault wasn't created as part of the Helm chart,
# we need the deployer to specify the port that the Vault instance is running on.
vault-port: {{ .Values.passVault.vaultPort | b64enc }}
{{- end }}
{{- end -}}

View file

@@ -1,21 +0,0 @@
{{- if .Values.vault.create.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ .Release.Name }}-vault-ingress
labels:
app: {{ .Release.Name }}-vault
spec:
ingressClassName: nginx
rules:
- host: {{ .Values.vault.create.ingress.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ .Release.Name }}-vault
port:
number: 80
{{- end -}}

View file

@@ -1,15 +0,0 @@
{{- if .Values.vault.create.enabled -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Release.Name }}-vault-role-vars
labels:
app: {{ .Release.Name }}-vault
spec:
storageClassName: {{ .Values.vault.create.storage.storageClass }}
accessModes:
- ReadWriteMany
resources:
requests:
storage: {{ .Values.vault.create.storage.size }}
{{- end -}}

View file

@@ -1,22 +0,0 @@
{{- if .Values.vault.create.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-vault
labels:
app: {{ .Release.Name }}-vault
spec:
selector:
app: {{ .Release.Name }}-vault
ports:
{{- if .Values.vault.create.snapshotServer.enabled }}
- name: custom-snapshot-server
protocol: TCP
port: {{ .Values.vault.create.snapshotServer.externalPort }}
targetPort: {{ .Values.vault.create.snapshotServer.internalPort }}
{{- end }}
- name: http
protocol: TCP
port: 80
targetPort: 8200
{{- end -}}

View file

@@ -1,8 +0,0 @@
{{- if .Values.vault.create.snapshotServer.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-vault-snapshot-config
data:
port: {{ .Values.vault.create.snapshotServer.externalPort | quote }}
{{- end -}}

View file

@@ -1,27 +1,40 @@
-# The number of instances (replicas) of the app to run
-replicaCount: 1
+# Configuration for the Services Dashboard itself
+app:
+  # The number of instances (replicas) of the app to run
+  replicaCount: 1
 
-image:
-  # The repository of the image to use for the app
-  # Should be in the format `<Image Repository (Ex. containers.example.com)>/<Image Name (Ex. app)>`
-  repository: "<Image Repository>/<Image Name>"
+  # Configurations around the image that is used for the Services Dashboard
+  image:
+    # The repository of the image to use for the app
+    # Should be in the format `<Image Repository (Ex. containers.example.com)>/<Image Name (Ex. app)>`
+    repository: "<Image Repository>/<Image Name>"
 
-  # The specific image tag to use. It's recommended to use some kind of versioning tag scheme as it makes updating the container without having to fully redeploy easier.
-  # Ex. v1.0.0
-  tag: "v1.0.0"
+    # The specific image tag to use. It's recommended to use some kind of versioning tag scheme as it makes updating the container without having to fully redeploy easier.
+    # Ex. v1.0.0
+    tag: "v1.0.0"
 
-  # How often the image should be pulled. The possible values are "Always", "Never", and "IfNotPresent"
-  # It's recommended for production to use "IfNotPresent" to avoid pulling the image every time the pod starts
-  # Though, for development, "Always" is recommended to ensure the latest changes are being tested
-  pullPolicy: "IfNotPresent"
+    # How often the image should be pulled. The possible values are "Always", "Never", and "IfNotPresent"
+    # It's recommended for production to use "IfNotPresent" to avoid pulling the image every time the pod starts
+    # Though, for development, "Always" is recommended to ensure the latest changes are being tested
+    pullPolicy: "IfNotPresent"
 
-container:
-  # The port that the container listens on (Ex. 8080)
-  port: 8080
+  container:
+    # The port that the container listens on (Ex. 8080)
+    port: 8080
 
-  # The environment that the container is running in (Ex. development, production, etc...)
-  # This is used for the NODE_ENV environment variable
-  env: "production"
+    # The environment that the container is running in (Ex. development, production, etc...)
+    # This is used for the NODE_ENV environment variable
+    env: "production"
+
+  resources:
+    requests:
+      cpu: 200m
+      memory: 512Mi
+      ephemeralStorage: 50Mi
+    limits:
+      cpu: 1000m
+      memory: 512Mi
+      ephemeralStorage: 1Gi
 
 # The timestamp of the backup that the entrypoint script should wait for a restore to complete
 restoreFromBackup: ""
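For reference, the new `app.resources` values presumably feed the container's resource stanza. A sketch of what the rendered pod spec would look like under that assumption (note that Kubernetes itself spells the key `ephemeral-storage`, so the template likely remaps the camel-cased value):

```yaml
# Sketch of the rendered container resources (assumed template wiring, not taken from the chart)
resources:
  requests:
    cpu: 200m
    memory: 512Mi
    ephemeral-storage: 50Mi
  limits:
    cpu: 1000m
    memory: 512Mi
    ephemeral-storage: 1Gi
```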
@@ -50,17 +63,15 @@ oauth:
   devPort: "<Dev Port>"
 
 # Configuration for the relational database
+# See the [Database Deployment Helm Chart](https://git.bridgemanaccessible.ca/Bridgeman-Accessible/db-deploy-helm) for more information
 database:
+  # If a relational database should be used
+  enabled: true
+
+  # This override tells the helper: "Ignore the alias (Chart Name - `database`), use this string for K8s resources instead"
+  nameOverride: "db"
+
   # The type of the relational database that is used.
-  #
-  # The following table lists the possible values for this field:
-  #
-  # | Value      | Description                                |
-  # | ---------- | ------------------------------------------ |
-  # | `postgres` | Uses PostgreSQL as the relational database |
-  #
-  # Note, for use of `postgres`, it uses a [`postgres-controller` CRD](https://github.com/AlanBridgeman/postgres-controller) to create the database
-  #
   type: "postgres"
 
   # If set to `true`, the database will be created as part of the deployment
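The `nameOverride` mechanic deserves a concrete illustration. Assuming the subchart follows the usual Helm fullname-helper convention (an assumption; check the subchart's `_helpers.tpl`), the override changes how dependent resources are named:

```yaml
# Hypothetical helper, following the common Helm convention:
# {{- define "database.fullname" -}}
# {{- printf "%s-%s" .Release.Name (.Values.nameOverride | default .Chart.Name) -}}
# {{- end -}}
#
# With the subchart aliased as `database` and nameOverride: "db":
#   my-release-db        <- rendered resource name
# Without the override it would render as:
#   my-release-database
```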
@@ -78,36 +89,68 @@ database:
   # The password that is used to access the database
   password: "<DB Password>"
 
-  # The port that the database listens on
-  #port: 5432
-
-  # Allows for distinguishing between multiple database instances/servers
-  #instance_id: ""
-
 # Configurations for the secrets vault
+# See the [Customized Hashicorp Vault Implementation - Helm Chart](https://git.bridgemanaccessible.ca/Bridgeman-Accessible/custom-hashicorp-vault-helm) for more information
 vault:
   # If a secrets vault should be used
   # That is, if a dedicated software for secret management should be used
   # This should virtually always be true if storing any kind of sensitive information as it's the most secure option
   enabled: true
 
-  # The type of secrets vault to use.
+  # The type of secrets vault (or storage if `enabled` is `false`) to use.
   #
   # Vaults
   # ------
   # The following table lists the supported vault types:
   #
-  # | Type        | Description          | Current Status | Required Fields                                     |
-  # | ----------- | -------------------- | -------------- | --------------------------------------------------- |
-  # | `hashicorp` | Uses Hashicorp Vault | Implemented    | `vaultName` (if `create` not true)                  |
-  # | `azure`     | Uses Azure Key Vault | Implemented    | `vaultName`, `clientId`, `clientSecret`, `tenantId` |
+  # | Type        | Description                                                                                                                                   |
+  # | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
+  # | `hashicorp` | [Customized Hashicorp Vault Implementation - Helm Chart](https://git.bridgemanaccessible.ca/Bridgeman-Accessible/custom-hashicorp-vault-helm)  |
+  # | `azure`     | Uses Azure Key Vault (Required fields: `vaultName`, `clientId`, `clientSecret`, `tenantId`)                                                    |
+  #
+  # Storage
+  # -------
+  # The following table lists the supported storage types.
+  # These are methods OUTSIDE of a dedicated "vault" software.
+  # These are generally **discouraged** as they are less secure.
+  #
+  # | Type   | Description                     | Current Status | Required Fields |
+  # | ------ | ------------------------------- | -------------- | --------------- |
+  # | `file` | Uses a file                     | To-Do          | `path`          |
+  # | `mem`  | Uses in-memory (no persistence) | To-Do          | N/A             |
   #
   type: "<Vault Type>"
 
+  # The name of the vault instance to connect to
+  #
+  # This is relevant if type is set to `hashicorp` or `azure`
+  # Note, if `create` is true this is ignored
+  #
+  # For `hashicorp`, see subchart's `values.yaml` for details
+  # For `azure`, this is the name of the Azure Key Vault instance to connect to
+  #vaultName: "<Vault Name>"
+
+  # The client ID of the Azure Key Vault instance
+  #
+  # ONLY RELEVANT IF `type` IS SET TO `azure`
+  #client-id: <Azure Key Vault Client ID>
+
+  # The client secret of the Azure Key Vault instance
+  #
+  # ONLY RELEVANT IF `type` IS SET TO `azure`
+  #client-secret: <Azure Key Vault Client Secret>
+
+  # The tenant ID of the Azure Key Vault instance
+  #
+  # ONLY RELEVANT IF `type` IS SET TO `azure`
+  #tenant-id: <Azure Key Vault Tenant ID>
+
   # Configurations to create a Hashicorp Vault instance as part of the Helm chart
   #
   # THIS IS ONLY RELEVANT IF `type` IS SET TO `hashicorp`
+  #
+  # See subchart's `values.yaml` for more information
   create:
     # If a Hashicorp Vault instance should be created as part of the Helm chart
     enabled: <true/false>
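As an illustration of the `azure` path using the documented required fields (all values below are placeholders, not taken from a real deployment):

```yaml
vault:
  enabled: true
  type: "azure"
  vaultName: "<Azure Key Vault Name>"
  client-id: "<Azure Key Vault Client ID>"
  client-secret: "<Azure Key Vault Client Secret>"
  tenant-id: "<Azure Key Vault Tenant ID>"
```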
@@ -141,88 +184,35 @@ vault:
       # The internal port that the snapshot server listens on
       internalPort: 8300
 
-    # Configurations for the storage of the created Hashicorp Vault instance
-    storage:
-      # The storage class to use for the created Hashicorp Vault instance's Persistent Volume Claim
-      storageClass: <Storage Class>
-
-      # The size of the created Hashicorp Vault instance's Persistent Volume Claim
-      size: <Storage Size>
+    appRole:
+      # The name of the environment variable/secret that contains the Role ID for the AppRole used by the app
+      roleIDSecretName: VAULT_ROLE_ID
+
+      # The name of the environment variable/secret that contains the Secret ID for the AppRole used by the app
+      secretIDSecretName: VAULT_SECRET_ID
 
     # The capabilities of the policy to create for the app
     policyCapabilities:
       - read
       - create
       - update
 
-  # The name of the vault instance to connect to
-  #
-  # This is relevant if type is set to `hashicorp` or `azure`
-  # Note, if `create` is true this is ignored
-  #
-  # For `hashicorp`, this is generally the hostname of the Hashicorp Vault instance to connect to
-  # For `azure`, this is the name of the Azure Key Vault instance to connect to
-  #vaultName: "<Vault Name>"
-
-  # The port of the vault instance to connect to
-  #
-  # ONLY RELEVANT iF `type` IS SET TO `hashicorp` AND `create` IS NOT TRUE
-  #vaultPort: <Vault Port>
-
-  # The client ID of the Azure Key Vault instance
-  #
-  # ONLY RELEVANT IF `type` IS SET TO `azure`
-  #client-id: <Azure Key Vault Client ID>
-
-  # The client secret of the Azure Key Vault instance
-  #
-  # ONLY RELEVANT IF `type` IS SET TO `azure`
-  #client-secret: <Azure Key Vault Client Secret>
-
-  # The tenant ID of the Azure Key Vault instance
-  #
-  # ONLY RELEVANT IF `type` IS SET TO `azure`
-  #tenant-id: <Azure Key Vault Tenant ID>
-
 # Configuration for the NoSQL database
 # Within the parlance of the system these are often called "properties" databases (and store less structured data)
+# See the [NoSQL Deployment Helm Chart](https://git.bridgemanaccessible.ca/Bridgeman-Accessible/nosql-deploy-helm) for more information
 nosql:
-  # Determines the type of NoSQL storage that is used
-  #
-  # The following table lists the possible values for this field:
-  #
-  # | Value     | Description                                                                                 |
-  # | --------- | ------------------------------------------------------------------------------------------- |
-  # | `mongodb` | Uses MongoDB as the NoSQL database for the default account properties database               |
-  # | `azure`   | Uses Azure Table Storage as the NoSQL database for the default account properties database   |
-  #
+  # If a NoSQL database should be used
+  enabled: true
+
+  # Determines the type of NoSQL storage that is used
   type: <NoSQL Storage Type>
 
-  # If to create a resource as part of the deployment process
   # ONLY relevant if `type` is set to `mongodb`
   # This uses the [MongoDBCommunity CRD](https://github.com/mongodb/mongodb-kubernetes-operator) to create the resource
-  create: false
-
-  # The number of replicas/members as part of the Mongo deployment
-  # See the `member` parameter of the [MongoDBCommunity CRD](https://github.com/mongodb/mongodb-kubernetes-operator) for more information
-  # ONLY relevant if `type` is set to `mongodb` and `create` is set to `true`
-  #replicaCount: <Number of replicas>
-
-  # The TLS configuration for the connection to the NoSQL database
-  # ONLY relevant if `type` is set to `mongodb` and `create` is set to `true`
-  tls:
-    # Whether to use TLS for the connection to the NoSQL database
+  create:
+    # Whether to create a resource as part of the deployment process
     enabled: <true/false>
 
-  # The connection string used to access the NoSQL database
-  # ONLY relevant if `type` is set to `mongodb` and `create` is set to `false`
-  # Should be in the following format: `mongodb://<hostname>:<port>`
-  #connectionString: "mongodb://mongo.example.com:27017"
-
-  # The key used to access the NoSQL database
-  # ONLY relevant if `type` is set to `azure`
-  #key: ""
-
   # The name of the NoSQL database
   name: "<NoSQL DB Name>"
@@ -240,40 +230,28 @@ nosql:
   grouping:
     <Table Intermediate Name>: "<NoSQL Table Name>"
 
-# Configuration for cache server
+# Configuration for Redis cache
+# See the [Cache Deployment Helm Chart](https://git.bridgemanaccessible.ca/Bridgeman-Accessible/cache-deploy-helm) for more information
 cache:
+  # If a cache (Redis) should be used
+  enabled: true
+
+  # This override tells the helper: "Ignore the alias (Chart Name - `cache`), use this string for K8s resources instead"
+  nameOverride: "redis"
+
   type: "redis"
 
-  # If to create a Redis instance/resource as part of the deployment process
-  create: false
-
-  # The image to use for the Redis instance
-  # ONLY relevant if `create` is set to `true`
-  image: {}
-
-  # The number of replicas of the Redis instance
-  # ONLY relevant if `create` is set to `true`
-  #replicaCount: <Number of replicas (Ex. 1)>
-
-  # Hostname of the Redis server
-  # ONLY relevant if `create` is set to `false`
-  #hostName: "<Redis Host Name>"
+  # Configurations if creating Redis resources as part of the deployment
+  create:
+    # Whether to create a Redis instance/resource as part of the deployment process
+    enabled: true
+
+    redisData:
+      size: 2Gi
 
   # The password to use for the Redis server
   password: "<Redis Password>"
 
-  # The port of the Redis server
-  port: "<Redis Port>"
-
-  # Redis TLS Configurations
-  tls:
-    # If TLS is enabled for the Redis instance
-    enabled: false
-
-    # The port of the Redis instance for TLS
-    # ONLY relevant if `tls.enabled` is set to `true`
-    #port: "<TLS Port (Ex. 6380)>"
-
 # Configurations for communication services/relays
 comms:
   # Configurations for Email
@@ -362,6 +340,9 @@ loggingSidecar:
   # If the logging sidecar should be used (enabled)
   enabled: true
 
+  # This override tells the helper: "Ignore the alias (Chart Name - `loggingSidecar`), use this string for K8s resources instead"
+  nameOverride: "logging-sidecar"
+
   # The image used for the logging sidecar
   image:
     repository: "<repository>/<Logging Sidecar Image Name>"
@@ -378,6 +359,16 @@ loggingSidecar:
   auth:
     username: "<Log Aggregator (OpenObserve) Username>"
     password: "<Log Aggregator (OpenObserve) Password>"
 
+  resources:
+    requests:
+      cpu: 20m
+      memory: 128Mi
+      ephemeralStorage: 50Mi
+    limits:
+      cpu: 200m
+      memory: 256Mi
+      ephemeralStorage: 200Mi
+
 # Configuration for the backup sidecar
 backupSidecar:
@@ -394,4 +385,14 @@ backupSidecar:
     name: backup-sidecar
 
   # The port that the backup sidecar listens on
   port: 3001
+
+  resources:
+    requests:
+      cpu: 20m
+      memory: 128Mi
+      ephemeralStorage: 50Mi
+    limits:
+      cpu: 500m
+      memory: 256Mi
+      ephemeralStorage: 200Mi