Major change that brought the chart in line with the others, mainly by relying more heavily on subcharts and *.tpl files for code modularity, reusability, and consistency.
All checks were successful
Deploy the Helm Chart / build (push) Successful in 15s
parent 84a322eb28 · commit 563a76b84e
34 changed files with 621 additions and 1103 deletions
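The modularity described above relies on Helm named templates: shared YAML is defined once in a templates/_*.tpl file and injected with include wherever it is needed. A minimal sketch of the pattern, with an illustrative helper name and keys (not taken verbatim from this chart):

{{- define "example.envVars" -}}
# Illustrative shared environment variables, defined once and reused by the app and its sidecars
- name: DB_HOST
  valueFrom:
    configMapKeyRef:
      name: {{ .Release.Name }}-db-credentials
      key: db-host
{{- end -}}

A template such as templates/deployment.yaml then pulls the block in with:

        env:
          {{- include "example.envVars" . | nindent 10 }}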
|
|
@@ -34,12 +34,17 @@ jobs:
|
|||
helm repo add BridgemanAccessible https://helm.bridgemanaccessible.ca
|
||||
|
||||
# Check if the chart is already in the repository or not
|
||||
REMOTE_CHART_WORK_OUTPUT=$(helm search repo BridgemanAccessible/$CHART_NAME 2>/dev/null || echo "")
|
||||
if [ -n "$REMOTE_CHART_WORK_OUTPUT" ]; then
|
||||
SEARCH_JSON=$(helm search repo BridgemanAccessible/$CHART_NAME --output json)
|
||||
|
||||
# Parse the JSON to see if our specific chart name exists in the results
|
||||
IS_FOUND=$(echo "$SEARCH_JSON" | jq -r ".[] | select(.name == \"BridgemanAccessible/$CHART_NAME\") | .name")
|
||||
|
||||
if [ -n "$IS_FOUND" ]; then
|
||||
# The chart is already in the repository, so we need to check if the version is the same or not
|
||||
echo "Chart already exists in the repository. Checking version..."
|
||||
|
||||
IFS=' ' read -r -a REMOTE_VERSIONS <<< $(helm search repo BridgemanAccessible/$CHART_NAME --output json | jq '.[].version | @sh')
|
||||
# Parse the versions from the ALREADY fetched JSON
|
||||
IFS=' ' read -r -a REMOTE_VERSIONS <<< $(echo "$SEARCH_JSON" | jq -r ".[] | select(.name == \"BridgemanAccessible/$CHART_NAME\") | .version")
|
||||
|
||||
echo "Remote Chart Versions: ${REMOTE_VERSIONS[@]}"
|
||||
else
|
||||
|
|
@@ -47,7 +52,7 @@ jobs:
|
|||
echo "Chart not found in the repository. Adding it..."
|
||||
|
||||
# Set a blank value so that it WON'T match the local version
|
||||
IFS=' ' read -r -a REMOTE_VERSIONS <<< ""
|
||||
REMOTE_VERSIONS=()
|
||||
fi
|
||||
|
||||
# Just to keep things clean/safe/etc... remove the repo
|
||||
|
|
@@ -76,7 +81,7 @@ jobs:
|
|||
fi
|
||||
|
||||
# If the versions match, we want to increment the chart's patch version
|
||||
if [ "has_match" ]; then
|
||||
if [ "$has_match" == "true" ]; then
|
||||
echo "Versions match!"
|
||||
|
||||
# Increment the patch version of the local version (Ex. 1.0.0 -> 1.0.1)
|
||||
|
|
@@ -108,251 +113,258 @@ jobs:
|
|||
echo "Versions do not match!"
|
||||
fi
|
||||
|
||||
# Download Helm dependencies
|
||||
helm dependency build .
|
||||
|
||||
# Verify the chart is valid
|
||||
helm lint
|
||||
|
||||
# Package and push the Helm chart to the OCI registry
|
||||
helm package .
|
||||
helm push ./$CHART_NAME-$LOCAL_VERSION.tgz oci://${{ secrets.REPOSITORY_HOSTNAME }}/helm
|
||||
|
||||
# Deploy to our environment (staging cluster)
|
||||
deploy:
|
||||
runs-on: self-hosted
|
||||
needs: build
|
||||
outputs:
|
||||
backup-created: ${{ steps.backup.outputs.created }}
|
||||
restore-time: ${{ steps.deploy-helm-chart.outputs.RESTORE_TIME }}
|
||||
env:
|
||||
NAMESPACE: ciam-services-dashboard
|
||||
RELEASE_NAME: services
|
||||
CHART_NAME: ${{ needs.build.outputs.chart-name }}
|
||||
steps:
|
||||
# Check if it's already deployed
|
||||
# This determines if we do the backup and takedown before deploying the new version
|
||||
- name: Already Deployed Check
|
||||
id: already-deployed-check
|
||||
run: |
|
||||
# Check if the Helm chart is already deployed
|
||||
if helm status -n ${{ env.NAMESPACE }} ${{ env.RELEASE_NAME }}; then
|
||||
echo "Already deployed"
|
||||
echo "already-deployed=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "Not deployed"
|
||||
echo "already-deployed=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
# Take a backup of the current state of the resources
|
||||
- name: Backup Data
|
||||
id: backup
|
||||
if: steps.already-deployed-check.outputs.already-deployed == 'true'
|
||||
run: |
|
||||
# -- Setup Backup Sidecar metadata variables --
|
||||
|
||||
# Variable for the image name (so that there can't be typos etc...)
|
||||
BACKUP_SIDECAR_IMAGE_NAME="backup-sidecar"
|
||||
|
||||
# Get the available tags from the image registry
|
||||
IFS=' ' read -r -a BACKUP_SIDECAR_TAGS <<< $(skopeo list-tags docker://${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME | jq -r '.Tags | @sh')
|
||||
|
||||
# Get the latest tag from the list of tags
|
||||
LATEST_BACKUP_SIDECAR_TAG=$(echo ${BACKUP_SIDECAR_TAGS[@]} | tr ' ' '\n' | sort -V | tail -n 1)
|
||||
LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG#\'} # Remove leading quote
|
||||
LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG%\'} # Remove trailing quote
|
||||
echo "Latest Backup Sidecar Tag: $LATEST_BACKUP_SIDECAR_TAG"
|
||||
|
||||
# Get/Generate the "full" image name (including the tag) for the backup sidecar
|
||||
FULL_BACKUP_SIDECAR_IMAGE_NAME="${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME:$LATEST_BACKUP_SIDECAR_TAG"
|
||||
echo "Looking for backup sidecar: $FULL_BACKUP_SIDECAR_IMAGE_NAME"
|
||||
|
||||
# -- END: Setup Backup Sidecar metadata variables --
|
||||
|
||||
# -- Create a backup --
|
||||
|
||||
# Get the name of the running main pod (the one that has the backup sidecar container in it)
|
||||
RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" '.items[] | select(.spec.containers[].image == $img) | .metadata.name')
|
||||
echo "Running Main Pod: $RUNNING_MAIN_POD"
|
||||
|
||||
# Make the request to create the backups
|
||||
CREATE_BACKUP_OUTPUT=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.RELEASE_NAME }} -- /bin/bash -c "curl -X PUT -sSL http://localhost:4000/backup -o backup-output.txt && cat backup-output.txt")
|
||||
echo "Create Backup Output: $CREATE_BACKUP_OUTPUT"
|
||||
|
||||
# Parse the output to check if the backup was created successfully
|
||||
BACKUP_OUTPUT_MESSAGE=$(echo $CREATE_BACKUP_OUTPUT | jq -r '.message')
|
||||
if [ "$BACKUP_OUTPUT_MESSAGE" == "Backup created successfully" ]; then
|
||||
echo "Backup created successfully."
|
||||
echo "created=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "Error creating backup: $BACKUP_OUTPUT_MESSAGE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# -- END: Create a backup --
|
||||
|
||||
# Because we've taken a backup of its current state and it's easier to start fresh etc...
|
||||
# We delete all the existing resources (including the Helm chart) before (re-)deploying the new one
|
||||
- name: Remove old resources
|
||||
if: steps.backup.outputs.created == 'true'
|
||||
run: |
|
||||
# Use a custom script (found in [Helpful Scripts]()) to uninstall the Helm chart and delete any associated resources
|
||||
k8s-delete \
|
||||
--namespace ${{ env.NAMESPACE }} \
|
||||
--release-name ${{ env.RELEASE_NAME }} \
|
||||
--rwx-volumes services-vault-role-vars
|
||||
|
||||
# Deploy the resources to the cluster
|
||||
- name: Deploy Helm Chart
|
||||
id: deploy-helm-chart
|
||||
run: |
|
||||
FILLED_VALUES_FILE="values.filled.yaml"
|
||||
|
||||
# Download a filled version of the `values.yaml` file from a secure location
|
||||
DOWNLOAD_FILE=$(curl -sSL https://secure-storage.bridgemanaccessible.ca/services-dashboard/values.filled.yaml?token=${{ secrets.SECURE_STORAGE_TOKEN }} -o $FILLED_VALUES_FILE || echo "Failed to download filled values file.")
|
||||
if [ "$DOWNLOAD_FILE" == "Failed to download filled values file." ]; then
|
||||
echo "Error: $DOWNLOAD_FILE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -n "${{ steps.backup.outputs.created }}" ]; then
|
||||
CURR_DATETIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
|
||||
echo "Setting restoreFromBackup to: $CURR_DATETIME"
|
||||
|
||||
# If the backup was created successfully, we want to set the `restoreFromBackup` variable to true in the filled values file
|
||||
sed -i "s|restoreFromBackup: \"\"|restoreFromBackup: \"$CURR_DATETIME\"|g" $FILLED_VALUES_FILE
|
||||
|
||||
echo "RESTORE_TIME=$CURR_DATETIME" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
# Parse the chart name from the Chart.yaml
|
||||
CHART_NAME=${{ env.CHART_NAME }}
|
||||
echo "Chart Name: $CHART_NAME"
|
||||
|
||||
# Can run `k8s-deploy --help` if you want to see all the options available
|
||||
k8s-deploy \
|
||||
--namespace ${{ env.NAMESPACE }} \
|
||||
--release-name ${{ env.RELEASE_NAME }} \
|
||||
--filled-values-file $FILLED_VALUES_FILE \
|
||||
--chart-name $CHART_NAME \
|
||||
--rwx-volumes services-vault-role-vars
|
||||
#deploy:
|
||||
# runs-on: self-hosted
|
||||
# needs: build
|
||||
# outputs:
|
||||
# backup-created: ${{ steps.backup.outputs.created }}
|
||||
# restore-time: ${{ steps.deploy-helm-chart.outputs.RESTORE_TIME }}
|
||||
# env:
|
||||
# NAMESPACE: ciam-services-dashboard
|
||||
# RELEASE_NAME: services
|
||||
# CHART_NAME: ${{ needs.build.outputs.chart-name }}
|
||||
# steps:
|
||||
# # Check if it's already deployed
|
||||
# # This determines if we do the backup and takedown before deploying the new version
|
||||
# - name: Already Deployed Check
|
||||
# id: already-deployed-check
|
||||
# run: |
|
||||
# # Check if the Helm chart is already deployed
|
||||
# if helm status -n ${{ env.NAMESPACE }} ${{ env.RELEASE_NAME }}; then
|
||||
# echo "Already deployed"
|
||||
# echo "already-deployed=true" >> $GITHUB_OUTPUT
|
||||
# else
|
||||
# echo "Not deployed"
|
||||
# echo "already-deployed=false" >> $GITHUB_OUTPUT
|
||||
# fi
|
||||
#
|
||||
# # Tale a backup of the current state of the resources
|
||||
# - name: Backup Data
|
||||
# id: backup
|
||||
# if: steps.already-deployed-check.outputs.already-deployed == 'true'
|
||||
# run: |
|
||||
# # -- Setup Backup Sidecar metadata variables --
|
||||
#
|
||||
# # Variable for the image name (so that there can't be typos etc...)
|
||||
# BACKUP_SIDECAR_IMAGE_NAME="backup-sidecar"
|
||||
#
|
||||
# # Get the available tags from the image registry
|
||||
# IFS=' ' read -r -a BACKUP_SIDECAR_TAGS <<< $(skopeo list-tags docker://${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME | jq -r '.Tags | @sh')
|
||||
#
|
||||
# # Get the latest tag from the list of tags
|
||||
# LATEST_BACKUP_SIDECAR_TAG=$(echo ${BACKUP_SIDECAR_TAGS[@]} | tr ' ' '\n' | sort -V | tail -n 1)
|
||||
# LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG#\'} # Remove leading quote
|
||||
# LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG%\'} # Remove trailing quote
|
||||
# echo "Latest Backup Sidecar Tag: $LATEST_BACKUP_SIDECAR_TAG"
|
||||
#
|
||||
# # Get/Generate the "full" image name (including the tag) for the backup sidecar
|
||||
# FULL_BACKUP_SIDECAR_IMAGE_NAME="${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME:$LATEST_BACKUP_SIDECAR_TAG"
|
||||
# echo "Looking for backup sidecar: $FULL_BACKUP_SIDECAR_IMAGE_NAME"
|
||||
#
|
||||
# # -- END: Setup Backup Sidecar metadata variables --
|
||||
#
|
||||
# # -- Create a backup --
|
||||
#
|
||||
# # Get the name of the running main pod (the one that has the backup sidecar container in it)
|
||||
# RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" '.items[] | select(.spec.containers[].image == $img) | .metadata.name')
|
||||
# echo "Running Main Pod: $RUNNING_MAIN_POD"
|
||||
#
|
||||
# # Make the request to create the backups
|
||||
# CREATE_BACKUP_OUTPUT=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.RELEASE_NAME }} -- /bin/bash -c "curl -X PUT -sSL http://localhost:4000/backup -o backup-output.txt && cat backup-output.txt")
|
||||
# echo "Create Backup Output: $CREATE_BACKUP_OUTPUT"
|
||||
#
|
||||
# # Parse the output to check if the backup was created successfully
|
||||
# BACKUP_OUTPUT_MESSAGE=$(echo $CREATE_BACKUP_OUTPUT | jq -r '.message')
|
||||
# if [ "$BACKUP_OUTPUT_MESSAGE" == "Backup created successfully" ]; then
|
||||
# echo "Backup created successfully."
|
||||
# echo "created=true" >> $GITHUB_OUTPUT
|
||||
# else
|
||||
# echo "Error creating backup: $BACKUP_OUTPUT_MESSAGE"
|
||||
# exit 1
|
||||
# fi
|
||||
#
|
||||
# # -- END: Create a backup --
|
||||
#
|
||||
# # Because we've taken a backup of it's current state and it's easier to start fresh etc...
|
||||
# # We delete all the existing resources (including the Helm chart) before (re-)deploying the new one
|
||||
# - name: Remove old resources
|
||||
# if: steps.backup.outputs.created == 'true'
|
||||
# run: |
|
||||
# # Use a custom script (found in [Helpful Scripts]()) to uninstall the Helm chart and delete any associated resources
|
||||
# k8s-delete \
|
||||
# --namespace ${{ env.NAMESPACE }} \
|
||||
# --release-name ${{ env.RELEASE_NAME }} \
|
||||
# --rwx-volumes services-vault-role-vars
|
||||
#
|
||||
# # Deploy the resources to the cluster
|
||||
# - name: Deploy Helm Chart
|
||||
# id: deploy-helm-chart
|
||||
# run: |
|
||||
# FILLED_VALUES_FILE="values.filled.yaml"
|
||||
#
|
||||
# # Download a filled version of the `values.yaml` file from a secure location
|
||||
# DOWNLOAD_FILE=$(curl -sSL https://secure-storage.bridgemanaccessible.ca/services-dashboard/values.filled.yaml?token=${{ secrets.SECURE_STORAGE_TOKEN }} -o $FILLED_VALUES_FILE || echo "Failed to download filled values file.")
|
||||
# if [ "$DOWNLOAD_FILE" == "Failed to download filled values file." ]; then
|
||||
# echo "Error: $DOWNLOAD_FILE"
|
||||
# exit 1
|
||||
# fi
|
||||
#
|
||||
# if [ -n "${{ steps.backup.outputs.created }}" ]; then
|
||||
# CURR_DATETIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
#
|
||||
# echo "Setting restoreFromBackup to: $CURR_DATETIME"
|
||||
#
|
||||
# # If the backup was created successfully, we want to set the `restoreFromBackup` variable to true in the filled values file
|
||||
# sed -i "s|restoreFromBackup: \"\"|restoreFromBackup: \"$CURR_DATETIME\"|g" $FILLED_VALUES_FILE
|
||||
#
|
||||
# echo "RESTORE_TIME=$CURR_DATETIME" >> $GITHUB_OUTPUT
|
||||
# fi
|
||||
#
|
||||
# # Parse the chart name from the Chart.yaml
|
||||
# CHART_NAME=${{ env.CHART_NAME }}
|
||||
# echo "Chart Name: $CHART_NAME"
|
||||
#
|
||||
# # Can run `k8s-deploy --help` if you want to see all the options available
|
||||
# k8s-deploy \
|
||||
# --namespace ${{ env.NAMESPACE }} \
|
||||
# --release-name ${{ env.RELEASE_NAME }} \
|
||||
# --filled-values-file $FILLED_VALUES_FILE \
|
||||
# --chart-name $CHART_NAME \
|
||||
# --rwx-volumes services-vault-role-vars
|
||||
|
||||
# Once deployed, we want to restore it to its previous state (if applicable)
|
||||
restore:
|
||||
runs-on: self-hosted
|
||||
needs: deploy
|
||||
if: needs.deploy.outputs.backup-created == 'true'
|
||||
env:
|
||||
NAMESPACE: ciam-services-dashboard
|
||||
CONTAINER_NAME: services
|
||||
RESTORE_TIME: ${{ needs.deploy.outputs.restore-time }}
|
||||
SITE_HOSTNAME: services.bridgemanaccessible.ca
|
||||
steps:
|
||||
# Restore the data from the backup
|
||||
- name: Restore data
|
||||
run: |
|
||||
# -- Setup Backup Sidecar metadata variables --
|
||||
|
||||
# Variable for the image name (so that there can't be typos etc...)
|
||||
BACKUP_SIDECAR_IMAGE_NAME="backup-sidecar"
|
||||
|
||||
# Get the available tags from the image registry
|
||||
IFS=' ' read -r -a BACKUP_SIDECAR_TAGS <<< $(skopeo list-tags docker://${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME | jq -r '.Tags | @sh')
|
||||
|
||||
# Get the latest tag from the list of tags
|
||||
LATEST_BACKUP_SIDECAR_TAG=$(echo ${BACKUP_SIDECAR_TAGS[@]} | tr ' ' '\n' | sort -V | tail -n 1)
|
||||
LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG#\'} # Remove leading quote
|
||||
LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG%\'} # Remove trailing quote
|
||||
echo "Latest Backup Sidecar Tag: $LATEST_BACKUP_SIDECAR_TAG"
|
||||
|
||||
# Get/Generate the "full" image name (including the tag) for the backup sidecar
|
||||
FULL_BACKUP_SIDECAR_IMAGE_NAME="${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME:$LATEST_BACKUP_SIDECAR_TAG"
|
||||
echo "Looking for backup sidecar: $FULL_BACKUP_SIDECAR_IMAGE_NAME"
|
||||
|
||||
# -- END: Setup Backup Sidecar metadata variables --
|
||||
|
||||
# Get the name of the running main pod (the one that has the backup sidecar container in it)
|
||||
RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" '.items[] | select(.spec.containers[].image == $img) | .metadata.name')
|
||||
echo "Running Main Pod: $RUNNING_MAIN_POD"
|
||||
|
||||
# Variables for retry logic
|
||||
MAX_RETRIES=5
|
||||
RETRY_INTERVAL=30
|
||||
RETRY_COUNT=0
|
||||
|
||||
# Because Kubernetes (or at least our staging cluster) can be a bit temperamental,
|
||||
# We allow for "resetting" a few times.
|
||||
# By resetting, we mean re-detecting the main running pod etc...
|
||||
MAX_RESETS=3
|
||||
RESET_COUNT=0
|
||||
|
||||
POD_STATUS=$(kubectl -n ${{ env.NAMESPACE }} get pod $RUNNING_MAIN_POD -o json | jq -r '.status.phase')
|
||||
echo "Pod Status: $POD_STATUS"
|
||||
|
||||
# Retry (wait) a few times if the pod isn't running yet
|
||||
while [ "$POD_STATUS" != "Running" ] && [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
|
||||
echo "Pod is not running yet (Current Status: $POD_STATUS). Waiting for $RETRY_INTERVAL seconds (attempt $((RETRY_COUNT + 1))/$MAX_RETRIES)..."
|
||||
sleep $RETRY_INTERVAL
|
||||
|
||||
# Get the current status of the pod
|
||||
POD_STATUS=$(kubectl -n ${{ env.NAMESPACE }} get pod $RUNNING_MAIN_POD -o json | jq -r '.status.phase')
|
||||
|
||||
# Increment the retry count
|
||||
RETRY_COUNT=$((RETRY_COUNT + 1))
|
||||
|
||||
# If the pod is in a failed state, we want to "reset"
|
||||
# Though, we only want to reset a few times (to avoid infinite loops if something else is wrong etc...)
|
||||
if [[ "$POD_STATUS" == "Failed" ]] && [ $RESET_COUNT -lt $MAX_RESETS ]; then
|
||||
echo "Pod is in a failed state. Resetting..."
|
||||
|
||||
# Reset the pod and increment the reset count
|
||||
RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" --arg prev "$RUNNING_MAIN_POD" '.items[] | select(.spec.containers[].image == $img and .metadata.name != $prev) | .metadata.name')
|
||||
echo "Running Main Pod Reset: $RUNNING_MAIN_POD"
|
||||
|
||||
# Reset the retry count
|
||||
RETRY_COUNT=0
|
||||
echo "Retry Count Reset"
|
||||
|
||||
RESET_COUNT=$((RESET_COUNT + 1))
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$POD_STATUS" != "Running" ]; then
|
||||
echo "Error: Pod did not start running after $((RETRY_COUNT * RETRY_INTERVAL)) seconds."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
LATEST_BACKUPS=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.CONTAINER_NAME }} -- /bin/bash -c "curl -sSL http://localhost:4000/backups/latest -o latest-backup.json && cat latest-backup.json")
|
||||
echo "Latest Backups: $LATEST_BACKUPS"
|
||||
|
||||
LATEST_DB_BACKUP=$(echo $LATEST_BACKUPS | jq -r '.backups.db')
|
||||
echo "Latest DB Backup: $LATEST_DB_BACKUP"
|
||||
|
||||
LATEST_VAULT_BACKUP=$(echo $LATEST_BACKUPS | jq -r '.backups.vault')
|
||||
echo "Latest Vault Backup: $LATEST_VAULT_BACKUP"
|
||||
|
||||
echo "Restoring Vault Backup: $LATEST_VAULT_BACKUP at restore time: ${{ env.RESTORE_TIME }}"
|
||||
restore_output=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.CONTAINER_NAME }} -- /bin/bash -c "curl -s -X POST -H 'Content-Type: application/json' -d '{ \"restoreTime\": \"${{ env.RESTORE_TIME }}\" }' http://localhost:4000/restore/latest -o restore-output.txt && cat restore-output.txt")
|
||||
echo "Restore Output: $restore_output"
|
||||
|
||||
# Verify that the site starts up after the restore
|
||||
- name: Verify Restore
|
||||
run: |
|
||||
SITE_REQUEST_STATUS_CODE=$(curl -sSL https://${{ env.SITE_HOSTNAME }} -w '%{http_code}' -o /dev/null)
|
||||
|
||||
MAX_RETRIES=5
|
||||
RETRY_INTERVAL=30
|
||||
RETRY_COUNT=0
|
||||
|
||||
# Retry (wait) a few times if the site isn't up yet
|
||||
while [ "$SITE_REQUEST_STATUS_CODE" != "200" ] && [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
|
||||
echo "Site is not up yet (Current Status Code: $SITE_REQUEST_STATUS_CODE). Waiting for $RETRY_INTERVAL seconds (attempt $((RETRY_COUNT + 1))/$MAX_RETRIES)..."
|
||||
sleep $RETRY_INTERVAL
|
||||
|
||||
# Get the current status of the site
|
||||
SITE_REQUEST_STATUS_CODE=$(curl -sSL http://${{ env.SITE_HOSTNAME }} -w '%{http_code}' -o /dev/null)
|
||||
|
||||
# Increment the retry count
|
||||
RETRY_COUNT=$((RETRY_COUNT + 1))
|
||||
done
|
||||
|
||||
if [ "$SITE_REQUEST_STATUS_CODE" != "200" ]; then
|
||||
echo "Error: Site did not start up after $((RETRY_COUNT * RETRY_INTERVAL)) seconds."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Site is up and running (Status Code: $SITE_REQUEST_STATUS_CODE)."
|
||||
#restore:
|
||||
# runs-on: self-hosted
|
||||
# needs: deploy
|
||||
# if: needs.deploy.outputs.backup-created == 'true'
|
||||
# env:
|
||||
# NAMESPACE: ciam-services-dashboard
|
||||
# CONTAINER_NAME: services
|
||||
# RESTORE_TIME: ${{ needs.deploy.outputs.restore-time }}
|
||||
# SITE_HOSTNAME: services.bridgemanaccessible.ca
|
||||
# steps:
|
||||
# # Restore the data from the backup
|
||||
# - name: Restore data
|
||||
# run: |
|
||||
# # -- Setup Backup Sidecar metadata variables --
|
||||
#
|
||||
# # Variable for the image name (so that there can't be typos etc...)
|
||||
# BACKUP_SIDECAR_IMAGE_NAME="backup-sidecar"
|
||||
#
|
||||
# # Get the available tags from the image registry
|
||||
# IFS=' ' read -r -a BACKUP_SIDECAR_TAGS <<< $(skopeo list-tags docker://${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME | jq -r '.Tags | @sh')
|
||||
#
|
||||
# # Get the latest tag from the list of tags
|
||||
# LATEST_BACKUP_SIDECAR_TAG=$(echo ${BACKUP_SIDECAR_TAGS[@]} | tr ' ' '\n' | sort -V | tail -n 1)
|
||||
# LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG#\'} # Remove leading quote
|
||||
# LATEST_BACKUP_SIDECAR_TAG=${LATEST_BACKUP_SIDECAR_TAG%\'} # Remove trailing quote
|
||||
# echo "Latest Backup Sidecar Tag: $LATEST_BACKUP_SIDECAR_TAG"
|
||||
#
|
||||
# # Get/Generate the "full" image name (including the tag) for the backup sidecar
|
||||
# FULL_BACKUP_SIDECAR_IMAGE_NAME="${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$BACKUP_SIDECAR_IMAGE_NAME:$LATEST_BACKUP_SIDECAR_TAG"
|
||||
# echo "Looking for backup sidecar: $FULL_BACKUP_SIDECAR_IMAGE_NAME"
|
||||
#
|
||||
# # -- END: Setup Backup Sidecar metadata variables --
|
||||
#
|
||||
# # Get the name of the running main pod (the one that has the backup sidecar container in it)
|
||||
# RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" '.items[] | select(.spec.containers[].image == $img) | .metadata.name')
|
||||
# echo "Running Main Pod: $RUNNING_MAIN_POD"
|
||||
#
|
||||
# # Variables for retry logic
|
||||
# MAX_RETRIES=5
|
||||
# RETRY_INTERVAL=30
|
||||
# RETRY_COUNT=0
|
||||
#
|
||||
# # Because Kubernetes (or at least our stagging cluster) can be a bit tempramental,
|
||||
# # We allow for "resetting" a few times.
|
||||
# # By resetting, we mean re-detecting the main running pod etc...
|
||||
# MAX_RESETS=3
|
||||
# RESET_COUNT=0
|
||||
#
|
||||
# POD_STATUS=$(kubectl -n ${{ env.NAMESPACE }} get pod $RUNNING_MAIN_POD -o json | jq -r '.status.phase')
|
||||
# echo "Pod Status: $POD_STATUS"
|
||||
#
|
||||
# # Retry (wait) a few times if the pod isn't running yet
|
||||
# while [ "$POD_STATUS" != "Running" ] && [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
|
||||
# echo "Pod is not running yet (Current Status: $POD_STATUS). Waiting for $RETRY_INTERVAL seconds (attempt $((RETRY_COUNT + 1))/$MAX_RETRIES)..."
|
||||
# sleep $RETRY_INTERVAL
|
||||
#
|
||||
# # Get the current status of the pod
|
||||
# POD_STATUS=$(kubectl -n ciam-account-dashboard get pod $RUNNING_MAIN_POD -o json | jq -r '.status.phase')
|
||||
#
|
||||
# # Increment the retry count
|
||||
# RETRY_COUNT=$((RETRY_COUNT + 1))
|
||||
#
|
||||
# # If the pod is in a failed state, we want to "reset"
|
||||
# # Though, we only want to reset a few times (to avoid infinite loops if something else is wrong etc...)
|
||||
# if [[ "$POD_STATUS" == "Failed" ]] && [ $RESET_COUNT -lt $MAX_RESETS ]; then
|
||||
# echo "Pod is in a failed state. Resetting..."
|
||||
#
|
||||
# # Reset the pod and increment the reset count
|
||||
# RUNNING_MAIN_POD=$(kubectl -n ${{ env.NAMESPACE }} get pods -o json | jq -r --arg img "$FULL_BACKUP_SIDECAR_IMAGE_NAME" --arg prev "$RUNNING_MAIN_POD" '.items[] | select(.spec.containers[].image == $img and .metadata.name != $prev) | .metadata.name')
|
||||
# echo "Running Main Pod Reset: $RUNNING_MAIN_POD"
|
||||
#
|
||||
# # Reset the retry count
|
||||
# RETRY_COUNT=0
|
||||
# echo "Retry Count Reset"
|
||||
#
|
||||
# RESET_COUNT=$((RESET_COUNT + 1))
|
||||
# fi
|
||||
# done
|
||||
#
|
||||
# if [ "$POD_STATUS" != "Running" ]; then
|
||||
# echo "Error: Pod did not start running after $((RETRY_COUNT * RETRY_INTERVAL)) seconds."
|
||||
# exit 1
|
||||
# fi
|
||||
#
|
||||
# LATEST_BACKUPS=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.CONTAINER_NAME }} -- /bin/bash -c "curl -sSL http://localhost:4000/backups/latest -o latest-backup.json && cat latest-backup.json")
|
||||
# echo "Latest Backups: $LATEST_BACKUPS"
|
||||
#
|
||||
# LATEST_DB_BACKUP=$(echo $LATEST_BACKUPS | jq -r '.backups.db')
|
||||
# echo "Latest DB Backup: $LATEST_DB_BACKUP"
|
||||
#
|
||||
# LATEST_VAULT_BACKUP=$(echo $LATEST_BACKUPS | jq -r '.backups.vault')
|
||||
# echo "Latest Vault Backup: $LATEST_VAULT_BACKUP"
|
||||
#
|
||||
# echo "Restoring Vault Backup: $LATEST_VAULT_BACKUP at restore time: ${{ env.RESTORE_TIME }}"
|
||||
# restore_output=$(kubectl -n ${{ env.NAMESPACE }} exec $RUNNING_MAIN_POD -c ${{ env.CONTAINER_NAME }} -- /bin/bash -c "curl -s -X POST -H 'Content-Type: application/json' -d '{ \"restoreTime\": \"${{ env.RESTORE_TIME }}\" }' http://localhost:4000/restore/latest -o restore-output.txt && cat restore-output.txt")
|
||||
# echo "Restore Output: $restore_output"
|
||||
#
|
||||
# # Verify that the site starts up after the restore
|
||||
# - name: Verify Restore
|
||||
# run: |
|
||||
# SITE_REQUEST_STATUS_CODE=$(curl -sSL https://${{ env.SITE_HOSTNAME }} -w '%{http_code}' -o /dev/null)
|
||||
#
|
||||
# MAX_RETRIES=5
|
||||
# RETRY_INTERVAL=30
|
||||
# RETRY_COUNT=0
|
||||
#
|
||||
# # Retry (wait) a few times if the site isn't up yet
|
||||
# while [ "$SITE_REQUEST_STATUS_CODE" != "200" ] && [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
|
||||
# echo "Site is not up yet (Current Status Code: $SITE_REQUEST_STATUS_CODE). Waiting for $RETRY_INTERVAL seconds (attempt $((RETRY_COUNT + 1))/$MAX_RETRIES)..."
|
||||
# sleep $RETRY_INTERVAL
|
||||
#
|
||||
# # Get the current status of the site
|
||||
# SITE_REQUEST_STATUS_CODE=$(curl -sSL http://${{ env.SITE_HOSTNAME }} -w '%{http_code}' -o /dev/null)
|
||||
#
|
||||
# # Increment the retry count
|
||||
# RETRY_COUNT=$((RETRY_COUNT + 1))
|
||||
# done
|
||||
#
|
||||
# if [ "$SITE_REQUEST_STATUS_CODE" != "200" ]; then
|
||||
# echo "Error: Site did not start up after $((RETRY_COUNT * RETRY_INTERVAL)) seconds."
|
||||
# exit 1
|
||||
# fi
|
||||
#
|
||||
# echo "Site is up and running (Status Code: $SITE_REQUEST_STATUS_CODE)."
|
||||
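A note on the version handling in the build job above: when the local chart version already exists in the Helm repo, the workflow bumps the patch number before packaging and pushing. A minimal standalone sketch of that bump, assuming plain MAJOR.MINOR.PATCH versions (the variable names here are illustrative, not copied from the workflow):

LOCAL_VERSION="1.0.0"
# Split the semver string into its three components (illustrative; the real version comes from Chart.yaml)
IFS='.' read -r MAJOR MINOR PATCH <<< "$LOCAL_VERSION"
# Increment the patch component, e.g. 1.0.0 -> 1.0.1
LOCAL_VERSION="$MAJOR.$MINOR.$((PATCH + 1))"
echo "New chart version: $LOCAL_VERSION"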
.gitignore (vendored): 7 changes
|
|
@@ -2,8 +2,11 @@
|
|||
values.filled.yaml
|
||||
|
||||
# Exclude any packaged charts
|
||||
services-dashboard-*.tgz
|
||||
ba-services-*.tgz
|
||||
|
||||
# Ignore Helm dependencies themselves
|
||||
charts/
|
||||
|
||||
# Random other stuff
|
||||
templates.old
|
||||
*.old*
|
||||
input.json
|
||||
|
|
@@ -2,14 +2,15 @@
|
|||
.helmignore
|
||||
|
||||
# Ignore the Helm chart's packaged tarball
|
||||
*.tgz
|
||||
ba-services-*.tgz
|
||||
|
||||
# Ignore git related files
|
||||
.git
|
||||
.gitignore
|
||||
|
||||
# Ignore github automation files
|
||||
# Ignore Github automation files
|
||||
.github
|
||||
.forgejo
|
||||
|
||||
# Ignore inputs file
|
||||
input.json
|
||||
|
|
@@ -17,5 +18,9 @@ input.json
|
|||
# Ignore the filled in values file
|
||||
values.filled.yaml
|
||||
|
||||
# Ignore the lock file for dependencies
|
||||
requirements.lock
|
||||
Chart.lock
|
||||
|
||||
# Ignore .old (backup) files
|
||||
*.old*
|
||||
Chart.lock (normal file): 18 changes
|
|
@@ -0,0 +1,18 @@
|
|||
dependencies:
|
||||
- name: ba-custom-hashicorp-vault
|
||||
repository: https://helm.bridgemanaccessible.ca/
|
||||
version: 1.0.6
|
||||
- name: nosql-deploy
|
||||
repository: https://helm.bridgemanaccessible.ca/
|
||||
version: 1.0.3
|
||||
- name: db-deploy
|
||||
repository: https://helm.bridgemanaccessible.ca/
|
||||
version: 1.0.2
|
||||
- name: cache-deploy
|
||||
repository: https://helm.bridgemanaccessible.ca/
|
||||
version: 1.0.7
|
||||
- name: ba-logging-sidecar
|
||||
repository: https://helm.bridgemanaccessible.ca/
|
||||
version: 1.0.2
|
||||
digest: sha256:5e94a036b1b834edb75e456b4d2bb0b9242d8f65b8f4c6d58062004234a02005
|
||||
generated: "2025-12-24T10:43:44.4164896-06:00"
|
||||
Chart.yaml: 32 changes
|
|
@@ -1,11 +1,37 @@
|
|||
apiVersion: v1
|
||||
apiVersion: v2
|
||||
appVersion: "1.0.298"
|
||||
description: A Helm chart for deploying Bridgeman Accessible's Services Dashboard
|
||||
home: https://github.com/Bridgeman-Accessible/services.bridgemanaccessible.ca-helm
|
||||
maintainers:
|
||||
- email: info@bridgemanaccessible.ca
|
||||
name: Bridgeman Accessible
|
||||
name: services-dashboard
|
||||
name: ba-services
|
||||
sources:
|
||||
- https://github.com/Bridgeman-Accessible/services.bridgemanaccessible.ca
|
||||
version: "1.5.10"
|
||||
version: "2.0.0"
|
||||
dependencies:
|
||||
- name: ba-custom-hashicorp-vault
|
||||
version: 1.0.6
|
||||
repository: "https://helm.bridgemanaccessible.ca/"
|
||||
alias: vault
|
||||
condition: vault.enabled
|
||||
- name: nosql-deploy
|
||||
version: 1.0.3
|
||||
repository: "https://helm.bridgemanaccessible.ca/"
|
||||
alias: nosql
|
||||
condition: nosql.enabled
|
||||
- name: db-deploy
|
||||
version: 1.0.2
|
||||
repository: "https://helm.bridgemanaccessible.ca/"
|
||||
alias: database
|
||||
condition: database.enabled
|
||||
- name: cache-deploy
|
||||
version: 1.0.7
|
||||
repository: "https://helm.bridgemanaccessible.ca/"
|
||||
alias: cache
|
||||
condition: cache.enabled
|
||||
- name: ba-logging-sidecar
|
||||
version: 1.0.2
|
||||
repository: "https://helm.bridgemanaccessible.ca/"
|
||||
alias: loggingSidecar
|
||||
condition: loggingSidecar.enabled
|
||||
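Each dependency above is given an alias and a condition, so the subcharts are toggled from the parent chart's values. A sketch of the corresponding values.yaml keys (the enabled flags match the conditions declared in Chart.yaml; the defaults shown here are assumptions):

# values.yaml (sketch; the chart's actual defaults may differ)
vault:
  enabled: true
nosql:
  enabled: true
database:
  enabled: true
cache:
  enabled: true
loggingSidecar:
  enabled: true

With these aliases in place, each subchart's own settings live under the same keys (e.g. cache.port), and `helm dependency build .` in the workflow resolves the dependencies into charts/ as pinned by Chart.lock.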
templates/_sidecars.tpl (normal file): 61 changes
|
|
@@ -0,0 +1,61 @@
|
|||
{{- define "backupSidecar" -}}
|
||||
- name: {{ .Values.backupSidecar.name }}
|
||||
image: {{ .Values.backupSidecar.image.repository }}:{{ .Values.backupSidecar.image.tag }}
|
||||
imagePullPolicy: {{ .Values.backupSidecar.image.pullPolicy }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.backupSidecar.port }}
|
||||
env:
|
||||
# Release name (used to identify the service/release the backups came from in remote storage)
|
||||
- name: RELEASE_NAME
|
||||
value: {{ .Release.Name }}
|
||||
{{- include "db.envVars" . | nindent 2 -}}
|
||||
{{- if .Values.vault.create.snapshotServer.enabled }}
|
||||
- name: VAULT_NAME
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-pass-vault-secret
|
||||
key: vault-name
|
||||
- name: VAULT_SNAPSHOT_SERVER_PORT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-pass-vault-snapshot-config
|
||||
key: port
|
||||
{{- end }}
|
||||
{{- if eq .Values.nosql.type "mongodb" }}
|
||||
# NoSQL storage related environment variables
|
||||
# Note, we only worry about self-hosted options as cloud-based should have their own backups etc...
|
||||
- name: STORAGE_ACCOUNT_CONNECTION_STRING
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-mongo-credentials
|
||||
key: connection-string
|
||||
{{- end }}
|
||||
# Redis is used for BullMQ, which is how we schedule backups
|
||||
# We use this instead of, for instance cron jobs, as it lets us deal with failures
|
||||
{{- include "cache.envVars" . | nindent 2 }}
|
||||
resources:
|
||||
requests:
|
||||
cpu: {{ .Values.backupSidecar.resources.requests.cpu }}
|
||||
memory: {{ .Values.backupSidecar.resources.requests.memory }}
|
||||
ephemeral-storage: {{ .Values.backupSidecar.resources.requests.ephemeralStorage }}
|
||||
limits:
|
||||
cpu: {{ .Values.backupSidecar.resources.limits.cpu }}
|
||||
memory: {{ .Values.backupSidecar.resources.limits.memory }}
|
||||
ephemeral-storage: {{ .Values.backupSidecar.resources.limits.ephemeralStorage }}
|
||||
{{- if .Values.vault.create.snapshotServer.enabled }}
|
||||
volumeMounts:
|
||||
# Mount for a shared volume for Vault credentials
|
||||
# This is separate from the app's `role vars` volume because it includes other credentials
|
||||
# In particular, the unseal keys which we require when/if we restore from the backup
|
||||
# This volume is also read-only where the `role-vars` is read-write (see description below for why)
|
||||
- name: creds
|
||||
mountPath: /vault-creds
|
||||
readOnly: true
|
||||
# Mount for a shared volume for the Vault's role variables for the app
|
||||
# This is required by the backup sidecar because if a restart of the app occurs AFTER a vault has been reset (ex. vault using a different container instance),
|
||||
# despite the vault data being restored the app would receive incorrect credentials (because this is ONLY written during setup of the vault)
|
||||
# The backup sidecar mitigates this by doing its own write (to overwrite) once it's done a restore
|
||||
- name: role-vars
|
||||
mountPath: /role_vars
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
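The backupSidecar helper above is consumed from the deployment template with a single include, which is what replaces the long inline container block removed further down in this commit:

      {{- /* This injects the YAML defined in the `_sidecars.tpl` file */ -}}
      {{ include "backupSidecar" . | nindent 6 }}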
templates/_thirdParty.tpl (normal file): 77 changes
|
|
@@ -0,0 +1,77 @@
|
|||
{{- define "stripe.envVars" -}}
|
||||
# Stripe Environment Variables
|
||||
- name: STRIPE_PUBLIC_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-stripe-secret
|
||||
key: live-public-key
|
||||
- name: STRIPE_SECRET_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-stripe-secret
|
||||
key: live-secret-key
|
||||
- name: STRIPE_TEST_PUBLIC_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-stripe-secret
|
||||
key: test-public-key
|
||||
- name: STRIPE_TEST_SECRET_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-stripe-secret
|
||||
key: test-secret-key
|
||||
{{- end -}}
|
||||
|
||||
{{- define "moneris.envVars" -}}
|
||||
# Moneris Environment Variables
|
||||
- name: MONERIS_MERCHANT_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: merchant-id
|
||||
- name: MONERIS_STORE_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: store-id
|
||||
- name: MONERIS_HT_PROFILE_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: ht-profile-id
|
||||
- name: MONERIS_APP_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: app-id
|
||||
- name: MONERIS_APP_SECRET
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: app-secret
|
||||
- name: MONERIS_TEST_MERCHANT_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: test-merchant-id
|
||||
- name: MONERIS_TEST_STORE_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: test-store-id
|
||||
- name: MONERIS_TEST_HT_PROFILE_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: test-ht-profile-id
|
||||
- name: MONERIS_TEST_APP_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: test-app-id
|
||||
- name: MONERIS_TEST_APP_SECRET
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: test-app-secret
|
||||
{{- end -}}
|
||||
|
|
@@ -1,12 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-cache-configmap
|
||||
namespace: {{ .Release.Namespace }}
|
||||
data:
|
||||
{{- if and (eq .Values.cache.type "redis") (.Values.cache.create) }}
|
||||
hostname: {{ .Release.Name }}-redis
|
||||
{{- else }}
|
||||
hostname: {{ .Values.cache.hostname }}
|
||||
{{- end }}
|
||||
port: {{ .Values.cache.port | quote }}
|
||||
|
|
@@ -1,7 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-cache-credentials
|
||||
type: Opaque
|
||||
data:
|
||||
password: {{ .Values.cache.password | b64enc }}
|
||||
|
|
@@ -1,37 +0,0 @@
|
|||
{{- if and (eq .Values.database.type "postgres") (.Values.database.create) -}}
|
||||
apiVersion: postgresql.org/v1
|
||||
kind: PostgresDatabase
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-db
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
dbName:
|
||||
envFrom:
|
||||
configMapKeyRef:
|
||||
- name: {{ .Release.Name }}-db-credentials
|
||||
namespace: postgres-controller
|
||||
key: db-name
|
||||
dbRoleName:
|
||||
envFrom:
|
||||
configMapKeyRef:
|
||||
- name: {{ .Release.Name }}-db-credentials
|
||||
namespace: postgres-controller
|
||||
key: db-user
|
||||
dbRolePassword:
|
||||
envFrom:
|
||||
secretKeyRef:
|
||||
- name: {{ .Release.Name }}-db-password
|
||||
namespace: postgres-controller
|
||||
key: password
|
||||
# Because we've adopted a "throw away"/"ephemeral"/"container-esk" approach to our database, we want it to be dropped/deleted when everything else is deleted.
|
||||
# This is because we re-create it and restore from a backup on every deploy.
|
||||
# Which helps keep the data current and reinforces the utility of the backup and restore systems.
|
||||
onDeletion:
|
||||
# Whether to drop the database when the resource is deleted
|
||||
dropDB: true
|
||||
# Whether to drop the role when the resource is deleted
|
||||
dropRole: true
|
||||
{{- if .Values.database.instance_id }}
|
||||
dbInstanceId: {{ .Values.database.instance_id }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
|
@@ -1,16 +0,0 @@
|
|||
{{- if and (eq .Values.database.type "postgres") (.Values.database.create) -}}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-db-credentials
|
||||
namespace: postgres-controller
|
||||
data:
|
||||
db-host: {{ .Values.database.host }}
|
||||
db-name: {{ .Values.database.name }}
|
||||
db-user: {{ .Values.database.user }}
|
||||
{{- if .Values.database.port }}
|
||||
db-port: {{ .Values.database.port | quote }}
|
||||
{{- else }}
|
||||
db-port: "5432"
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
|
@@ -1,13 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-db-credentials
|
||||
data:
|
||||
db-host: {{ .Values.database.host }}
|
||||
db-name: {{ .Values.database.name }}
|
||||
db-user: {{ .Values.database.user }}
|
||||
{{- if .Values.database.port }}
|
||||
db-port: {{ .Values.database.port | quote }}
|
||||
{{- else }}
|
||||
db-port: "5432"
|
||||
{{- end }}
|
||||
|
|
@@ -1,10 +0,0 @@
|
|||
{{- if and (eq .Values.database.type "postgres") (.Values.database.create) -}}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-db-password
|
||||
namespace: postgres-controller
|
||||
type: Opaque
|
||||
data:
|
||||
password: {{ .Values.database.password | b64enc }}
|
||||
{{- end -}}
|
||||
|
|
@@ -1,7 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-db-password
|
||||
type: Opaque
|
||||
data:
|
||||
password: {{ .Values.database.password | b64enc }}
|
||||
|
|
@@ -5,7 +5,7 @@ metadata:
|
|||
labels:
|
||||
app: {{ .Release.Name }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
replicas: {{ .Values.app.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ .Release.Name }}
|
||||
|
|
@@ -16,15 +16,15 @@ spec:
|
|||
spec:
|
||||
containers:
|
||||
- name: {{ .Release.Name }}
|
||||
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
image: {{ .Values.app.image.repository }}:{{ .Values.app.image.tag }}
|
||||
imagePullPolicy: {{ .Values.app.image.pullPolicy }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.container.port }}
|
||||
- containerPort: {{ .Values.app.container.port }}
|
||||
env:
|
||||
- name: NODE_ENV
|
||||
value: {{ .Values.container.env }}
|
||||
value: {{ .Values.app.container.env }}
|
||||
- name: PORT
|
||||
value: "{{ .Values.container.port }}"
|
||||
value: "{{ .Values.app.container.port }}"
|
||||
- name: DOMAIN
|
||||
value: "bridgemanaccessible.ca"
|
||||
- name: ACCOUNTS_DEV_PORT
|
||||
|
|
@@ -57,51 +57,9 @@ spec:
|
|||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-oauth-credentials
|
||||
key: dev-port
|
||||
# Database credentials
|
||||
- name: DB_HOST
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-db-credentials
|
||||
key: db-host
|
||||
- name: DB_NAME
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-db-credentials
|
||||
key: db-name
|
||||
- name: DB_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-db-password
|
||||
key: password
|
||||
- name: DB_PORT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-db-credentials
|
||||
key: db-port
|
||||
- name: DB_USER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-db-credentials
|
||||
key: db-user
|
||||
# NoSQL Credentials
|
||||
{{- if eq .Values.nosql.type "mongodb" }}
|
||||
- name: STORAGE_ACCOUNT_CONNECTION_STRING
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-mongo-credentials
|
||||
key: connection-string
|
||||
{{- else if eq .Values.nosql.type "azure" }}
|
||||
- name: STORAGE_ACCOUNT_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-azure-tables-credentials
|
||||
key: key
|
||||
- name: STORAGE_ACCOUNT_NAME
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-azure-tables-config
|
||||
key: name
|
||||
{{- end }}
|
||||
{{- include "db.envVars" . | nindent 8 }}
|
||||
{{- include "cache.envVars" . | nindent 8 }}
|
||||
{{- include "nosql.envVars" . | nindent 8 }}
|
||||
# NoSQL Grouping Names
|
||||
- name: ACCESS_PROPERTIES_STORAGE_TABLE_NAME
|
||||
valueFrom:
|
||||
|
|
@@ -169,22 +127,6 @@ spec:
|
|||
key: vault-port
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
# Caching Server Variables
|
||||
- name: CACHE_HOSTNAME
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-cache-configmap
|
||||
key: hostname
|
||||
- name: CACHE_PORT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-cache-configmap
|
||||
key: port
|
||||
- name: CACHE_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-cache-credentials
|
||||
key: password
|
||||
# Email (Azure Communication Services API credentials, etc...)
|
||||
{{- if and (.Values.comms.email.enabled) (eq .Values.comms.email.type "acs") }}
|
||||
- name: EMAIL_CONNECTION_STRING
|
||||
|
|
@@ -195,91 +137,23 @@ spec:
|
|||
{{- end }}
|
||||
# Third-Party Integrations
|
||||
{{- if .Values.thirdParty.stripe.enabled }}
|
||||
- name: STRIPE_PUBLIC_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-stripe-secret
|
||||
key: public-key
|
||||
- name: STRIPE_SECRET_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-stripe-secret
|
||||
key: secret-key
|
||||
- name: STRIPE_TEST_PUBLIC_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-stripe-secret
|
||||
key: test-public-key
|
||||
- name: STRIPE_TEST_SECRET_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-stripe-secret
|
||||
key: test-secret-key
|
||||
{{- /* This injects the YAML defined in the `_thirdParty.tpl` file */ -}}
|
||||
{{ include "stripe.envVars" . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.thirdParty.moneris.enabled }}
|
||||
- name: MONERIS_MERCHANT_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: merchant-id
|
||||
- name: MONERIS_STORE_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: store-id
|
||||
- name: MONERIS_HT_PROFILE_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: ht-profile-id
|
||||
- name: MONERIS_APP_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: app-id
|
||||
- name: MONERIS_APP_SECRET
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: app-secret
|
||||
- name: MONERIS_TEST_MERCHANT_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: test-merchant-id
|
||||
- name: MONERIS_TEST_STORE_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: test-store-id
|
||||
- name: MONERIS_TEST_HT_PROFILE_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: test-ht-profile-id
|
||||
- name: MONERIS_TEST_APP_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: test-app-id
|
||||
- name: MONERIS_TEST_APP_SECRET
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-moneris-secret
|
||||
key: test-app-secret
|
||||
{{- /* This injects the YAML defined in the `_thirdParty.tpl` file */ -}}
|
||||
{{ include "moneris.envVars" . | nindent 8 }}
|
||||
{{- end }}
|
||||
- name: INITIAL_USER_ID
|
||||
value: {{ .Values.initialUserID | quote }}
|
||||
# Logging Sidecar related environment variables
|
||||
{{- if .Values.loggingSidecar.enabled }}
|
||||
- name: LOGGING_SIDE_CAR_PORT
|
||||
value: {{ .Values.loggingSidecar.port | quote }}
|
||||
{{- include "logging.envVars" . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if ne .Values.app.restoreFromBackup "" }}
|
||||
# Due to subtleties related to how the entrypoint scripts detects how/when to proceed
|
||||
# This environment variable indicates if the entrypoint should wait for a restore to complete
|
||||
{{- if ne .Values.container.restoreFromBackup "" }}
|
||||
- name: RESTORE_FROM_BACKUP
|
||||
value: {{ .Values.container.restoreFromBackup | quote }}
|
||||
value: {{ .Values.app.restoreFromBackup | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.vault.create.enabled }}
|
||||
volumeMounts:
|
||||
|
|
@@ -292,120 +166,23 @@ spec:
|
|||
mountPath: /role_vars
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
resources:
|
||||
requests:
|
||||
cpu: {{ .Values.app.resources.requests.cpu }}
|
||||
memory: {{ .Values.app.resources.requests.memory }}
|
||||
ephemeral-storage: {{ .Values.app.resources.requests.ephemeralStorage }}
|
||||
limits:
|
||||
cpu: {{ .Values.app.resources.limits.cpu }}
|
||||
memory: {{ .Values.app.resources.limits.memory }}
|
||||
ephemeral-storage: {{ .Values.app.resources.limits.ephemeralStorage }}
|
||||
# Logging sidecar for sending logs to a log aggregator
|
||||
{{- if .Values.loggingSidecar.enabled }}
|
||||
- name: {{ .Values.loggingSidecar.name }}
|
||||
image: {{ .Values.loggingSidecar.image.repository }}:{{ .Values.loggingSidecar.image.tag }}
|
||||
imagePullPolicy: {{ .Values.loggingSidecar.image.pullPolicy }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.loggingSidecar.port }}
|
||||
env:
|
||||
- name: PORT
|
||||
value: {{ .Values.loggingSidecar.port | quote }}
|
||||
# Log aggregator (OpenObserve) auth variables
|
||||
- name: LOGGER_AUTH_USERNAME
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-logging-sidecar-credentials
|
||||
key: username
|
||||
- name: LOGGER_AUTH_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-logging-sidecar-password
|
||||
key: password
|
||||
{{ include "logging.sidecar" . | nindent 6 }}
|
||||
{{- end }}
|
||||
# Backup sidecar for backing up service data
|
||||
{{- if .Values.backupSidecar.enabled }}
|
||||
- name: {{ .Values.backupSidecar.name }}
|
||||
image: {{ .Values.backupSidecar.image.repository }}:{{ .Values.backupSidecar.image.tag }}
|
||||
imagePullPolicy: {{ .Values.backupSidecar.image.pullPolicy }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.backupSidecar.port }}
|
||||
env:
|
||||
# Release name (used to identify the service/release the backups came from in remote storage)
|
||||
- name: RELEASE_NAME
|
||||
value: {{ .Release.Name }}
|
||||
# Database related environment variables
|
||||
- name: DB_HOST
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-db-credentials
|
||||
key: db-host
|
||||
- name: DB_NAME
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-db-credentials
|
||||
key: db-name
|
||||
- name: DB_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-db-password
|
||||
key: password
|
||||
- name: DB_PORT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-db-credentials
|
||||
key: db-port
|
||||
- name: DB_USER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-db-credentials
|
||||
key: db-user
|
||||
{{- if .Values.vault.create.snapshotServer.enabled }}
|
||||
- name: VAULT_NAME
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-vault-secret
|
||||
key: vault-name
|
||||
- name: VAULT_SNAPSHOT_SERVER_PORT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-vault-snapshot-config
|
||||
key: port
|
||||
{{- end }}
|
||||
# NoSQL storage related environment variables
|
||||
# Note, we only worry about self-hosted options as cloud-based should have their own backups etc...
|
||||
{{- if eq .Values.nosql.type "mongodb" }}
|
||||
- name: STORAGE_ACCOUNT_CONNECTION_STRING
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-mongo-credentials
|
||||
key: connection-string
|
||||
{{- end }}
|
||||
# Redis related environment variables
|
||||
# Redis is used for BullMQ, which is how we schedule backups
|
||||
# We use this instead of, for instance cron jobs, as it lets us deal with failures
|
||||
- name: REDIS_HOSTNAME
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-cache-configmap
|
||||
key: hostname
|
||||
- name: REDIS_PORT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ .Release.Name }}-cache-configmap
|
||||
key: port
|
||||
- name: REDIS_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-cache-credentials
|
||||
key: password
|
||||
{{- if .Values.vault.create.snapshotServer.enabled }}
|
||||
volumeMounts:
|
||||
# Mount for a shared volume for Vault credentials
|
||||
# This is separate from the app's `role vars` volume because it includes other credentials
|
||||
# In particular, the unseal keys which we require when/if we restore from the backup
|
||||
# This volume is also read-only where the `role-vars` is read-write (see description below for why)
|
||||
- name: creds
|
||||
mountPath: /vault-creds
|
||||
readOnly: true
|
||||
# Mount for a shared volume for the Vault's role variables for the app
|
||||
# This is required by the backup sidecar because if a restart of the app occurs AFTER a vault has been reset (ex. vault using a different container instance),
|
||||
# despite the vault data being restored the app would receive incorrect credentials (because this is ONLY written during setup of the vault)
|
||||
# The backup sidecar mitigates this by doing it's own write (to overwrite) once it's done a restore
|
||||
- name: role-vars
|
||||
mountPath: /role_vars
|
||||
{{- end }}
|
||||
{{- /* This injects the YAML defined in the `_sidecars.tpl` file */ -}}
|
||||
{{ include "backupSidecar" . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if .Values.vault.create.enabled }}
|
||||
volumes:
|
||||
|
|
|
|||
|
|
@@ -1,10 +0,0 @@
|
|||
{{- if .Values.loggingSidecar.enabled -}}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-logging-sidecar-credentials
|
||||
labels:
|
||||
app: {{ .Values.loggingSidecar.name }}
|
||||
data:
|
||||
username: {{ .Values.loggingSidecar.auth.username | quote }}
|
||||
{{- end -}}
|
||||
|
|
@@ -1,11 +0,0 @@
|
|||
{{- if .Values.loggingSidecar.enabled -}}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-logging-sidecar-password
|
||||
labels:
|
||||
app: {{ .Values.loggingSidecar.name }}
|
||||
type: opaque
|
||||
data:
|
||||
password: {{ .Values.loggingSidecar.auth.password | b64enc | quote }}
|
||||
{{- end -}}
|
||||
|
|
@@ -1,15 +0,0 @@
|
|||
{{- if eq .Values.nosql.type "mongodb" -}}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-mongo-credentials
|
||||
type: Opaque
|
||||
data:
|
||||
user: {{ .Values.nosql.user | b64enc }}
|
||||
password: {{ .Values.nosql.password | b64enc }}
|
||||
{{- if and (.Values.nosql.connectionString) (not .values.nosql.create) }}
|
||||
connection-string: {{ .Values.nosql.connectionString | b64enc }}
|
||||
{{- else if .Values.nosql.create }}
|
||||
connection-string: {{ printf "mongodb://%s:%s@%s-mongo-svc.%s.svc.cluster.local:27017/%s?replicaSet=%s-mongo" .Values.nosql.user .Values.nosql.password .Release.Name .Release.Namespace .Values.nosql.name .Release.Name | b64enc }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
|
@@ -1,16 +0,0 @@
|
|||
{{- if and (eq .Values.nosql.type "mongodb") (.Values.nosql.create) -}}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: mongodb-database
|
||||
namespace: {{ .Release.Namespace }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: mongodb-database
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: mongodb-database
|
||||
namespace: {{ .Release.Namespace }}
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end -}}
|
||||
|
|
@@ -1,22 +0,0 @@
|
|||
{{- if and (eq .Values.nosql.type "mongodb") (.Values.nosql.create) -}}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: mongodb-database
|
||||
namespace: {{ .Release.Namespace }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- patch
|
||||
- delete
|
||||
- get
|
||||
{{- end -}}
|
||||
|
|
@@ -1,7 +0,0 @@
|
|||
{{- if and (eq .Values.nosql.type "mongodb") (.Values.nosql.create) -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: mongodb-database
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end -}}
|
||||
|
|
@@ -1,31 +0,0 @@
|
|||
{{- if and (eq .Values.nosql.type "mongodb") (.Values.nosql.create) -}}
|
||||
apiVersion: mongodbcommunity.mongodb.com/v1
|
||||
kind: MongoDBCommunity
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-mongo
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
members: {{ .Values.nosql.replicaCount }}
|
||||
type: ReplicaSet
|
||||
version: 4.4.0
|
||||
security:
|
||||
authentication:
|
||||
ignoreUnknownUsers: true
|
||||
modes:
|
||||
- SCRAM
|
||||
tls:
|
||||
enabled: {{ .Values.nosql.tls.enabled }}
|
||||
readinessProbe:
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
users:
|
||||
- name: {{ .Values.nosql.user }}
|
||||
db: {{ .Values.nosql.name }}
|
||||
passwordSecretRef:
|
||||
name: {{ .Release.Name }}-mongo-credentials
|
||||
key: password
|
||||
roles:
|
||||
- name: readWrite
|
||||
db: {{ .Values.nosql.name }}
|
||||
scramCredentialsSecretName: {{ .Release.Name }}-mongo-scram
|
||||
{{- end -}}
|
||||
|
|
@@ -1,13 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-nosql-grouping
|
||||
labels:
|
||||
app: {{ .Release.Name }}
|
||||
data:
|
||||
access-properties: {{ .Values.nosql.grouping.accessProperties }}
|
||||
locales: {{ .Values.nosql.grouping.locales }}
|
||||
order-properties: {{ .Values.nosql.grouping.orderProperties }}
|
||||
price-properties: {{ .Values.nosql.grouping.priceProperties }}
|
||||
service-category-properties: {{ .Values.nosql.grouping.serviceCategoryProperties }}
|
||||
service-properties: {{ .Values.nosql.grouping.serviceProperties }}
|
||||
|
|
@@ -1,49 +0,0 @@
|
|||
{{- if and (eq .Values.cache.type "redis") (.Values.cache.create) -}}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-redis
|
||||
labels:
|
||||
app: redis
|
||||
spec:
|
||||
replicas: {{ .Values.cache.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: redis
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
spec:
|
||||
containers:
|
||||
- name: redis
|
||||
image: {{ .Values.cache.image.repository | default "bitnami/redis" }}:{{ .Values.cache.image.tag | default "7.0.5" }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.cache.port }}
|
||||
{{- if .Values.cache.tls.enabled }}
|
||||
- containerPort: {{ .Values.cache.tls.port }}
|
||||
{{- end }}
|
||||
env:
|
||||
- name: ALLOW_EMPTY_PASSWORD
|
||||
value: "false"
|
||||
- name: REDIS_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-cache-credentials
|
||||
key: password
|
||||
- name: REDIS_DISABLE_COMMANDS
|
||||
value: "FLUSHDB,FLUSHALL"
|
||||
# TLS configuration
|
||||
#- name: REDIS_TLS_ENABLED
|
||||
# value: "{{ .Values.cache.tls.enabled }}"
|
||||
#- name: REDIS_TLS_AUTH_CLIENTS
|
||||
# value: "yes"
|
||||
#- name: REDIS_TLS_PORT_NUMBER
|
||||
# value: "{{ .Values.cache.tls.port }}"
|
||||
volumeMounts:
|
||||
- name: redis-data
|
||||
mountPath: /bitnami/redis
|
||||
volumes:
|
||||
- name: redis-data
|
||||
emptyDir: {}
|
||||
{{- end -}}
|
||||
|
|
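For reference, a hypothetical `cache` block that this (now removed) template consumed; the image repository and tag mirror the defaults visible in the template, while the port is illustrative:

  cache:
    type: "redis"
    create: true             # flat flag used by this template; the values.yaml hunks later in this diff also show a nested create.enabled form
    replicaCount: 1
    image:
      repository: "bitnami/redis"
      tag: "7.0.5"
    port: 6379
    tls:
      enabled: false
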
@ -1,15 +0,0 @@
{{- if and (eq .Values.cache.type "redis") (.Values.cache.create) -}}
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-redis
labels:
app: redis
spec:
ports:
- port: {{ .Values.cache.port }}
targetPort: {{ .Values.cache.port }}
selector:
app: redis
type: ClusterIP
{{- end -}}

@ -8,4 +8,4 @@ spec:
ports:
- protocol: TCP
port: 80
targetPort: {{ .Values.container.port }}
targetPort: {{ .Values.app.container.port }}

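This one-line change tracks the values reorganization later in this diff: the container port now lives under the `app:` block, so deployer values would nest roughly as follows (the port number is illustrative, not a chart default):

  app:
    container:
      port: 3000
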
@ -1,15 +0,0 @@
{{- if .Values.vault.create.enabled -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Release.Name }}-vault-creds
labels:
app: {{ .Release.Name }}-vault
spec:
storageClassName: {{ .Values.vault.create.storage.storageClass }}
accessModes:
- ReadWriteMany
resources:
requests:
storage: {{ .Values.vault.create.storage.size }}
{{- end -}}

@ -1,66 +0,0 @@
{{- if and (.Values.vault.create.enabled) (eq .Values.vault.type "hashicorp") -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-vault
labels:
app: {{ .Release.Name }}-vault
spec:
replicas: 1
selector:
matchLabels:
app: {{ .Release.Name }}-vault
template:
metadata:
labels:
app: {{ .Release.Name }}-vault
spec:
containers:
- name: {{ .Release.Name }}-vault
image: {{ .Values.vault.create.image.repository }}:{{ .Values.vault.create.image.tag }}
ports:
- containerPort: 8200
- containerPort: 8201
{{- if .Values.vault.create.snapshotServer.enabled }}
- containerPort: {{ .Values.vault.create.snapshotServer.internalPort }}
{{- end }}
env:
- name: VAULT_ADDR
value: http://0.0.0.0:8200
- name: POLICY_CAPABILITIES
value: {{ .Values.vault.create.policyCapabilities | join "," }}
- name: ROLE_ID_SECRET_NAME
value: VAULT_ROLE_ID
- name: SECRET_ID_SECRET_NAME
value: VAULT_SECRET_ID
{{- if .Values.vault.create.snapshotServer.enabled }}
- name: SNAPSHOT_SERVER_PORT
value: {{ .Values.vault.create.snapshotServer.internalPort | quote }}
{{- end }}
volumeMounts:
- name: vault-data
mountPath: /vault/data
- name: vault-log
mountPath: /vault/logs
- name: vault-creds
mountPath: /vault/creds
- name: vault-role-vars
mountPath: /role_vars
capAdd:
- IPC_LOCK
volumes:
- name: vault-data
emptyDir: {}
- name: vault-log
emptyDir: {}
- name: vault-creds
{{- if .Values.vault.create.snapshotServer.enabled }}
persistentVolumeClaim:
claimName: {{ .Release.Name }}-vault-creds
{{- else }}
emptyDir: {}
{{- end }}
- name: vault-role-vars
persistentVolumeClaim:
claimName: {{ .Release.Name }}-vault-role-vars
{{- end -}}

@ -1,24 +0,0 @@
{{- if and (.Values.vault.enabled) (eq .Values.vault.type "hashicorp") -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-vault-secret
type: Opaque
data:
{{- if .Values.vault.create.enabled }}
# Because we create the Hashicorp Vault instance as part of the Helm chart,
# we can use the name of the created resource (utilizing k8s built-in container connections)
# to connect to the Vault instance without having to hard-code the Vault name.
vault-name: {{ printf "%s-vault" .Release.Name | b64enc }}
# Because we create the Hashicorp Vault instance as part of the Helm chart,
# we know the port that the Vault instance is running on.
vault-port: {{ printf "%d" 80 | b64enc }}
{{- else }}
# Because the Vault wasn't created as part of the Helm chart,
# we need the deployer to specify the name of the Vault instance to connect to.
vault-name: {{ .Values.vault.vaultName | b64enc }}
# Because the Vault wasn't created as part of the Helm chart,
# we need the deployer to specify the port that the Vault instance is running on.
vault-port: {{ .Values.vault.vaultPort | b64enc }}
{{- end }}
{{- end -}}

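When the Vault is not created by this chart, the `else` branch above expects the deployer to supply the connection details; a minimal sketch of the corresponding values (hostname and port are placeholders):

  vault:
    enabled: true
    type: "hashicorp"
    create:
      enabled: false
    vaultName: "vault.example.com"
    vaultPort: "8200"      # quoted so it base64-encodes as a string
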
@ -1,21 +0,0 @@
{{- if .Values.vault.create.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ .Release.Name }}-vault-ingress
labels:
app: {{ .Release.Name }}-vault
spec:
ingressClassName: nginx
rules:
- host: {{ .Values.vault.create.ingress.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ .Release.Name }}-vault
port:
number: 80
{{- end -}}

@ -1,15 +0,0 @@
{{- if .Values.vault.create.enabled -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Release.Name }}-vault-role-vars
labels:
app: {{ .Release.Name }}-vault
spec:
storageClassName: {{ .Values.vault.create.storage.storageClass }}
accessModes:
- ReadWriteMany
resources:
requests:
storage: {{ .Values.vault.create.storage.size }}
{{- end -}}

@ -1,22 +0,0 @@
{{- if .Values.vault.create.enabled -}}
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-vault
labels:
app: {{ .Release.Name }}-vault
spec:
selector:
app: {{ .Release.Name }}-vault
ports:
{{- if .Values.vault.create.snapshotServer.enabled }}
- name: custom-snapshot-server
protocol: TCP
port: {{ .Values.vault.create.snapshotServer.externalPort }}
targetPort: {{ .Values.vault.create.snapshotServer.internalPort }}
{{- end }}
- name: http
protocol: TCP
port: 80
targetPort: 8200
{{- end -}}

@ -1,8 +0,0 @@
{{- if .Values.vault.create.snapshotServer.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-vault-snapshot-config
data:
port: {{ .Values.vault.create.snapshotServer.externalPort | quote }}
{{- end -}}

217
values.yaml

@ -1,6 +1,9 @@
# Configuration for the Services Dashboard itself
app:
# The number of instances (replicas) of the app to run
replicaCount: 1

# Configurations around the image that is used for the Services Dashboard
image:
# The repository of the image to use for the app
# Should be in the format `<Image Repository (Ex. containers.example.com)>/<Image Name (Ex. app)>`

@ -23,6 +26,16 @@ container:
# This is used for the NODE_ENV environment variable
env: "production"

resources:
requests:
cpu: 200m
memory: 512Mi
ephemeralStorage: 50Mi
limits:
cpu: 1000m
memory: 512Mi
ephemeralStorage: 1Gi

# The timestamp of the backup that the entrypoint script should wait for a restore to complete
restoreFromBackup: ""

@ -50,17 +63,15 @@ oauth:
devPort: "<Dev Port>"

# Configuration for the relational database
# See the [Database Deployment Helm Chart](https://git.bridgemanaccessible.ca/Bridgeman-Accessible/db-deploy-helm) for more information
database:
# If a relational database should be used
enabled: true

# This override tells the helper: "Ignore the alias (Chart Name - `database`), use this string for K8s resources instead"
nameOverride: "db"

# The type of the relational database that is used.
#
# The following table lists the possible values for this field:
#
# | Value | Description |
# | ---------- | ------------------------------------------ |
# | `postgres` | Uses PostgreSQL as the relational database |
#
# Note, for use of `postgres`, it uses a [`postgres-controller` CRD](https://github.com/AlanBridgeman/postgres-controller) to create the database
#
type: "postgres"

# If set to `true`, the database will be created as part of the deployment

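Assuming the subchart follows the usual fullname helper convention (an assumption, the helper itself is not shown in this diff), the override changes the generated resource names; sketched for a release named `services`:

  # Hypothetical effect of the override:
  #   without nameOverride: services-database-<suffix>
  #   with nameOverride:    services-db-<suffix>
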
@ -79,35 +90,67 @@ database:
# The password that is used to access the database
password: "<DB Password>"

# The port that the database listens on
#port: 5432

# Allows for distinguishing between multiple database instances/servers
#instance_id: ""

# Configurations for the secrets vault
# See the [Customized Hashicorp Vault Implementation - Helm Chart](https://git.bridgemanaccessible.ca/Bridgeman-Accessible/custom-hashicorp-vault-helm) for more information
vault:
# If a secrets vault should be used
# That is, if a dedicated software for secret management should be used
# This should virtually always be true if storing any kind of sensitive information as it's the most secure option
enabled: true

# The type of secrets vault to use.
# The type of secrets vault (or storage if `enabled` is `false`) to use.
#
# Vaults
# ------
# The following table lists the supported vault types:
#
# | Type | Description |
# | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
# | `hashicorp` | [Customized Hashicorp Vault Implementation - Helm Chart](https://git.bridgemanaccessible.ca/Bridgeman-Accessible/custom-hashicorp-vault-helm) |
# | `azure` | Uses Azure Key Vault (Required fields: `vaultName`, `clientId`, `clientSecret`, `tenantId`) |
#
# Storage
# -------
# The following table lists the supported storage types.
# These are methods OUTSIDE of a dedicated "vault" software.
# These are generally **discouraged** as they are less secure.
#
# | Type | Description | Current Status | Required Fields |
# | ----------- | -------------------- | -------------- | --------------------------------------------------- |
# | `hashicorp` | Uses Hashicorp Vault | Implemented | `vaultName` (if `create` not true) |
# | `azure` | Uses Azure Key Vault | Implemented | `vaultName`, `clientId`, `clientSecret`, `tenantId` |
# | ----------- | ------------------------------- | -------------- | ---------------- |
# | `file` | Uses a file | To-Do | `path` |
# | `mem` | Uses in-memory (no persistence) | To-Do | N/A |
#
type: "<Vault Type>"

# The name of the vault instance to connect to
#
# This is relevant if type is set to `hashicorp` or `azure`
# Note, if `create` is true this is ignored
#
# For `hashicorp`, see subchart's `values.yaml` for details
# For `azure`, this is the name of the Azure Key Vault instance to connect to
#vaultName: "<Vault Name>"

# The client ID of the Azure Key Vault instance
#
# ONLY RELEVANT IF `type` IS SET TO `azure`
#client-id: <Azure Key Vault Client ID>

# The client secret of the Azure Key Vault instance
#
# ONLY RELEVANT IF `type` IS SET TO `azure`
#client-secret: <Azure Key Vault Client Secret>

# The tenant ID of the Azure Key Vault instance
#
# ONLY RELEVANT IF `type` IS SET TO `azure`
#tenant-id: <Azure Key Vault Tenant ID>

# Configurations to create a Hashicorp Vault instance as part of the Helm chart
#
# THIS IS ONLY RELEVANT IF `type` IS SET TO `hashicorp`
#
# See subchart's `values.yaml` for more information
create:
# If a Hashicorp Vault instance should be created as part of the Helm chart
enabled: <true/false>

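Taken together, the commented fields above suggest a minimal Azure Key Vault configuration along these lines (all identifiers are placeholders, not real defaults):

  vault:
    enabled: true
    type: "azure"
    vaultName: "<Vault Name>"
    client-id: "<Azure Key Vault Client ID>"
    client-secret: "<Azure Key Vault Client Secret>"
    tenant-id: "<Azure Key Vault Tenant ID>"
    create:
      enabled: false    # only relevant for the hashicorp type
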
@ -141,13 +184,12 @@ vault:
# The internal port that the snapshot server listens on
internalPort: 8300

# Configurations for the storage of the created Hashicorp Vault instance
storage:
# The storage class to use for the created Hashicorp Vault instance's Persistent Volume Claim
storageClass: <Storage Class>
appRole:
# The name of the environment variable/secret that contains the Role ID for the AppRole used by the app
roleIDSecretName: VAULT_ROLE_ID

# The size of the created Hashicorp Vault instance's Persistent Volume Claim
size: <Storage Size>
# The name of the environment variable/secret that contains the Secret ID for the AppRole used by the app
secretIDSecretName: VAULT_SECRET_ID

# The capabilities of the policy to create for the app
policyCapabilities:

@ -155,74 +197,22 @@ vault:
- create
- update

# The name of the vault instance to connect to
#
# This is relevant if type is set to `hashicorp` or `azure`
# Note, if `create` is true this is ignored
#
# For `hashicorp`, this is generally the hostname of the Hashicorp Vault instance to connect to
# For `azure`, this is the name of the Azure Key Vault instance to connect to
#vaultName: "<Vault Name>"

# The port of the vault instance to connect to
#
# ONLY RELEVANT IF `type` IS SET TO `hashicorp` AND `create` IS NOT TRUE
#vaultPort: <Vault Port>

# The client ID of the Azure Key Vault instance
#
# ONLY RELEVANT IF `type` IS SET TO `azure`
#client-id: <Azure Key Vault Client ID>

# The client secret of the Azure Key Vault instance
#
# ONLY RELEVANT IF `type` IS SET TO `azure`
#client-secret: <Azure Key Vault Client Secret>

# The tenant ID of the Azure Key Vault instance
#
# ONLY RELEVANT IF `type` IS SET TO `azure`
#tenant-id: <Azure Key Vault Tenant ID>

# Configuration for the NoSQL database
# Within the parlance of the system these are often called "properties" databases (and store less structured data)
# See the [NoSQL Deployment Helm Chart](https://git.bridgemanaccessible.ca/Bridgeman-Accessible/nosql-deploy-helm) for more information
nosql:
# If a NoSQL database should be used
enabled: true

# Determines the type of NoSQL storage that is used
#
# The following table lists the possible values for this field:
#
# | Value | Description |
# | --------- | ------------------------------------------------------------------------------------------ |
# | `mongodb` | Uses MongoDB as the NoSQL database for the default account properties database |
# | `azure` | Uses Azure Table Storage as the NoSQL database for the default account properties database |
#
type: <NoSQL Storage Type>

# If to create a resource as part of the deployment process
# ONLY relevant if `type` is set to `mongodb`
# This uses the [MongoDBCommunity CRD](https://github.com/mongodb/mongodb-kubernetes-operator) to create the resource
create: false

# The number of replicas/members as part of the Mongo deployment
# See the `members` parameter of the [MongoDBCommunity CRD](https://github.com/mongodb/mongodb-kubernetes-operator) for more information
# ONLY relevant if `type` is set to `mongodb` and `create` is set to `true`
#replicaCount: <Number of replicas>

# The TLS configuration for the connection to the NoSQL database
# ONLY relevant if `type` is set to `mongodb` and `create` is set to `true`
tls:
# If to use TLS for the connection to the NoSQL database
create:
# If to create a resource as part of the deployment process
enabled: <true/false>

# The connection string used to access the NoSQL database
# ONLY relevant if `type` is set to `mongodb` and `create` is set to `false`
# Should be in the following format: `mongodb://<hostname>:<port>`
#connectionString: "mongodb://mongo.example.com:27017"

# The key used to access the NoSQL database
# ONLY relevant if `type` is set to `azure`
#key: ""

# The name of the NoSQL database
name: "<NoSQL DB Name>"

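As a minimal sketch for pointing at an existing, externally managed MongoDB (the hostname mirrors the format example in the comment above; whether the flat `create: false` or the nested `create.enabled: false` form applies depends on which side of this diff is in use):

  nosql:
    enabled: true
    type: mongodb
    create: false
    connectionString: "mongodb://mongo.example.com:27017"
    name: "<NoSQL DB Name>"
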
@ -240,40 +230,28 @@ nosql:
grouping:
<Table Intermediate Name>: "<NoSQL Table Name>"

# Configuration for cache server
# Configuration for Redis cache
# See the [Cache Deployment Helm Chart](https://git.bridgemanaccessible.ca/Bridgeman-Accessible/cache-deploy-helm) for more information
cache:
# If a cache (Redis) should be used
enabled: true

# This override tells the helper: "Ignore the alias (Chart Name - `cache`), use this string for K8s resources instead"
nameOverride: "redis"

type: "redis"

# Configurations if creating Redis resources as part of the deployment
create:
# If to create a Redis instance/resource as part of the deployment process
create: false
enabled: true

# The image to use for the Redis instance
# ONLY relevant if `create` is set to `true`
image: {}

# The number of replicas of the Redis instance
# ONLY relevant if `create` is set to `true`
#replicaCount: <Number of replicas (Ex. 1)>

# Hostname of the Redis server
# ONLY relevant if `create` is set to `false`
#hostName: "<Redis Host Name>"
redisData:
size: 2Gi

# The password to use for the Redis server
password: "<Redis Password>"

# The port of the Redis server
port: "<Redis Port>"

# Redis TLS Configurations
tls:
# If TLS is enabled for the Redis instance
enabled: false

# The port of the Redis instance for TLS
# ONLY relevant if `tls.enabled` is set to `true`
#port: "<TLS Port (Ex. 6380)>"

# Configurations for communication services/relays
comms:
# Configurations for Email

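Conversely, a hedged example of the `cache` block for an already running Redis rather than one created by the chart (hostname, password, and port are placeholders; `hostName` is placed directly under `cache` as the comments imply):

  cache:
    enabled: true
    type: "redis"
    create:
      enabled: false
    hostName: "redis.example.com"
    password: "<Redis Password>"
    port: "6379"
    tls:
      enabled: false
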
@ -362,6 +340,9 @@ loggingSidecar:
# If the logging sidecar should be used (enabled)
enabled: true

# This override tells the helper: "Ignore the alias (Chart Name - `loggingSidecar`), use this string for K8s resources instead"
nameOverride: "logging-sidecar"

# The image used for the logging sidecar
image:
repository: "<repository>/<Logging Sidecar Image Name>"

@ -379,6 +360,16 @@ loggingSidecar:
username: "<Log Aggregator (OpenObserve) Username>"
password: "<Log Aggregator (OpenObserve) Password>"

resources:
requests:
cpu: 20m
memory: 128Mi
ephemeralStorage: 50Mi
limits:
cpu: 200m
memory: 256Mi
ephemeralStorage: 200Mi

# Configuration for the backup sidecar
backupSidecar:
# If the backup sidecar should be used (enabled)

@ -395,3 +386,13 @@ backupSidecar:

# The port that the backup sidecar listens on
port: 3001

resources:
requests:
cpu: 20m
memory: 128Mi
ephemeralStorage: 50Mi
limits:
cpu: 500m
memory: 256Mi
ephemeralStorage: 200Mi