Initial code commit + initial automation stuff + etc...
All checks were successful
Deploy the Helm Chart / build (push) Successful in 7s

This commit is contained in:
Alan Bridgeman 2025-12-18 22:23:37 -06:00
parent 6f24ea7563
commit f40cbdf6dd
12 changed files with 462 additions and 1 deletions

View file

@ -0,0 +1,149 @@
name: Deploy the Helm Chart
on:
  push:
    branches:
      - main
  workflow_dispatch:
jobs:
  # "Build" from the source code
  build:
    runs-on: self-hosted
    outputs:
      chart-name: ${{ steps.update-helm-repo.outputs.CHART_NAME }}
    steps:
      # Get the source code from the repository
      - name: Checkout repository
        uses: actions/checkout@v4
      # "Build"/package the source code into the appropriate format (Helm chart etc...)
      - name: Update Helm Repository (if needed)
        id: update-helm-repo
        run: |
          # Parse the chart name from the Chart.yaml
          CHART_NAME=$(yq '.name' Chart.yaml)
          CHART_NAME=${CHART_NAME#\"} # Remove leading quote
          CHART_NAME=${CHART_NAME%\"} # Remove trailing quote
          echo "CHART_NAME=$CHART_NAME" >> $GITHUB_OUTPUT
          echo "Chart Name: $CHART_NAME"

          # Note, this depends on the [Harbor Helm Index](https://git.bridgemanaccessible.ca/Bridgeman-Accessible/harbor-helm-index) pretty heavily
          # In particular, that tool allows us to treat what is an OCI registry as a Helm repository (which includes using `helm search repo`)
          #helm repo add BridgemanAccessible https://helm.bridgemanaccessible.ca

          # Check if the chart is already in the repository or not
          #REMOTE_CHART_WORK_OUTPUT=$(helm search repo BridgemanAccessible/$CHART_NAME 2>/dev/null || echo "")
          #if [ -n "$REMOTE_CHART_WORK_OUTPUT" ]; then
          #    # The chart is already in the repository, so we need to check if the version is the same or not
          #    echo "Chart already exists in the repository. Checking version..."
          #
          #    IFS=' ' read -r -a REMOTE_VERSIONS <<< $(helm search repo BridgemanAccessible/$CHART_NAME --output json | jq '.[].version | @sh')
          #
          #    echo "Remote Chart Versions: ${REMOTE_VERSIONS[@]}"
          #else
          #    # The chart is not in the repository, so we'll need to add it
          #    echo "Chart not found in the repository. Adding it..."
          #
          #    # Set a blank value so that it WON'T match the local version
          #    IFS=' ' read -r -a REMOTE_VERSIONS <<< ""
          #fi

          # Just to keep things clean/safe/etc... remove the repo
          #helm repo remove BridgemanAccessible

          # Get the local version from the Chart.yaml file
          LOCAL_VERSION=$(yq '.version' Chart.yaml)
          LOCAL_VERSION=${LOCAL_VERSION#\"} # Remove leading quote
          LOCAL_VERSION=${LOCAL_VERSION%\"} # Remove trailing quote
          echo "Local Chart Version: $LOCAL_VERSION"

          #has_match='false'
          #if [ ${#REMOTE_VERSIONS[@]} -gt 0 ]; then
          #    # Loop through the remote tags and check if any of them match the local version
          #    for REMOTE_VERSION in ${REMOTE_VERSIONS[@]}; do
          #        REMOTE_VERSION=${REMOTE_VERSION#\'} # Remove leading quote
          #        REMOTE_VERSION=${REMOTE_VERSION%\'} # Remove trailing quote
          #
          #        # Check if the remote tag is the same as the local tag
          #        if [ "$REMOTE_VERSION" == "$LOCAL_VERSION" ]; then
          #            echo "Remote version matches local version!"
          #            has_match='true'
          #            break
          #        fi
          #    done
          #fi

          # If the versions match, we want to increment the chart's patch version
          # BUG FIX (in the commented-out code): the original test was
          # `[ "has_match" ]`, which tests a non-empty literal string and is
          # therefore ALWAYS true; it must compare the variable's value.
          #if [ "$has_match" == "true" ]; then
          #    echo "Versions match!"
          #
          #    # Increment the patch version of the local version (Ex. 1.0.0 -> 1.0.1)
          #    IFS='.' read -r major minor patch <<< "$LOCAL_VERSION"
          #    patch=$((patch + 1))
          #    NEW_LOCAL_VERSION="$major.$minor.$patch"
          #
          #    echo "New Local Version: $NEW_LOCAL_VERSION"
          #    echo "Committing new chart version change..."
          #
          #    sed -i "s|version: \"$LOCAL_VERSION\"|version: \"$NEW_LOCAL_VERSION\"|g" Chart.yaml
          #
          #    LOCAL_VERSION=$NEW_LOCAL_VERSION
          #
          #    # Update remote URL to use the GITHUB_TOKEN for authentication
          #    git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@git.bridgemanaccessible.ca/${{ github.repository }}.git
          #
          #    # Setup git user details for committing the version change and tag
          #    git config user.name "Forgejo Actions"
          #    git config user.email "actions@git.bridgemanaccessible.ca"
          #
          #    # Commit the version change to the `Chart.yaml` file
          #    git add Chart.yaml
          #    git commit -m "[Forgejo Actions] Update Helm chart version to $(yq -r '.version' Chart.yaml)"
          #
          #    # Push the changes to the repository
          #    git push origin HEAD:main
          #else
          #    echo "Versions do not match!"
          #fi

          #helm package .
          #helm push ./$CHART_NAME-$LOCAL_VERSION.tgz oci://${{ secrets.REPOSITORY_HOSTNAME }}/helm
  # Deploy to our environment (staging cluster)
  #deploy:
  #  runs-on: self-hosted
  #  needs: build
  #  outputs:
  #    backup-created: ${{ steps.backup.outputs.created }}
  #    restore-time: ${{ steps.deploy-helm-chart.outputs.RESTORE_TIME }}
  #  env:
  #    NAMESPACE: custom-vault
  #    RELEASE_NAME: custom-vault
  #    CHART_NAME: ${{ needs.build.outputs.chart-name }}
  #  steps:
  #    # Deploy the resources to the cluster
  #    - name: Deploy Helm Chart
  #      id: deploy-helm-chart
  #      run: |
  #        FILLED_VALUES_FILE="values.filled.yaml"
  #
  #        # Download a filled version of the `values.yaml` file from a secure location
  #        DOWNLOAD_FILE=$(curl -sSL https://secure-storage.bridgemanaccessible.ca/services-dashboard/values.filled.yaml?token=${{ secrets.SECURE_STORAGE_TOKEN }} -o $FILLED_VALUES_FILE || echo "Failed to download filled values file.")
  #        if [ "$DOWNLOAD_FILE" == "Failed to download filled values file." ]; then
  #          echo "Error: $DOWNLOAD_FILE"
  #          exit 1
  #        fi
  #
  #        # Parse the chart name from the Chart.yaml
  #        CHART_NAME=${{ env.CHART_NAME }}
  #        echo "Chart Name: $CHART_NAME"
  #
  #        # Can run `k8s-deploy --help` if you want to see all the options available
  #        k8s-deploy \
  #          --namespace ${{ env.NAMESPACE }} \
  #          --release-name ${{ env.RELEASE_NAME }} \
  #          --filled-values-file $FILLED_VALUES_FILE \
  #          --chart-name $CHART_NAME \
  #          --rwx-volumes vault-role-vars

11
Chart.yaml Normal file
View file

@ -0,0 +1,11 @@
# Helm chart metadata for the Bridgeman Accessible customized Hashicorp Vault.
# NOTE(review): `apiVersion: v1` is the legacy (Helm 2-era) chart API; Helm 3
# still reads it, but `v2` is current — confirm before upgrading.
apiVersion: v1
appVersion: "1.0.14"
description: A Helm chart for deploying Bridgeman Accessible Customized Hashicorp Implementation
home: https://git.bridgemanaccessible.ca/Bridgeman-Accessible/custom-hashicorp-vault-helm
maintainers:
  - email: info@bridgemanaccessible.ca
    name: Bridgeman Accessible
name: ba-custom-hashicorp-vault
sources:
  - https://git.bridgemanaccessible.ca/Bridgeman-Accessible/custom-hashicorp-vault
version: "1.0.0"

View file

@ -1,2 +1,13 @@
# Customized Hashicorp Vault Implementation - Helm Chart
This repository is meant to hold the Helm chart used to deploy the customized Hashicorp Vault implementation to a Kubernetes cluster via Helm.
## Including as a dependency
To use this chart as a dependency:
```yaml
dependencies:
  - name: ba-custom-hashicorp-vault
    version: 1.0.0
    repository: "https://charts.bridgemanaccessible.ca/"
    condition: vault.enabled
```

View file

@ -0,0 +1,15 @@
{{- if .Values.vault.create.enabled -}}
# PVC backing the /vault/creds mount of the created Vault StatefulSet.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ .Release.Name }}-vault-creds
  labels:
    app: {{ .Release.Name }}-vault
spec:
  # NOTE(review): this reuses the "vault-role-vars-rook-cephfs" storage class
  # also used by the role-vars PVC — confirm a creds-specific class isn't intended.
  storageClassName: vault-role-vars-rook-cephfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 512Mi
{{- end -}}

View file

@ -0,0 +1,24 @@
{{- if and (.Values.vault.enabled) (eq .Values.vault.type "hashicorp") -}}
# Connection details (name + port) for the Hashicorp Vault instance,
# exposed to consumers as an opaque Secret.
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Release.Name }}-vault-secret
# BUG FIX: the Secret `type` field is case-sensitive; Kubernetes expects
# "Opaque" — a lowercase "opaque" is rejected by the API server.
type: Opaque
data:
  {{- if .Values.vault.create.enabled }}
  # Because we create the Hashicorp Vault instance as part of the Helm chart,
  # we can use the name of the created resource (utilizing k8s built-in container connections)
  # to connect to the Vault instance without having to hard-code the Vault name.
  vault-name: {{ printf "%s-vault" .Release.Name | b64enc }}
  # Because we create the Hashicorp Vault instance as part of the Helm chart,
  # we know the port that the Vault instance is running on.
  vault-port: {{ printf "%d" 80 | b64enc }}
  {{- else }}
  # Because the Vault wasn't created as part of the Helm chart,
  # we need the deployer to specify the name of the Vault instance to connect to.
  vault-name: {{ .Values.vault.vaultName | b64enc }}
  # Because the Vault wasn't created as part of the Helm chart,
  # we need the deployer to specify the port that the Vault instance is running on.
  # BUG FIX: `b64enc` only accepts strings; values.yaml documents vaultPort as a
  # number (e.g. 8200), which would fail to render — coerce via toString first.
  vault-port: {{ .Values.vault.vaultPort | toString | b64enc }}
  {{- end }}
{{- end -}}

View file

@ -0,0 +1,21 @@
{{- if .Values.vault.create.ingress.enabled -}}
# Ingress routing external HTTP traffic for the configured host to the
# chart-created Vault Service on port 80.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ .Release.Name }}-vault-ingress
  labels:
    app: {{ .Release.Name }}-vault
spec:
  ingressClassName: nginx
  rules:
    - host: {{ .Values.vault.create.ingress.host }}
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: {{ .Release.Name }}-vault
                port:
                  number: 80
{{- end -}}

View file

@ -0,0 +1,12 @@
{{- if and (.Values.vault.enabled) (eq .Values.vault.type "azure") -}}
# Credentials for connecting to an Azure Key Vault instance
# (service-principal client id/secret, tenant id, and the vault name).
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Release.Name }}-vault-secret
# BUG FIX: the Secret `type` field is case-sensitive; Kubernetes expects
# "Opaque" — a lowercase "opaque" is rejected by the API server.
type: Opaque
data:
  client-id: {{ .Values.vault.clientId | b64enc }}
  client-secret: {{ .Values.vault.clientSecret | b64enc }}
  name: {{ .Values.vault.vaultName | b64enc }}
  tenant-id: {{ .Values.vault.tenantId | b64enc }}
{{- end -}}

View file

@ -0,0 +1,15 @@
{{- if .Values.vault.create.enabled -}}
# PVC backing the /role_vars mount of the created Vault StatefulSet.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ .Release.Name }}-vault-role-vars
  labels:
    app: {{ .Release.Name }}-vault
spec:
  storageClassName: vault-role-vars-rook-cephfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 512Mi
{{- end -}}

View file

@ -0,0 +1,22 @@
{{- if .Values.vault.create.enabled -}}
# Service exposing the chart-created Vault pod: HTTP on port 80 (-> 8200),
# plus the optional snapshot-server port when enabled.
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}-vault
  labels:
    app: {{ .Release.Name }}-vault
spec:
  selector:
    app: {{ .Release.Name }}-vault
  ports:
    {{- if .Values.vault.create.snapshotServer.enabled }}
    - name: custom-snapshot-server
      protocol: TCP
      port: {{ .Values.vault.create.snapshotServer.externalPort }}
      targetPort: {{ .Values.vault.create.snapshotServer.internalPort }}
    {{- end }}
    - name: http
      protocol: TCP
      port: 80
      targetPort: 8200
{{- end -}}

View file

@ -0,0 +1,8 @@
{{- if .Values.vault.create.snapshotServer.enabled -}}
# Publishes the snapshot server's external port so consumers can discover it.
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-vault-snapshot-config
data:
  # ConfigMap data values must be strings, hence the quote.
  port: {{ .Values.vault.create.snapshotServer.externalPort | quote }}
{{- end -}}

View file

@ -0,0 +1,78 @@
{{- if and (.Values.vault.create.enabled) (eq .Values.vault.type "hashicorp") -}}
# Single-replica StatefulSet running the customized Hashicorp Vault image,
# with persistent data storage via a volumeClaimTemplate.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ .Release.Name }}-vault
  labels:
    app: {{ .Release.Name }}-vault
spec:
  serviceName: {{ .Release.Name }}-vault
  replicas: 1
  selector:
    matchLabels:
      app: {{ .Release.Name }}-vault
  template:
    metadata:
      labels:
        app: {{ .Release.Name }}-vault
    spec:
      # Because the Vault process runs with a non-root user inside the container,
      # we need to set the fsGroup to ensure that the mounted volumes have the correct permissions
      securityContext:
        fsGroup: 1000
      containers:
        - name: {{ .Release.Name }}-vault
          image: {{ .Values.vault.create.image.repository }}:{{ .Values.vault.create.image.tag }}
          ports:
            - containerPort: 8200
            - containerPort: 8201
            {{- if .Values.vault.create.snapshotServer.enabled }}
            - containerPort: {{ .Values.vault.create.snapshotServer.internalPort }}
            {{- end }}
          env:
            - name: VAULT_ADDR
              value: "http://0.0.0.0:8200"
            - name: ROLE_ID_SECRET_NAME
              value: USER_PASSWORD_DEFAULT_VAULT_ROLE_ID
            - name: SECRET_ID_SECRET_NAME
              value: USER_PASSWORD_DEFAULT_VAULT_SECRET_ID
            {{- if .Values.vault.create.snapshotServer.enabled }}
            - name: SNAPSHOT_SERVER_PORT
              value: {{ .Values.vault.create.snapshotServer.internalPort | quote }}
            {{- end }}
          volumeMounts:
            - name: vault-data
              mountPath: /vault/data
            - name: vault-log
              mountPath: /vault/logs
            - name: vault-creds
              mountPath: /vault/creds
            - name: vault-role-vars
              mountPath: /role_vars
          # BUG FIX: `capAdd` is Docker Compose syntax and is not a valid field
          # on a Kubernetes container; Linux capabilities are granted through
          # securityContext.capabilities.add.
          securityContext:
            capabilities:
              add:
                - IPC_LOCK
      volumes:
        - name: vault-log
          emptyDir: {}
        - name: vault-creds
          # NOTE(review): the vault-creds PVC template is created whenever
          # vault.create.enabled is true, but it is only consumed here when the
          # snapshot server is enabled — confirm that gating is intended.
          {{- if .Values.vault.create.snapshotServer.enabled }}
          persistentVolumeClaim:
            claimName: {{ .Release.Name }}-vault-creds
          {{- else }}
          emptyDir: {}
          {{- end }}
        - name: vault-role-vars
          persistentVolumeClaim:
            claimName: {{ .Release.Name }}-vault-role-vars
  # To have data consistency across pod restarts, we use a volume claim template
  # We do this instead of a static PVC because we require less control and it's easier to manage
  volumeClaimTemplates:
    - metadata:
        name: vault-data
      spec:
        storageClassName: rook-ceph-block
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 2Gi
{{- end -}}

95
values.yaml Normal file
View file

@ -0,0 +1,95 @@
# Configurations for the vault
vault:
  # If a password "vault" should be used
  # That is, if a dedicated software for secret management should be used
  # This should virtually always be true as it's the most secure option
  enabled: true
  # The type of password "vault" (or storage if `enabled` is `false`) to use.
  #
  # Vaults
  # ------
  # The following table lists the supported "vault" types:
  #
  # | Type        | Description          | Current Status | Required Fields                                     |
  # | ----------- | -------------------- | -------------- | --------------------------------------------------- |
  # | `hashicorp` | Uses Hashicorp Vault | Implemented    | `vaultName` (if `create` not true)                  |
  # | `azure`     | Uses Azure Key Vault | Implemented    | `vaultName`, `clientId`, `clientSecret`, `tenantId` |
  #
  # Storage
  # -------
  # The following table lists the supported storage types.
  # These are methods OUTSIDE of a dedicated "vault" software.
  # These are generally **discouraged** as they are less secure.
  #
  # | Type        | Description                     | Current Status | Required Fields  |
  # | ----------- | ------------------------------- | -------------- | ---------------- |
  # | `file`      | Uses a file                     | To-Do          | `path`           |
  # | `mem`       | Uses in-memory (no persistence) | To-Do          | N/A              |
  #
  type: 'hashicorp'
  # The name of the vault instance to connect to
  #
  # This is relevant if type is set to `hashicorp` or `azure`
  # Note, if `create` is true this is ignored
  #
  # For `hashicorp`, this is generally the hostname of the Hashicorp Vault instance to connect to
  # For `azure`, this is the name of the Azure Key Vault instance to connect to
  #vaultName: ''
  # The port of the vault instance to connect to
  #
  # ONLY RELEVANT IF `type` IS SET TO `hashicorp` AND `create` IS NOT TRUE
  #vaultPort: 8200
  # The client ID of the Azure Key Vault instance
  #
  # ONLY RELEVANT IF `type` IS SET TO `azure`
  # BUG FIX: the azure secret template reads `.Values.vault.clientId` (camelCase);
  # the previous commented example used `client-id`, which would never be read.
  #clientId: ""
  # The client secret of the Azure Key Vault instance
  #
  # ONLY RELEVANT IF `type` IS SET TO `azure`
  # BUG FIX: template reads `clientSecret`, not `client-secret`.
  #clientSecret: ""
  # The tenant ID of the Azure Key Vault instance
  #
  # ONLY RELEVANT IF `type` IS SET TO `azure`
  # BUG FIX: template reads `tenantId`, not `tenant-id`.
  #tenantId: ""
  # Configurations to create a Hashicorp Vault instance as part of the Helm chart
  #
  # THIS IS ONLY RELEVANT IF `type` IS SET TO `hashicorp`
  create:
    # If a Hashicorp Vault instance should be created as part of the Helm chart
    enabled: true
    # Configurations for the image to use if creating the Hashicorp Vault instance
    # as part of the Helm chart
    image:
      # The repository of the image to use
      repository: <redacted-private-repo>/ba-custom-hashicorp-vault
      # The tag of the image to use
      tag: latest
    # Configurations for the ingress of the created Hashicorp Vault instance
    ingress:
      # If an ingress should be created for the created Hashicorp Vault instance
      enabled: true
      # The host of the ingress for the created Hashicorp Vault instance
      host: vault.<redacted-private-hostname>
    # As part of the custom Hashicorp Vault image it includes a "snapshot server"
    # This allows for the triggering and receiving of manual backups ("snapshots") and restoration via HTTP
    snapshotServer:
      # If the snapshot server should be enabled/running
      enabled: true
      # The external port that is opened via the service resource
      externalPort: 81
      # The internal port that the snapshot server listens on
      internalPort: 8300