Initial code commit
This commit is contained in:
parent
cf72c0fd43
commit
75d003a3be
12 changed files with 1203 additions and 0 deletions
122
.github/workflows/deploy.yml
vendored
Normal file
122
.github/workflows/deploy.yml
vendored
Normal file
|
|
@ -0,0 +1,122 @@
|
||||||
|
name: Build and deploy Bridgeman Accessible Hashicorp Vault Implementation
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
deploy:
|
||||||
|
runs-on: self-hosted
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
# Build a new container image from the code
|
||||||
|
- name: Update Image
|
||||||
|
run: |
|
||||||
|
# Parse information from the metadata.yaml file
|
||||||
|
IMAGE_NAME=$(yq '.name' metadata.yaml)
|
||||||
|
IMAGE_NAME=${IMAGE_NAME#\"} # Remove leading quote
|
||||||
|
IMAGE_NAME=${IMAGE_NAME%\"} # Remove trailing quote
|
||||||
|
echo "Image Name: $IMAGE_NAME"
|
||||||
|
echo "IMAGE_NAME=$IMAGE_NAME" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
LOCAL_VERSION=$(yq '.version' metadata.yaml)
|
||||||
|
LOCAL_VERSION=${LOCAL_VERSION#\"} # Remove leading quote
|
||||||
|
LOCAL_VERSION=${LOCAL_VERSION%\"} # Remove trailing quote
|
||||||
|
echo "Image Local Version: $LOCAL_VERSION"
|
||||||
|
|
||||||
|
REMOTE_TAGS_WORK_OUTPUT=$(skopeo list-tags docker://${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$IMAGE_NAME 2>/dev/null || echo "")
|
||||||
|
if [ -n "$REMOTE_TAGS_WORK_OUTPUT" ]; then
|
||||||
|
IFS=' ' read -r -a REMOTE_TAGS <<< $(skopeo list-tags docker://${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$IMAGE_NAME | jq -r '.Tags | @sh')
|
||||||
|
else
|
||||||
|
echo "Image not found in the repository. Will need to add it..."
|
||||||
|
|
||||||
|
# Set a blank value so that it WON'T match the local version
|
||||||
|
IFS=' ' read -r -a REMOTE_TAGS <<< ""
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Remote Tags (number: ${#REMOTE_TAGS[@]}): ${REMOTE_TAGS[@]}"
|
||||||
|
|
||||||
|
has_match='false'
|
||||||
|
if [ ${#REMOTE_TAGS[@]} -gt 0 ]; then
|
||||||
|
# Loop through the remote tags and check if any of them match the local version
|
||||||
|
for REMOTE_TAG in ${REMOTE_TAGS[@]}; do
|
||||||
|
REMOTE_TAG=${REMOTE_TAG#\'} # Remove leading quote
|
||||||
|
REMOTE_TAG=${REMOTE_TAG%\'} # Remove trailing quote
|
||||||
|
|
||||||
|
# Check if the remote tag is the same as the local tag
|
||||||
|
if [ "$REMOTE_TAG" == "v$LOCAL_VERSION" ]; then
|
||||||
|
echo "Remote version matches local version!"
|
||||||
|
has_match='true'
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
# If a remote tag that matches the local version already exists, increment the local version's patch version
|
||||||
|
if [ "$has_match" == 'true' ]; then
|
||||||
|
echo "Because the remote version matches the local version, we need to increment the local version's patch number."
|
||||||
|
|
||||||
|
# Increment the patch version of the local version (Ex. 1.0.0 -> 1.0.1)
|
||||||
|
IFS='.' read -r major minor patch <<< "$LOCAL_VERSION"
|
||||||
|
patch=$((patch + 1))
|
||||||
|
NEW_LOCAL_VERSION="$major.$minor.$patch"
|
||||||
|
|
||||||
|
echo "New Local Version: $NEW_LOCAL_VERSION"
|
||||||
|
echo "Committing container version change..."
|
||||||
|
|
||||||
|
sed -i "s|version: $LOCAL_VERSION|version: $NEW_LOCAL_VERSION|g" metadata.yaml
|
||||||
|
|
||||||
|
LOCAL_VERSION=$NEW_LOCAL_VERSION
|
||||||
|
|
||||||
|
# Update remote URL to use the GITHUB_TOKEN for authentication
|
||||||
|
git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git
|
||||||
|
|
||||||
|
# Setup git user details for committing the version change and tag
|
||||||
|
git config user.name "GitHub Actions"
|
||||||
|
git config user.email "actions@github.com"
|
||||||
|
|
||||||
|
# Commit the version change to the `metadata.yaml` file
|
||||||
|
git add metadata.yaml
|
||||||
|
git commit -m "[Github Actions] Update container version to $(jq -r '.version' package.json)"
|
||||||
|
|
||||||
|
# Push the changes to the repository
|
||||||
|
git push origin HEAD:main
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Build and push the init container image to the repository
|
||||||
|
docker build -t ${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$IMAGE_NAME:v$LOCAL_VERSION .
|
||||||
|
docker push ${{ secrets.REPOSITORY_HOSTNAME }}/k8s/$IMAGE_NAME:v$LOCAL_VERSION
|
||||||
|
|
||||||
|
# Note, this is the version NOT the tag
|
||||||
|
# This is because the `update-k8s-deployment-image` script automatically prepends the `v` to the version
|
||||||
|
echo "CONTAINER_IMAGE_VERSION=$LOCAL_VERSION" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: Update vault images in various deployments
|
||||||
|
run: |
|
||||||
|
# Update the Hashicorp Vault instance for the Account Dashboard
|
||||||
|
update-k8s-deployment-image \
|
||||||
|
--image-version ${{ env.CONTAINER_IMAGE_VERSION }} \
|
||||||
|
--namespace ciam-account-dashboard \
|
||||||
|
--deployment-name ciam-account-pass-vault \
|
||||||
|
--container-name account-pass-vault \
|
||||||
|
--image-name ${{ secrets.REPOSITORY_HOSTNAME }}/k8s/${{ env.IMAGE_NAME }}
|
||||||
|
|
||||||
|
# Update the Hashicorp Vault instance for the Services Dashboard
|
||||||
|
#update-k8s-deployment-image \
|
||||||
|
# --image-version ${{ env.CONTAINER_IMAGE_VERSION }} \
|
||||||
|
# --namespace ciam-services-dashboard \
|
||||||
|
# --deployment-name ciam-services-vault \
|
||||||
|
# --container-name services-vault \
|
||||||
|
# --image-name ${{ secrets.REPOSITORY_HOSTNAME }}/k8s/${{ env.IMAGE_NAME }}
|
||||||
|
|
||||||
|
# Update the Hashicorp Vault instance for the Accessible Events Platform (AEP)
|
||||||
|
#update-k8s-deployment-image \
|
||||||
|
# --image-version ${{ env.CONTAINER_IMAGE_VERSION }} \
|
||||||
|
# --namespace aep \
|
||||||
|
# --deployment-name aep-vault \
|
||||||
|
# --container-name aep-vault \
|
||||||
|
# --image-name ${{ secrets.REPOSITORY_HOSTNAME }}/k8s/${{ env.IMAGE_NAME }}
|
||||||
2
.gitignore
vendored
Normal file
2
.gitignore
vendored
Normal file
|
|
@ -0,0 +1,2 @@
|
||||||
|
# Environment Variables
|
||||||
|
.env
|
||||||
36
Dockerfile
Normal file
36
Dockerfile
Normal file
|
|
@ -0,0 +1,36 @@
|
||||||
|
FROM hashicorp/vault:latest
|
||||||
|
|
||||||
|
# Install Bash
|
||||||
|
RUN apk add --no-cache --upgrade bash
|
||||||
|
|
||||||
|
# Install python/pip (needed to run some of the initialization logic)
|
||||||
|
ENV PYTHONUNBUFFERED=1
|
||||||
|
RUN apk add --update --no-cache python3 && ln -sf python3 /usr/bin/python
|
||||||
|
RUN python3 -m venv .venv \
|
||||||
|
&& source .venv/bin/activate \
|
||||||
|
&& python -m ensurepip \
|
||||||
|
&& pip install --no-cache --upgrade pip setuptools
|
||||||
|
|
||||||
|
# Needed for parsing JSON in Bash (which is needed to parse the unseal keys and root token)
|
||||||
|
RUN apk add --no-cache jq
|
||||||
|
|
||||||
|
# Copy the Vault configuration file into the container
|
||||||
|
COPY vault-config.hcl /vault/config/vault-config.hcl
|
||||||
|
|
||||||
|
# Copy the startup script into the container (also verifying it's encoded properly)
|
||||||
|
COPY ./entrypoint.sh /entrypoint.sh
|
||||||
|
RUN dos2unix /entrypoint.sh
|
||||||
|
|
||||||
|
# Copy the Python startup stuff into the container
|
||||||
|
COPY ./setup-scripts /setup-scripts
|
||||||
|
|
||||||
|
# Copy the snapshot server Python code into the container
|
||||||
|
COPY ./snapshot-server /snapshot-server
|
||||||
|
|
||||||
|
# | Port | Purpose |
|
||||||
|
# | ---- | ------------------------------------------------------------------- |
|
||||||
|
# | 8200 | Vault API |
|
||||||
|
# | 8300 | Custom snapshot server (for creating and serving backups over HTTP) |
|
||||||
|
EXPOSE 8200 8300
|
||||||
|
|
||||||
|
ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
|
||||||
88
README.md
Normal file
88
README.md
Normal file
|
|
@ -0,0 +1,88 @@
|
||||||
|
# Customized Hashicorp Vault Implementation
|
||||||
|
This is a customized implementation of the Hashicorp Vault image.
|
||||||
|
|
||||||
|
This augments the standard Hashicorp Vault image in 2 main ways:
|
||||||
|
1. It sets up the Hashicorp Vault in production mode but sets it up with an app authentication and unseals the vault so that the app can interact with it.
|
||||||
|
2. It adds a "snapshot server" which allows serving and receiving vault backups/snapshots over HTTP
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
First, you need to clone the repository:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
git clone https://github.com/...
|
||||||
|
```
|
||||||
|
|
||||||
|
Next, you'll need to create a/the `.env` file (you can use the `.env.example` as a template)
|
||||||
|
|
||||||
|
```sh
|
||||||
|
cp .env.example .env
|
||||||
|
```
|
||||||
|
|
||||||
|
```sh
|
||||||
|
VAULT_NAME=pass-vault.identity.bridgemanaccessible.ca
|
||||||
|
```
|
||||||
|
|
||||||
|
## Volumes
|
||||||
|
For the purposes of this piece there are two types of volumes:
|
||||||
|
- [Host Mounted Volumes](#host-mounted-volumes)
|
||||||
|
- [Shared Volumes](#shared-volumes)
|
||||||
|
|
||||||
|
These are detailed below.
|
||||||
|
|
||||||
|
### Host Mounted Volumes
|
||||||
|
There are a number of volumes used that are intended to be mounted within the host system.
|
||||||
|
This is so that these files can be viewed or backed up easily by administrators rather than having to dive into the container's file system itself.
|
||||||
|
|
||||||
|
These Host Mounted Volumes include:
|
||||||
|
- [Configuration Directory](#configuration-directory-vaultconfig)
|
||||||
|
- [Logs Directory](#logs-directory-vaultlogs)
|
||||||
|
- [Data Directory](#data-directory-vaultdata)
|
||||||
|
- [Backup/Creds Directory](#backupcreds-directory-vaultcreds)
|
||||||
|
|
||||||
|
#### Configuration Directory (`/vault/config`)
|
||||||
|
This is where configuration files for the vault go.
|
||||||
|
In general, this doesn't require a lot of monitoring, maintenance or updating on a regular basis.
|
||||||
|
It only needs to be updated when a change to the configuration of the vault is happening/desired.
|
||||||
|
|
||||||
|
#### Logs Directory (`/vault/logs`)
|
||||||
|
This is where log files used for tasks like debugging are intended to go.
|
||||||
|
In general, the files in this directory are helpful but not thought of as critical.
|
||||||
|
|
||||||
|
#### Data Directory (`/vault/data`)
|
||||||
|
This is where all the actual data goes and therefore is a critical directory.
|
||||||
|
This directory should be backed up and monitored as if something happens to it the vault won't function or restart properly.
|
||||||
|
|
||||||
|
#### Backup/Creds Directory (`/vault/creds`)
|
||||||
|
This is where "backup" files are kept.
|
||||||
|
More specifically, things like a copy of the "root token" and "unseal keys" are kept here.
|
||||||
|
This is because these values aren't as easily available after initialization but can become required.
|
||||||
|
To that end, it's important to ensure this directory isn't shared too openly because these should be relatively confidential values.
|
||||||
|
|
||||||
|
### Shared Volumes
|
||||||
|
Unlike the [Host Mounted Volumes](#host-mounted-volumes) these volumes are intended to be shared across containers.
|
||||||
|
That is, are more geared for program access rather than human access.
|
||||||
|
|
||||||
|
You can add these using Docker Compose:
|
||||||
|
```yml
|
||||||
|
services:
|
||||||
|
app:
|
||||||
|
volumes:
|
||||||
|
- hashicorp-vault_role-vars:/role_vars
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
hashicorp-vault_role-vars:
|
||||||
|
external: true
|
||||||
|
```
|
||||||
|
|
||||||
|
Replacing `app` and `hashicorp-vault_role-vars` with the appropriate values.
|
||||||
|
|
||||||
|
Note, the volume's "full name" will likely be the name of the directory followed by an underscore followed by the volumes name.
|
||||||
|
Take the example above, the volume name is `role-vars` but the Docker compose is in and was run in the `hashicorp-vault` directory so the "full name" became `hashicorp-vault_role-vars`.
|
||||||
|
|
||||||
|
If you're unsure, you can check using `docker volume list`, which lists all the volumes.
|
||||||
|
|
||||||
|
These Shared Volumes include:
|
||||||
|
- [Role Vars](#role-vars-role_vars)
|
||||||
|
|
||||||
|
#### Role Vars (`/role_vars`)
|
||||||
|
This is where values that allow apps to use their appropriate "App Roles" are stored to be shared with the app.
|
||||||
48
docker-compose.yml
Normal file
48
docker-compose.yml
Normal file
|
|
@ -0,0 +1,48 @@
|
||||||
|
services:
|
||||||
|
# -------------------------
|
||||||
|
# Secrets/Secure Storage
|
||||||
|
#
|
||||||
|
# Mostly used for passwords
|
||||||
|
# -------------------------
|
||||||
|
pass-vault:
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
dockerfile: Dockerfile
|
||||||
|
container_name: accounts-dashboard_pass-vault
|
||||||
|
hostname: ${USER_PASSWORD_DEFAULT_VAULT_NAME}
|
||||||
|
restart: unless-stopped
|
||||||
|
environment:
|
||||||
|
#VAULT_DEV_ROOT_TOKEN_ID: ${USER_PASSWORD_DEFAULT_VAULT_TOKEN}
|
||||||
|
#VAULT_DEV_LISTEN_ADDRESS: '0.0.0.0:8200'
|
||||||
|
VAULT_ADDR: 'http://0.0.0.0:8200'
|
||||||
|
# Because we use a volume for the config we don't specify the config here
|
||||||
|
#VAULT_LOCAL_CONFIG: '{"storage": {"file": {"path": "/vault/file"}}}'
|
||||||
|
ROLE_ID_SECRET_NAME: USER_PASSWORD_DEFAULT_VAULT_ROLE_ID
|
||||||
|
SECRET_ID_SECRET_NAME: USER_PASSWORD_DEFAULT_VAULT_SECRET_ID
|
||||||
|
cap_add:
|
||||||
|
- IPC_LOCK
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "vault", "status"]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 5
|
||||||
|
ports:
|
||||||
|
- 8200:8200
|
||||||
|
volumes:
|
||||||
|
- ${CONFIG_DIR}:/vault/config
|
||||||
|
- ${LOGS_DIR}:/vault/logs
|
||||||
|
- ${DATA_DIR}:/vault/data
|
||||||
|
- ${CREDS_DIR}:/vault/creds
|
||||||
|
- role-vars:/role_vars
|
||||||
|
networks:
|
||||||
|
#- accounts_default
|
||||||
|
- vaults
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
role-vars:
|
||||||
|
|
||||||
|
networks:
|
||||||
|
#accounts_default:
|
||||||
|
# external: true
|
||||||
|
vaults:
|
||||||
|
driver: bridge
|
||||||
139
entrypoint.sh
Normal file
139
entrypoint.sh
Normal file
|
|
@ -0,0 +1,139 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# |*******************************************************************|
|
||||||
|
# | Setup script |
|
||||||
|
# | |
|
||||||
|
# | Author: Alan Bridgeman |
|
||||||
|
# | Created: 2024-03-30 |
|
||||||
|
# | |
|
||||||
|
# | COPYRIGHT © 2024 Bridgeman Accessible/Alan Bridgeman. |
|
||||||
|
# | |
|
||||||
|
# | This work is presented AS IS, with no warranty of any kind. |
|
||||||
|
# | Any modification or use of this script is at the user's own risk. |
|
||||||
|
# |*******************************************************************|
|
||||||
|
|
||||||
|
# The `entrypoint.sh` script is responsible for setting up the Vault server.
|
||||||
|
# It enables the AppRole auth method, creates a policy, and a role.
|
||||||
|
# It then retrieves the role_id and secret_id and stores them in a file (`/role_vars/.env`).
|
||||||
|
|
||||||
|
# Start and wait for the vault to get started
|
||||||
|
# Start the vault server in the background, then poll `vault status` until it
# responds (up to 10 attempts, 5 seconds apart). Exits the whole script with
# status 1 if the server never becomes reachable.
start_and_wait_for_vault() {
    echo "+----------------+"
    echo "| Starting Vault |"
    echo "+----------------+"

    # Start the vault server (in the background)
    vault server -config=/vault/config/vault-config.hcl & #-dev &

    echo "+------------------------------+"
    echo "| Wait for Vault to be Started |"
    echo "+------------------------------+"

    # Wait for the vault server to start
    retries=0
    poll_lock=false
    while [ "$poll_lock" = false ] && [ $retries -lt 10 ]; do
        # Sleep for 5 seconds (give some time before check and re-checking)
        # Note, we put this at the top of the loop so that there is no extra delay once we get the status
        sleep 5

        echo "Attempt $((retries + 1)) to check if vault has started"

        # Attempt to get the vault status
        vault status

        # Check if the exit code of the `vault status` command (last command) is 0 (success)
        # NOTE(review): inside the $( ... ) substitution, `$?` still holds the
        # exit status of `vault status` above. `vault status` exits 0 when
        # unsealed, 2 when sealed, and 1 on a connection error — so `-ne 1`
        # deliberately treats "reachable but sealed" as started.
        poll_lock=$(test $? -ne 1 && echo "true" || echo "false")

        # Increment the retries counter
        retries=$((retries + 1))
    done

    # If the vault server did not start, exit with an error
    if [ "$poll_lock" = false ]; then
        echo "Failed to start vault server"
        exit 1
    fi
}
|
||||||
|
|
||||||
|
# Function to initialize vault (including creating the root token and unseal keys)
|
||||||
|
#init_vault() {
|
||||||
|
# echo "*----------------------*"
|
||||||
|
# echo "| Initialization Vault |"
|
||||||
|
# echo "*----------------------*"
|
||||||
|
#
|
||||||
|
# # Initialize the vault
|
||||||
|
# init_output=$(vault operator init -format=json)
|
||||||
|
#
|
||||||
|
# # Parse the unseal keys and root token from the initialization response
|
||||||
|
# unseal_keys=$(echo $init_output | jq -r '.unseal_keys_b64[]')
|
||||||
|
# root_token=$(echo $init_output | jq -r '.root_token')
|
||||||
|
#
|
||||||
|
# mkdir /vault/creds
|
||||||
|
# touch /vault/creds/unseal-keys && echo "$unseal_keys" > /vault/creds/unseal-keys
|
||||||
|
# touch /vault/creds/root-token && echo "$root_token" > /vault/creds/root-token
|
||||||
|
#}
|
||||||
|
|
||||||
|
# Function to check if the vault is unsealed
|
||||||
|
#is_vault_unsealed() {
|
||||||
|
# seal_status=$(vault status -format=json | jq -r '.sealed')
|
||||||
|
# if [[ $seal_status == "false" ]]; then
|
||||||
|
# return 0
|
||||||
|
# else
|
||||||
|
# return 1
|
||||||
|
# fi
|
||||||
|
#}
|
||||||
|
|
||||||
|
# Function to unseal the vault
|
||||||
|
#unseal_vault() {
|
||||||
|
# echo "*-----------------*"
|
||||||
|
# echo "| Unsealing Vault |"
|
||||||
|
# echo "*-----------------*"
|
||||||
|
#
|
||||||
|
# # Use each key to unseal the vault
|
||||||
|
# for key in $unseal_keys; do
|
||||||
|
# vault operator unseal $key
|
||||||
|
#
|
||||||
|
# # If the vault is now unsealed break/escape from the loop
|
||||||
|
# if is_vault_unsealed; then
|
||||||
|
# echo "Vault is unsealed"
|
||||||
|
# break
|
||||||
|
# fi
|
||||||
|
# done
|
||||||
|
#}
|
||||||
|
|
||||||
|
# Function to setup the secrets engine
|
||||||
|
#setup_secrets_engine() {
|
||||||
|
# echo "*---------------------------*"
|
||||||
|
# echo "| Setting up secrets engine |"
|
||||||
|
# echo "*---------------------------*"
|
||||||
|
#
|
||||||
|
# vault login $root_token
|
||||||
|
# vault secrets enable -path secret kv
|
||||||
|
#}
|
||||||
|
|
||||||
|
#setup_app_role_access() {
|
||||||
|
# echo "*----------------------------*"
|
||||||
|
# echo "| Setting up App Role access |"
|
||||||
|
# echo "*----------------------------*"
|
||||||
|
#
|
||||||
|
# # Run the custom entrypoint Python script
|
||||||
|
# python3 /entrypoint.py $root_token
|
||||||
|
#}
|
||||||
|
|
||||||
|
start_and_wait_for_vault
|
||||||
|
|
||||||
|
python3 /setup-scripts/prod-setup.py
|
||||||
|
|
||||||
|
#init_vault
|
||||||
|
#unseal_vault
|
||||||
|
#setup_secrets_engine
|
||||||
|
#setup_app_role_access
|
||||||
|
|
||||||
|
# Start the snapshot server in the background
|
||||||
|
# This is a custom server that is used to manually trigger and then return the snapshot over HTTP
|
||||||
|
python3 /snapshot-server/server.py > /var/log/snapshot-server.log 2>&1 &
|
||||||
|
|
||||||
|
# Keep the container running
|
||||||
|
# By "following" the log file
|
||||||
|
tail -f /vault/logs/vault-audit.log
|
||||||
8
metadata.yaml
Normal file
8
metadata.yaml
Normal file
|
|
@ -0,0 +1,8 @@
|
||||||
|
# The purpose of this file is similar to a `package.json` for a JavaScript project or `Chart.yaml` for a Helm chart etc...
|
||||||
|
# It contains metadata about the project, such as its name, version, and description. This information is used by the deployment process to build and deploy the container image.
|
||||||
|
# We decided to use a YAML file (instead of JSON as an example) because it allows the use of comments, which can be helpful for documentation/explanation purposes like this.
|
||||||
|
|
||||||
|
name: hashicorp-vault
|
||||||
|
version: 1.0.12
|
||||||
|
description: Customized implementation of the Hashicorp Vault image.
|
||||||
|
maintainer: Bridgeman Accessible <info@bridgemanaccessible.ca>
|
||||||
93
setup-scripts/CommandRunner.py
Normal file
93
setup-scripts/CommandRunner.py
Normal file
|
|
@ -0,0 +1,93 @@
|
||||||
|
import os, sys, subprocess, select
|
||||||
|
from typing import List, Tuple
|
||||||
|
|
||||||
|
class CommandRunner:
|
||||||
|
@staticmethod
|
||||||
|
def run_command(command: str | List[str], check=True) -> Tuple[int, str, str]:
|
||||||
|
"""Run a command on the system and return the output
|
||||||
|
|
||||||
|
Args:
|
||||||
|
command (str | List[str]): The command to run
|
||||||
|
check (bool, optional): If the command should raise an exception if it fails (`check=` for `subprocess.run`). Defaults to True.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Tuple[int, str, str]: The return code of the command, the output of the command (on standard out), and the error output of the command (on standard error)
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Copy of the environment variables
|
||||||
|
new_env = os.environ.copy()
|
||||||
|
|
||||||
|
if not check:
|
||||||
|
# If the check isn't set, leave it out of the `subprocess.run`` call
|
||||||
|
# This means no exception will be raised if the command fails
|
||||||
|
result = subprocess.run(command, env=new_env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
|
||||||
|
return result.returncode, result.stdout.decode('utf-8').strip(), result.stderr.decode('utf-8')
|
||||||
|
else:
|
||||||
|
# Run the command and raise an exception if it fails
|
||||||
|
try:
|
||||||
|
result = subprocess.run(command, env=new_env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, check=True)
|
||||||
|
return result.returncode, result.stdout.decode('utf-8').strip(), result.stderr.decode('utf-8').strip()
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
# Log the error and re-raise or handle as appropriate
|
||||||
|
raise RuntimeError(f"Command '{command}' failed with error: {e.stderr}") from e
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def run_command_in_real_time(command: str | List[str]) -> Tuple[int, str, str]:
|
||||||
|
"""Similar to `run_command` but prints the output of the command in real-time
|
||||||
|
|
||||||
|
Args:
|
||||||
|
command (str | List[str]): The command to run
|
||||||
|
Returns:
|
||||||
|
tuple[int, str, str]: The return code of the command, the output of the command (on standard out), and the error output of the command (on standard error)
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Variables to store the output of the command
|
||||||
|
stdout_lines: list[str] = []
|
||||||
|
stderr_lines: list[str] = []
|
||||||
|
|
||||||
|
# Copy of the environment variables
|
||||||
|
my_env = os.environ.copy()
|
||||||
|
|
||||||
|
# Run the command
|
||||||
|
# We use a `with` statement to ensure the process is closed properly
|
||||||
|
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, shell=True) as proc:
|
||||||
|
# Call `poll()` initially because we want to emulate a do-while loop
|
||||||
|
return_code = proc.poll()
|
||||||
|
|
||||||
|
# Loop until the process is finished
|
||||||
|
# In theory this will loop about every second (timeout on the select call)
|
||||||
|
# Or slightly faster if the process outputs something
|
||||||
|
while(return_code is None):
|
||||||
|
# Because `readline` block until it gets a line,
|
||||||
|
# But the executed command ISN'T guaranteed to output a line every time
|
||||||
|
# We use `select` to check if there's something to read
|
||||||
|
# It Waits for 1 second or for the process to output something
|
||||||
|
rlist, _, _ = select.select([proc.stdout.fileno(), proc.stderr.fileno()], [], [], 1)
|
||||||
|
|
||||||
|
# There was something to read from the process's stdout
|
||||||
|
if proc.stdout.fileno() in rlist:
|
||||||
|
# Read the line from the process
|
||||||
|
stdout_line = proc.stdout.readline()
|
||||||
|
|
||||||
|
# Add the line to the cumulative output
|
||||||
|
stdout_lines.append(stdout_line.decode('utf-8').strip())
|
||||||
|
|
||||||
|
# Print the output in real-time
|
||||||
|
print(stdout_line.decode('utf-8').strip())
|
||||||
|
|
||||||
|
# There was something to read from the process's stderr
|
||||||
|
if proc.stderr.fileno() in rlist:
|
||||||
|
# Read the line from the process
|
||||||
|
stderr_line = proc.stderr.readline()
|
||||||
|
|
||||||
|
# Add the line to the cumulative output
|
||||||
|
stderr_lines.append(stderr_line.decode('utf-8').strip())
|
||||||
|
|
||||||
|
# Print the error output of the command in real-time to stderr
|
||||||
|
print(stderr_line.decode('utf-8').strip(), file=sys.stderr)
|
||||||
|
|
||||||
|
# Update the return code (to see if the process is finished)
|
||||||
|
return_code = proc.poll()
|
||||||
|
|
||||||
|
# Return the error code AND the full output of the command as a string
|
||||||
|
return return_code, '\n'.join(stdout_lines), '\n'.join(stderr_lines)
|
||||||
211
setup-scripts/app-role-access.py
Normal file
211
setup-scripts/app-role-access.py
Normal file
|
|
@ -0,0 +1,211 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
# |*******************************************************************|
|
||||||
|
# | Setup script |
|
||||||
|
# | |
|
||||||
|
# | Author: Alan Bridgeman |
|
||||||
|
# | Created: 2024-03-30 |
|
||||||
|
# | |
|
||||||
|
# | COPYRIGHT © 2024 Bridgeman Accessible/Alan Bridgeman. |
|
||||||
|
# | |
|
||||||
|
# | This work is presented AS IS, with no warranty of any kind. |
|
||||||
|
# | Any modification or use of this script is at the user's own risk. |
|
||||||
|
# |*******************************************************************|
|
||||||
|
|
||||||
|
import os, sys, subprocess, logging
|
||||||
|
|
||||||
|
from CommandRunner import CommandRunner
|
||||||
|
|
||||||
|
#def run_command(command):
|
||||||
|
# try:
|
||||||
|
# result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, check=True)
|
||||||
|
# return result.stdout.decode('utf-8').strip()
|
||||||
|
# except subprocess.CalledProcessError as e:
|
||||||
|
# # Log the error and re-raise or handle as appropriate
|
||||||
|
# raise RuntimeError(f"Command '{' '.join(command)}' failed with error: {e.stderr}") from e
|
||||||
|
|
||||||
|
def create_policy(policy_name: str, policy_capabilities: list[str], policy_path: str = 'secret/*'):
    """Create a policy in vault

    Args:
        policy_name (str): The name of the policy to create
        policy_capabilities (list[str]): The capabilities of the policy (ex. read, write, etc...)
        policy_path (str, optional): The path the policy should apply to. Defaults to all secrets (`secret/*`).

    Raises:
        RuntimeError: If the `vault policy write` command exits with a non-zero status.
    """
    # Render the capability list as an HCL list literal, ex. ["read","write"]
    policy_caps = '["' + '","'.join(policy_capabilities) + '"]'
    policy_content = 'path "' + policy_path + '" {\n capabilities = ' + policy_caps + '\n}'

    # Feed the policy body to `vault policy write <name> -` via a shell heredoc
    # (the trailing `-` tells vault to read the policy from stdin)
    policy = '<<EOF\n' + policy_content + '\nEOF'
    policy_return_code, policy_output, policy_err = CommandRunner.run_command('vault policy write ' + policy_name + ' - ' + policy, False)

    # Treat ANY non-zero exit as a failure. Previously only exit code 2 was
    # checked (Vault's "remote error" code), which silently ignored local CLI
    # errors such as a missing binary or bad arguments (exit code 1).
    if policy_return_code != 0:
        logging.error('Failed to create the policy')
        logging.error('Policy Output: ' + policy_output)
        logging.error('Policy Error: ' + policy_err)
        raise RuntimeError('Failed to create the policy')
|
||||||
|
|
||||||
|
def get_role_id(role_name: str) -> str:
    """Get the `role_id` for a given role

    Args:
        role_name (str): The name of the role to get the `role_id` for

    Returns:
        str: The `role_id` for the given role
    """
    # Read the role from vault and pull just the `role_id` field out of the
    # tabular CLI output (grep the row, awk the second column)
    read_path = f'auth/approle/role/{role_name}/role-id'
    command = f"vault read {read_path} | grep role_id | awk '{{print $2}}'"
    _, role_id, _ = CommandRunner.run_command(command)

    logging.debug('Role ID: ' + role_id)

    return role_id
|
||||||
|
|
||||||
|
def get_secret_id(role_name: str) -> str:
    """Get a/the `secret_id` for a given role

    Args:
        role_name (str): The name of the role to get the `secret_id` for

    Returns:
        str: The `secret_id` for the given role
    """
    # Writing (with -f, i.e. force/no-payload) to the role's secret-id endpoint
    # mints a new secret_id; grep/awk extract it from the tabular CLI output.
    # NOTE: the trailing space in `"secret_id "` avoids matching `secret_id_accessor`.
    write_path = f'auth/approle/role/{role_name}/secret-id'
    command = f"vault write -f {write_path} | grep \"secret_id \" | awk '{{print $2}}'"
    _, secret_id, _ = CommandRunner.run_command(command)

    logging.debug('Secret ID: ' + secret_id)

    return secret_id
|
||||||
|
|
||||||
|
def create_app_role(role_name: str, policy_name: str) -> tuple[str, str]:
    """Create an approle role and return the role_id and secret_id

    Args:
        role_name (str): The name of the role to create
        policy_name (str): The name of the policy to associate with the role

    Returns:
        tuple[str, str]: The `role_id` and `secret_id` of the newly created role
    """
    # Create the role, attaching the given policy to the tokens it issues
    write_path = f'auth/approle/role/{role_name}'
    _, write_output, _ = CommandRunner.run_command(f'vault write {write_path} token_policies="{policy_name}"')

    logging.debug(write_output)

    # Fetch the credentials for the freshly created role
    return get_role_id(role_name), get_secret_id(role_name)
|
||||||
|
|
||||||
|
def save_role_vars_to_backup_file(role_name: str, role_id: str, secret_id: str):
    """Save the role_id and secret_id to a backup file (in the `/vault/creds` directory which is mounted to the host)

    This is used as a sort of backup in rare case where the `/role_vars/.env` doesn't persist across restarts/instances.

    Args:
        role_name (str): The name of the role (used as the filename inside of the `/vault/creds` directory)
        role_id (str): The `role_id` to save
        secret_id (str): The `secret_id` to save
    """
    file_name = f'/vault/creds/{role_name}'

    logging.debug('File Name: ' + file_name)

    # Write the role_id and secret_id to the file.
    # NOTE: open(..., 'w+') already creates the file if it doesn't exist, so
    # the previous `touch` shell subprocess was redundant (and fragile for
    # role names containing shell metacharacters) — it has been removed.
    # Lines are `NAME=value`, using the env-var names the consumer expects.
    with open(file_name, 'w+') as f:
        f.write('='.join([os.environ['ROLE_ID_SECRET_NAME'], role_id]) + '\n')
        f.write('='.join([os.environ['SECRET_ID_SECRET_NAME'], secret_id]))
|
||||||
|
|
||||||
|
def save_role_vars_to_file(role_id: str, secret_id: str, env_dir: str = '/role_vars'):
    """Save the role_id and secret_id to a file (`<env_dir>/.env`)

    This file can then be loaded/mounted/etc... into other containers or by other scripts to load the values so that the app can load it

    Args:
        role_id (str): The `role_id` to save
        secret_id (str): The `secret_id` to save
        env_dir (str, optional): Directory the `.env` file is written into. Defaults to '/role_vars'.

    Raises:
        KeyError: If `ROLE_ID_SECRET_NAME` or `SECRET_ID_SECRET_NAME` is not set in the environment
    """

    # Create the directory if it doesn't exist (exist_ok avoids a racy
    # isdir-then-mkdir check)
    os.makedirs(env_dir, exist_ok=True)

    file_name = os.path.join(env_dir, '.env')

    logging.debug('File Name: ' + file_name)

    # Write the role_id and secret_id to the file.
    # Mode 'w' creates the file if it doesn't exist (and truncates any previous
    # contents), so the previous `touch` subprocess call was redundant.
    with open(file_name, 'w') as f:
        f.write('='.join([os.environ['ROLE_ID_SECRET_NAME'], role_id]) + '\n')
        f.write('='.join([os.environ['SECRET_ID_SECRET_NAME'], secret_id]))
|
||||||
|
|
||||||
|
def main(token: str):
    """Configure AppRole auth in Vault and persist the resulting credentials.

    Logs into Vault with the supplied token, enables the `approle` auth
    method, creates a policy and an AppRole role (names and capabilities are
    configurable via the `POLICY_NAME`, `POLICY_CAPABILITIES` and `ROLE_NAME`
    environment variables), then saves the resulting role_id/secret_id to
    both the backup file and the `.env` file.

    Args:
        token (str): A Vault token with sufficient privileges to create policies and roles
    """

    # Login to vault
    # NOTE(review): the token is interpolated into a shell command string;
    # it comes from our own setup scripts today, but revisit if the source
    # of `token` ever becomes untrusted.
    CommandRunner.run_command(f'vault login {token}')

    # Enable approle auth method
    CommandRunner.run_command('vault auth enable approle')

    # -- Create a policy for the app --

    # The policy name can be set via the `POLICY_NAME` environment variable,
    # defaulting to `node-app-policy` if not set
    policy_name = os.environ.get('POLICY_NAME', 'node-app-policy')

    logging.debug(f'Policy Name: {policy_name}')

    # The policy capabilities can be set via the `POLICY_CAPABILITIES`
    # environment variable (comma-separated), defaulting to
    # `['read', 'create', 'update']` if not set
    if 'POLICY_CAPABILITIES' in os.environ:
        policy_capabilities = [cap.strip() for cap in os.environ['POLICY_CAPABILITIES'].split(',')]
    else:
        policy_capabilities = ['read', 'create', 'update']

    # Join outside the f-string: nesting the same quote character inside an
    # f-string expression is a SyntaxError on Python versions before 3.12.
    logging.debug('Policy Capabilities: ' + ', '.join(policy_capabilities))

    create_policy(policy_name, policy_capabilities)

    # -- Create an approle role --

    # The role name can be set via the `ROLE_NAME` environment variable,
    # defaulting to `node-app` if not set
    role_name = os.environ.get('ROLE_NAME', 'node-app')

    role_id, secret_id = create_app_role(role_name, policy_name)

    # Save the role_id and secret_id to a backup file
    save_role_vars_to_backup_file(role_name, role_id, secret_id)

    # Save the role_id and secret_id to a file
    save_role_vars_to_file(role_id, secret_id)
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # The Vault token is expected as the first command-line argument;
    # fail with a usage message instead of a raw IndexError.
    if len(sys.argv) < 2:
        sys.exit('Usage: app-role-access.py <vault-token>')

    main(sys.argv[1])
|
||||||
144
setup-scripts/prod-setup.py
Normal file
144
setup-scripts/prod-setup.py
Normal file
|
|
@ -0,0 +1,144 @@
|
||||||
|
import os, json
|
||||||
|
from CommandRunner import CommandRunner
|
||||||
|
|
||||||
|
class Initializer:
    """One-time production setup for a Vault instance.

    Drives the `vault` CLI (via the project's CommandRunner) to initialize
    the vault, persist the unseal keys and root token to `/vault/creds`,
    unseal it, and enable the secrets engine, audit device and AppRole access.

    # Attributes (set by `init_vault`):
    #   unseal_keys - list of base64 unseal keys parsed from `vault operator init`
    #   root_token  - the initial root token parsed from `vault operator init`
    """

    def create_unseal_keys_file(self, file = '/vault/creds/unseal-keys'):
        """Write the vault's unseal keys to a file, one key per line

        Args:
            file (str, optional): The path of the file to output. Defaults to '/vault/creds/unseal-keys'.
        """

        with open(file, 'w+') as f:
            f.write('\n'.join(self.unseal_keys))

    def create_root_token_file(self, file = '/vault/creds/root-token'):
        """Write the vault's root token to a file

        Args:
            file (str, optional): The path of the file to output. Defaults to '/vault/creds/root-token'.
        """

        with open(file, 'w+') as f:
            f.write(self.root_token)

    def init_vault(self):
        """Initialize vault

        This includes creating the root token and unseal keys.
        Which we want to store in case we need them later
        """

        print('*----------------------*')
        print('| Initialization Vault |')
        print('*----------------------*')

        # Initialize the vault
        return_code, init_output, init_err = CommandRunner.run_command('vault operator init -format=json')

        # Parse the unseal keys and root token from the initialization response
        # NOTE(review): `init_output` is parsed twice; a single json.loads
        # into a local would avoid the duplicate parse.
        self.unseal_keys = json.loads(init_output)['unseal_keys_b64']
        self.root_token = json.loads(init_output)['root_token']

        # UPDATE: Is mounted as a volume instead
        #CommandRunner.run_command('mkdir /vault/creds')

        # Persist the credentials to the mounted /vault/creds volume
        self.create_unseal_keys_file()
        self.create_root_token_file()

    def is_vault_unsealed(self) -> bool:
        """Return the vault's raw `sealed` status flag.

        NOTE(review): despite the name, this returns the `sealed` field
        verbatim — True when the vault IS SEALED. Callers compensate by
        negating the result (`if not self.is_vault_unsealed()` means
        "if unsealed"), so renaming/inverting would need coordinated changes.

        Returns:
            bool: The `sealed` field from `vault status` (True = sealed)
        """

        # Get the status of the vault
        # Note, because it returns a non-zero exit code when the vault is sealed, we set check to False
        # Which is also why we need to check the return code manually
        seal_status_returncode, seal_status_raw, seal_status_err = CommandRunner.run_command('vault status -format=json', False)

        # Verify the return code is either 0 (unsealed) or 2 (sealed)
        if seal_status_returncode != 0 and seal_status_returncode != 2:
            raise RuntimeError('Failed to get the status of the vault')

        # Print the raw status
        print(seal_status_raw)

        # Parse the seal stat from the status
        seal_status = json.loads(seal_status_raw)['sealed']

        print(f'Is Sealed: {seal_status}')

        return seal_status

    def unseal_vault(self):
        """Unseal the vault by applying each stored unseal key until the vault reports unsealed"""

        print('*-----------------*')
        print('| Unsealing Vault |')
        print('*-----------------*')

        # Use each key to unseal the vault
        for key in self.unseal_keys:
            return_code, unseal_output, unseal_err = CommandRunner.run_command(f'vault operator unseal {key}')

            print(unseal_output)

            # If the vault is now unsealed break/escape from the loop
            # (is_vault_unsealed returns the raw `sealed` flag, hence the `not`)
            if not self.is_vault_unsealed():
                print('Vault is unsealed')
                break

    def setup_secrets_engine(self):
        """Log in with the root token and enable the KV secrets engine at path `secret`"""

        print('*---------------------------*')
        print('| Setting up secrets engine |')
        print('*---------------------------*')

        login_return_code, login_output, login_err = CommandRunner.run_command(f'vault login {self.root_token}')

        print(login_output)

        engin_enable_return_code, engine_enable_output, engine_enable_err = CommandRunner.run_command('vault secrets enable -path secret kv')

        print(engine_enable_output)

    def setup_audit_device(self):
        """Enable the `file` audit device, writing to /vault/logs/vault-audit.log"""

        print('*---------------------------*')
        print('| Setting up Audit Device |')
        print('*---------------------------*')

        audit_return_code, audit_output, audit_err = CommandRunner.run_command('vault audit enable file file_path=/vault/logs/vault-audit.log')

        print(audit_output)

    def setup_app_role_access(self):
        """Run the app role creation script"""

        print('*----------------------------*')
        print('| Setting up App Role access |')
        print('*----------------------------*')

        print(f'Policy Capabilities: {os.getenv("POLICY_CAPABILITIES")}')

        # Run the custom entrypoint Python script, passing the root token as argv[1]
        CommandRunner.run_command_in_real_time(f'python3 /setup-scripts/app-role-access.py {self.root_token}')
|
||||||
|
|
||||||
|
def main():
    """Perform first-time Vault setup, skipping if the vault is already unsealed."""

    vault_setup = Initializer()

    # Check if the root-token file and unseal keys files exist
    #if os.path.exists('/vault/creds/root-token') and os.path.exists('/vault/creds/unseal-keys'):

    # `is_vault_unsealed` returns the raw `sealed` flag, so a falsy value
    # means the vault is already unsealed (i.e. already initialized).
    if not vault_setup.is_vault_unsealed():
        print('Vault already setup. Skipping...')
        # QUESTION: Should there be code here to get the Role ID and Secret ID in case the originally created .env file doesn't exist for some reason
        return

    # Fresh vault: initialize, unseal, then configure everything on top
    vault_setup.init_vault()
    vault_setup.unseal_vault()
    vault_setup.setup_secrets_engine()
    vault_setup.setup_audit_device()
    vault_setup.setup_app_role_access()
|
||||||
|
|
||||||
|
# Script entry point: run the one-time production Vault setup
if __name__ == '__main__':
    main()
|
||||||
287
snapshot-server/server.py
Normal file
287
snapshot-server/server.py
Normal file
|
|
@ -0,0 +1,287 @@
|
||||||
|
import sys, subprocess, tempfile, os, cgi, json
|
||||||
|
from http.server import BaseHTTPRequestHandler, HTTPServer
|
||||||
|
|
||||||
|
|
||||||
|
class VaultSnapshotHandler(BaseHTTPRequestHandler):
    """HTTP handler exposing Vault raft snapshot backup and restore.

    GET /snapshot  - creates a raft snapshot and serves it as a file download.
    POST /restore  - accepts a multipart upload of a snapshot plus unseal
                     keys, restores the snapshot and unseals the vault.

    NOTE(review): relies on the `cgi` module, which is deprecated (PEP 594)
    and removed in Python 3.13 — this handler needs a replacement parser
    before upgrading the interpreter.
    """

    def create_and_serve_snapshot(self, snapshot_file):
        """Create a snapshot of the vault and return the contents as a stream/string (so that it can be served/downloaded via HTTP)

        Args:
            snapshot_file (str): The path to the snapshot file to be created

        Returns:
            bytes: The contents of the snapshot file, or None when the vault
            command failed (in which case a 500 response has already been sent)
        """
        # Execute the Vault snapshot command
        cmd = ['vault', 'operator', 'raft', 'snapshot', 'save', snapshot_file]
        process = subprocess.run(cmd, capture_output=True, text=True)

        # Check if the Vault command executed successfully
        # If it wasn't successful, return a 500 error with the error message
        if process.returncode != 0:
            self.send_response(500)
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()
            error_message = f"Error generating snapshot: {process.stderr}"
            self.wfile.write(error_message.encode('utf-8'))

            # Clean up the temporary file if the command failed
            os.remove(snapshot_file)

            # NOTE(review): implicit `return None` — the do_GET caller does
            # len(snapshot_data) on this, which raises TypeError on failure.
            return

        # Read the snapshot file content
        with open(snapshot_file, 'rb') as f:
            file_data = f.read()

        return file_data

    def do_GET(self):
        """Serve GET /snapshot as a raft snapshot download; 404 anything else."""
        if self.path == '/snapshot':
            # Create a temporary file to store the snapshot.
            # The file handle is closed immediately; only the name is kept
            # (delete=False keeps the file on disk after the `with` block).
            with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
                snapshot_file = tmp_file.name
                print(f"Temporary file created for snapshot: {snapshot_file}")

            try:
                # NOTE(review): returns None when the snapshot command fails
                # (a 500 has then already been sent); the len() call below
                # would raise TypeError in that case.
                snapshot_data = self.create_and_serve_snapshot(snapshot_file)

                # Send response headers to indicate a file attachment
                self.send_response(200)
                self.send_header('Content-Type', 'application/octet-stream')
                self.send_header('Content-Disposition', 'attachment; filename="raft_snapshot.snap"')
                self.send_header('Content-Length', str(len(snapshot_data)))
                self.end_headers()

                # Send the file content as the response body
                self.wfile.write(snapshot_data)
            finally:
                # Clean up the temporary file whether the command succeeded or failed
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
        else:
            # If a path other than `/snapshot` was requested
            self.send_error(404, "Not Found")

    def upload_snapshot(self, form):
        """Save the uploaded snapshot file to a temporary file

        Args:
            form (cgi.FieldStorage): The form data containing the snapshot file

        Returns:
            str: The path to the temporary file containing the snapshot, or
            None when the form is missing/empty (a 400 has then been sent)
        """

        # Expecting the file to be sent with the name 'file'
        if 'file' not in form:
            print("Missing 'file' field in the form data.")
            self.send_error(400, "Missing 'file' field in the form data.")
            # NOTE(review): implicit `return None` — do_POST does not check
            # for this and would crash using the None path.
            return

        file_item = form['file']
        if not file_item.file:
            print("No file uploaded in the 'file' field.")
            self.send_error(400, "No file uploaded in the 'file' field.")
            return

        # Save the uploaded file to a temporary file (delete=False keeps it
        # on disk; do_POST removes it in its `finally` block)
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            tmp_filename = tmp_file.name
            print(f"Temporary file created for restore: {tmp_filename}")
            file_item.file.seek(0)
            tmp_file.write(file_item.file.read())
        return tmp_filename

    def upload_unseal_keys(self, form):
        """Save the uploaded unseal keys file to a temporary file

        Args:
            form (cgi.FieldStorage): The form data containing the unseal keys file

        Returns:
            str: The path to the temporary file containing the unseal keys, or
            None when the form is missing/empty (a 400 has then been sent)
        """

        # Expecting a text file to be sent with the name 'unseal_keys'
        if 'unseal_keys' not in form:
            print("Missing 'unseal_keys' field in the form data.")
            self.send_error(400, "Missing 'unseal_keys' field in the form data.")
            return

        file_item = form['unseal_keys']
        if not file_item.file:
            print("No file uploaded in the 'unseal_keys' field.")
            self.send_error(400, "No file uploaded in the 'unseal_keys' field.")
            return

        # Save the uploaded file to a temporary file
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            tmp_filename = tmp_file.name
            print(f"Temporary unseal_keys file created for restore: {tmp_filename}")
            file_item.file.seek(0)
            tmp_file.write(file_item.file.read())
        return tmp_filename

    def restore_backup(self, snapshot_file) -> bool:
        """Restore the vault from a snapshot

        Args:
            snapshot_file (str): The path to the snapshot file to be restored

        Returns:
            bool: True if the restore was successful, False otherwise (a 500
            response has already been sent in the False case)
        """

        print('+--------------------------+')
        print('| Restoring Vault Snapshot |')
        print('+--------------------------+')

        print(f"Restoring snapshot from: {snapshot_file}")

        # Run the Vault restore command
        cmd = ['vault', 'operator', 'raft', 'snapshot', 'restore', '--force', snapshot_file]
        process = subprocess.run(cmd, capture_output=True, text=True)

        # If the restore fails return a 500 error with the error message
        if process.returncode != 0:
            # Setup error response headers
            self.send_response(500)
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()

            # Create the error message
            error_message = f"Error restoring snapshot: {process.stderr}"

            # Print the error to the console (or equivalent)
            print(error_message.strip())

            # Send the error message as part of the response
            self.wfile.write(error_message.encode('utf-8'))

            # Indicate to the caller that the restore failed
            return False

        print('Snapshot restored successfully')

        return True

    def is_vault_unsealed(self) -> bool:
        """Return the vault's raw `sealed` status flag.

        NOTE(review): despite the name, this returns the `sealed` field
        verbatim — True when the vault IS SEALED. `unseal_vault` compensates
        by negating the result; same inverted convention as prod-setup.py.

        Returns:
            bool: The `sealed` field from `vault status` (True = sealed)
        """

        # Get the status of the vault
        # Note, because it returns a non-zero exit code when the vault is sealed, we set check to False
        # Which is also why we need to check the return code manually
        process = subprocess.run(['vault', 'status', '-format=json'], capture_output=True, text=True)

        # Verify the return code is either 0 (unsealed) or 2 (sealed)
        if process.returncode != 0 and process.returncode != 2:
            raise RuntimeError('Failed to get the status of the vault')

        # Print the raw status
        print(process.stdout.strip())

        # Parse the seal stat from the status
        seal_status = json.loads(process.stdout.strip())['sealed']

        print(f'Is Sealed: {seal_status}')

        return seal_status

    def unseal_vault(self, unseal_keys):
        """Unseal a restored vault by applying keys until it reports unsealed

        Args:
            unseal_keys (list[str]): The unseal keys to apply, one per attempt
        """

        print('+--------------------------+')
        print('| Unsealing restored Vault |')
        print('+--------------------------+')

        # Use each key to unseal the vault
        for key in unseal_keys:
            process = subprocess.run(['vault', 'operator', 'unseal', key], capture_output=True, text=True)

            print(process.stdout.strip())

            # If the vault is now unsealed break/escape from the loop
            # (is_vault_unsealed returns the raw `sealed` flag, hence the `not`)
            if not self.is_vault_unsealed():
                print('Vault is unsealed')
                break

    def do_POST(self):
        """Handle POST requests to restore a snapshot"""

        if self.path == '/restore':
            content_type = self.headers.get('Content-Type')
            ctype, pdict = cgi.parse_header(content_type)

            # Verify the request is formatted properly (`multipart/form-data`)
            if ctype != 'multipart/form-data':
                print(f"Invalid Content-Type: {content_type}")
                self.send_error(400, 'Content-Type must be multipart/form-data')
                return

            # cgi.FieldStorage requires the boundary to be bytes
            # NOTE(review): `pdict` is prepared here but never passed to
            # FieldStorage below (which parses the boundary from `headers`),
            # so these two lines appear to be dead code.
            pdict['boundary'] = pdict['boundary'].encode('utf-8')
            pdict['CONTENT-LENGTH'] = int(self.headers.get('Content-Length', 0))

            form = cgi.FieldStorage(
                fp=self.rfile,
                headers=self.headers,
                environ={'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': content_type}
            )

            # Upload/Save the files that were included in the request
            # NOTE(review): both helpers return None after sending a 400; the
            # code below does not check for that and would raise on a None path.
            snapshot_file = self.upload_snapshot(form)
            unseal_keys_file = self.upload_unseal_keys(form)

            try:
                # Restore the snapshot using the uploaded file
                # NOTE(review): the bool result is ignored — on failure a 500
                # has already been sent, yet execution continues and sends a
                # second (200) response below.
                self.restore_backup(snapshot_file)

                # Load the unseal keys from the uploaded file
                # And unseal the restored vault
                with open(unseal_keys_file, 'r') as f:
                    unseal_keys = f.read().splitlines()
                    self.unseal_vault(unseal_keys)

                # If successful, return a JSON response with a success message
                self.send_response(200)
                self.send_header('Content-Type', 'application/json')
                self.end_headers()
                self.wfile.write(b'{"status": "success", "message": "Snapshot restored successfully"}')
            finally:
                # Remove the temporary files regardless of success or failure
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)

                if os.path.exists(unseal_keys_file):
                    os.remove(unseal_keys_file)
        else:
            # If a path other than `/restore` is requested
            self.send_error(404, "Endpoint not found.")

    # Optionally override logging to avoid default console messages
    def log_message(self, format, *args):
        pass
|
||||||
|
|
||||||
|
def run_server(port=8300):
    """Start the HTTP snapshot server and serve until interrupted.

    Args:
        port (int | str, optional): The TCP port to listen on. String values
            (e.g. straight from `sys.argv`) are coerced to int. Defaults to 8300.

    Raises:
        ValueError: If `port` is a string that is not a valid integer
    """

    # Coerce string ports: the __main__ block passes sys.argv[1] through
    # unconverted, and HTTPServer requires a numeric port.
    port = int(port)

    server_address = ('', port)
    httpd = HTTPServer(server_address, VaultSnapshotHandler)
    print(f"Starting HTTP server on port {port}...")
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        print("\nServer interrupted by user, shutting down.")
    finally:
        # Always release the listening socket, even on interrupt
        httpd.server_close()
        print("Server stopped.")
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    print('+--------------------------+')
    print('| Starting snapshot server |')
    print('+--------------------------+')

    # sys.argv entries are strings — convert to int so HTTPServer gets a
    # numeric port instead of failing with a TypeError.
    run_server(int(sys.argv[1]) if len(sys.argv) > 1 else 8300)
|
||||||
25
vault-config.hcl
Normal file
25
vault-config.hcl
Normal file
|
|
@ -0,0 +1,25 @@
|
||||||
|
# Storage backend (the commented-out "file" backend below is kept for reference; raft is configured further down)
|
||||||
|
#storage "file" {
|
||||||
|
# path = "/vault/data"
|
||||||
|
#}
|
||||||
|
|
||||||
|
ui = true
|
||||||
|
|
||||||
|
# HTTP listener (TLS disabled via tls_disable — traffic is plaintext)
|
||||||
|
listener "tcp" {
|
||||||
|
address = "0.0.0.0:8200"
|
||||||
|
tls_disable = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Disabled Memory Lock
|
||||||
|
disable_mlock = true
|
||||||
|
|
||||||
|
# API Address
|
||||||
|
api_addr = "http://localhost:8200"
|
||||||
|
|
||||||
|
storage "raft" {
|
||||||
|
path = "/vault/data"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Cluster address (optional, can be same as api_addr if not using clustering)
|
||||||
|
cluster_addr = "http://localhost:8201"
|
||||||
Loading…
Add table
Add a link
Reference in a new issue