init dump

This commit is contained in:
2025-08-21 19:26:04 -05:00
parent 5b305f82a4
commit 0c1a660aa8
11 changed files with 486 additions and 0 deletions

10
iac/Pulumi.yaml Normal file
View File

@@ -0,0 +1,10 @@
name: hetzner-k3s
description: A multi-region k3s cluster on Hetzner Cloud
runtime:
name: nodejs
options:
packagemanager: npm
config:
pulumi:tags:
value:
pulumi:template: typescript

69
iac/README.md Normal file
View File

@@ -0,0 +1,69 @@
## Infrastructure as Code using Pulumi (will swap it to Clojurescript after I get it stable)
To preface, writing it initially in Clojurescript without an immaculate handle on Pulumi is not the best idea. Easier to do it in Javascript for the sake of having docs to reference.
This is my cluster configuration, which automates the deployment and handling of the services I use for personal and public tasks. The initial goals are to reduce my cost overhead (I was using Vultr), improve reproducibility (we love Guix, after all), and increase stability — previously, any change meant editing a docker compose file and potentially bringing services down on any mistake (Caddy being the central funnel was a blessing and a curse). It also improves security, since our secrets can now be contained in OpenBao (HashiCorp Vault, but open source and maintained by the Linux Foundation).
I'll try to include any pertinent documentation here in the tooling I use or the setup.
#### Upcoming
Initially we'll try to migrate our services from a docker compose and into a reproducible and controlled deployment scheme here. I'll also likely break this into its own repo and instead reference it as a submodule in our dotfiles (because it makes far more sense that way).
#### Goals
The long term goal is for this to be a mostly uninteractive, to completion set up of my cloud services. Since it'll be IaC should I ever choose down the road to migrate certain ones to local nodes I run then that effort should also be more or less feasible.
### Vault
Setting up Vault requires the following manual steps once everything is provisioned (you'll have to cancel the `pulumi up`).
Run this:
```
pulumi stack output kubeconfig --show-secrets > kubeconfig.yaml
```
```
kubectl --kubeconfig=kubeconfig.yaml exec -n vault -it vault-0 -- /bin/sh
```
Inside the new shell:
```
vault operator init
vault operator unseal <PASTE_UNSEAL_KEY_1>
vault operator unseal <PASTE_UNSEAL_KEY_2>
vault operator unseal <PASTE_UNSEAL_KEY_3>
```
Then you need to run:
```
kubectl --kubeconfig=kubeconfig.yaml port-forward -n vault vault-0 8200:8200
```
This enables us to access the OpenBao UI in our browser.
Open a new terminal window (leave that one open, as it maintains the port-forward connection to the vault).
Run the following:
```
export VAULT_ADDR='http://127.0.0.1:8200'
export VAULT_TOKEN='<PASTE_YOUR_INITIAL_ROOT_TOKEN>'
```
Open another terminal and connect to the pod
```
kubectl --kubeconfig=kubeconfig.yaml exec -it openbao-0 -n vault -- /bin/sh
```
then run:
```
# Set your token
export BAO_TOKEN='<PASTE_YOUR_INITIAL_ROOT_TOKEN>'
# Enable the kv-v2 secrets engine
bao secrets enable -path=secret kv-v2
```
Just enables kv-v2 secrets engine
You can then do:
```
bao kv put secret/nextcloud adminPassword="..." dbPassword="..."
```
or just use the UI in your browser at 127.0.0.1:8200 since you're portforwarded to it

31
iac/index.js Normal file
View File

@@ -0,0 +1,31 @@
const k8s = require("@pulumi/kubernetes");
const core = require("./src/js/core");
const vault = require("./src/js/k8/openbao/openbao");
const nextcloud = require("./src/js/k8/nextcloud/nextcloud");
const hetznercsi = require('./src/js/k8/csi-drivers/hetzner');
// Entry point: provisions the cluster, then deploys in-cluster workloads
// once the kubeconfig is available.
async function main() {
  // Stand up the Hetzner servers and bootstrap k3s.
  const cluster = core.createCluster();
  // NOTE(review): creating resources inside .apply() means they are not
  // visible to `pulumi preview` until the kubeconfig output resolves —
  // confirm this trade-off is acceptable before adding more workloads here.
  const appOutputs = cluster.kubeconfig.apply(async (kc) => {
    // Provider bound to the freshly generated kubeconfig.
    const provider = new k8s.Provider("k8s-dynamic-provider", {
      kubeconfig: kc,
    });
    hetznercsi.deployCsiDriver(provider); // hcloud volume support
    vault.deployVault(provider); // OpenBao secrets store
    //const app = await nextcloud.deployNextcloudApp(kc, provider);
    return {
      //nextcloudUrl: app.nextcloudUrl,
    };
  });
  // Stack outputs. appOutputs is currently unused because the Nextcloud
  // deployment above is commented out.
  return {
    masterIp: cluster.masterIp,
    kubeconfig: cluster.kubeconfig,
    //nextcloudUrl: appOutputs.nextcloudUrl,
  };
}
// Pulumi accepts a Promise of outputs as the module export.
module.exports = main();

22
iac/package.json Normal file
View File

@@ -0,0 +1,22 @@
{
"name": "hetzner-k3s",
"main": "index.js",
"scripts": {
"build": "shadow-cljs release app",
"pulumi": "pulumi up",
"deploy": "npm run build && npm run pulumi"
},
"devDependencies": {
"@types/node": "^18",
"shadow-cljs": "^2.20.0",
"typescript": "^5.0.0"
},
"dependencies": {
"@pulumi/pulumi": "^3.113.0",
"@pulumi/hcloud": "^1.24.0",
"@pulumi/esc-sdk": "0.12.1",
"@pulumi/command": "^1.1.0",
"@pulumi/kubernetes": "^4.23.0",
"@pulumi/vault": "^7.2.1"
}
}

7
iac/shadow-cljs.edn Normal file
View File

@@ -0,0 +1,7 @@
;; shadow-cljs.edn
;; NOTE(review): :output-to "index.js" would overwrite the handwritten
;; iac/index.js when `shadow-cljs release app` runs — confirm this is
;; intended before building the ClojureScript port.
{:source-paths ["src/main"]
 :dependencies []
 :builds
 {:app {:target :node-script
        :output-to "index.js"
        :main infra.core/main!}}}

143
iac/src/js/core.js Normal file
View File

@@ -0,0 +1,143 @@
const pulumi = require("@pulumi/pulumi");
const hcloud = require("@pulumi/hcloud");
const command = require("@pulumi/command/remote");
const k8s = require("@pulumi/kubernetes");
const local = require("@pulumi/command/local");
const fs = require("fs");
/**
 * Provisions a multi-region k3s cluster on Hetzner Cloud.
 *
 * Topology: one master in Falkenstein (fsn1), one worker in Falkenstein and
 * one in Ashburn (ash), joined over their public IPs using flannel's
 * WireGuard-native backend.
 *
 * Pulumi config consumed:
 *   - sshKeyName    : name of an SSH key already registered in Hetzner Cloud
 *   - privateKeySsh : the matching private key (secret), used for remote exec
 *
 * @returns {object} outputs: masterIp, workerDeIp, workerUsIp, and the
 *   cluster kubeconfig wrapped in pulumi.secret().
 */
exports.createCluster = function() {
  const config = new pulumi.Config();
  const sshKeyName = config.require("sshKeyName");
  const privateKey = config.requireSecret("privateKeySsh");
  // Idempotent k3s server install, executed on the master over SSH.
  const installMasterScript = (publicIp) => `
# Install k3s if it's not already present.
if ! command -v k3s >/dev/null; then
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--flannel-backend=wireguard-native --node-external-ip=${publicIp}" sh -
fi
# Wait until the kubeconfig is readable and a kubectl command succeeds.
until sudo k3s kubectl get node > /dev/null 2>&1; do
echo "Waiting for master node to be ready..."
sleep 5
done
`;
  // k3s agent install for the workers, delivered as cloud-init user-data.
  // Escaped dollar signs (\$) are deliberate: those variables must be
  // expanded by the remote shell, not by this template literal.
  const installWorkerScript = (masterIp, token) => `#!/bin/bash
# Redirect all output (stdout and stderr) to a log file for debugging.
exec > /root/k3s-install.log 2>&1
set -x # Echo every command being executed to the log file.
echo "--- Starting k3s worker installation script at $(date) ---"
# Add a loop to wait for network connectivity to the master node.
until ping -c1 ${masterIp}; do
echo "Waiting for network connectivity to master..."
sleep 2
done
echo "Network is up. Discovering this node's public IP..."
# Use an external service to find the public IP address of this server.
WORKER_PUBLIC_IP=$(curl -s https://ifconfig.me/ip)
# The dollar sign is escaped (\$) so it's interpreted by the remote shell.
echo "Discovered public IP: \${WORKER_PUBLIC_IP}"
echo "Proceeding with k3s agent installation."
if ! command -v k3s >/dev/null; then
echo "k3s not found, attempting installation..."
# Pass the discovered public IP to the k3s installer.
# The dollar sign for WORKER_PUBLIC_IP is escaped (\$) so that it's interpreted
# by the bash script on the remote server, not by the local Node.js process.
curl -sfL https://get.k3s.io | K3S_URL=https://${masterIp}:6443 K3S_TOKEN="${token}" INSTALL_K3S_EXEC="--node-external-ip=\${WORKER_PUBLIC_IP}" sh -
echo "k3s installation script finished with exit code $?."
else
echo "k3s is already installed."
fi
echo "--- Finished k3s worker installation script at $(date) ---"
`;
  // Open only what the cluster needs: SSH (22), the k8s API (6443),
  // WireGuard (51820/udp), and ICMP for the worker's ping-based wait loop.
  const firewall = new hcloud.Firewall("k3s-firewall", {
    rules: [
      { direction: "in", protocol: "tcp", port: "22", sourceIps: ["0.0.0.0/0", "::/0"] },
      { direction: "in", protocol: "tcp", port: "6443", sourceIps: ["0.0.0.0/0", "::/0"] },
      { direction: "in", protocol: "udp", port: "51820", sourceIps: ["0.0.0.0/0", "::/0"] },
      { direction: "in", protocol: "icmp", sourceIps: ["0.0.0.0/0", "::/0"] },
    ],
  });
  const master = new hcloud.Server("k3s-master-de", {
    serverType: "cx22",
    image: "ubuntu-22.04",
    location: "fsn1",
    sshKeys: [sshKeyName],
    firewallIds: [firewall.id],
  });
  // SSH connection descriptor reused by every remote command below.
  const masterConnection = {
    host: master.ipv4Address,
    user: "root",
    privateKey: privateKey,
  };
  // Install the k3s server; also blocks until kubectl answers (see script).
  const installMaster = new command.Command("install-master", {
    connection: masterConnection,
    create: master.ipv4Address.apply(installMasterScript),
  }, { dependsOn: [master] });
  // The node-token workers need to join; only readable after install.
  const tokenCmd = new command.Command("get-token", {
    connection: masterConnection,
    create: "sudo cat /var/lib/rancher/k3s/server/node-token",
  }, { dependsOn: [installMaster] });
  // Render the worker cloud-init script once IP and token are known.
  const workerScript = pulumi.all([master.ipv4Address, tokenCmd.stdout]).apply(([ip, token]) =>
    installWorkerScript(ip, token.trim())
  );
  const workerDe = new hcloud.Server("k3s-worker-de", {
    serverType: "cx22",
    image: "ubuntu-22.04",
    location: "fsn1",
    sshKeys: [sshKeyName],
    userData: workerScript,
    firewallIds: [firewall.id],
  });
  const workerUs = new hcloud.Server("k3s-worker-us", {
    serverType: "cpx11",
    image: "ubuntu-22.04",
    location: "ash",
    sshKeys: [sshKeyName],
    userData: workerScript,
    firewallIds: [firewall.id],
  });
  // Fetch the kubeconfig, rewriting the loopback API address to the
  // master's public IP so it works from outside the node.
  const kubeconfigCmd = new command.Command("get-kubeconfig", {
    connection: masterConnection,
    create: master.ipv4Address.apply(ip =>
      `sudo sed 's/127.0.0.1/${ip}/' /etc/rancher/k3s/k3s.yaml`
    ),
  }, { dependsOn: [installMaster] });
  // Label the German worker so workloads (e.g. OpenBao's nodeSelector
  // location=de) can be pinned to it.
  // NOTE(review): this writes ./kubeconfig.yaml to the local working
  // directory as a side effect during the update — confirm that file is
  // gitignored / acceptable to leave on disk.
  const labelNodeCmd = new local.Command("label-german-node", {
    create: pulumi.all([kubeconfigCmd.stdout, workerDe.name]).apply(([kubeconfig, workerName]) => {
      const tempKubeconfigFile = "./kubeconfig.yaml";
      fs.writeFileSync(tempKubeconfigFile, kubeconfig);
      return `kubectl --kubeconfig=${tempKubeconfigFile} label node ${workerName} location=de --overwrite`;
    }),
  }, { dependsOn: [kubeconfigCmd] });
  return {
    masterIp: master.ipv4Address,
    workerDeIp: workerDe.ipv4Address,
    workerUsIp: workerUs.ipv4Address,
    kubeconfig: pulumi.secret(kubeconfigCmd.stdout),
  };
}

View File

@@ -0,0 +1,46 @@
const pulumi = require("@pulumi/pulumi");
const k8s = require("@pulumi/kubernetes");
/**
* Deploys the Hetzner CSI driver to the cluster.
* @param {k8s.Provider} provider - The Kubernetes provider to deploy resources with.
*/
exports.deployCsiDriver = function(provider) {
const hcloudConfig = new pulumi.Config("hcloud");
const hcloudToken = hcloudConfig.requireSecret("token");
const csiSecret = new k8s.core.v1.Secret("hcloud-csi-secret", {
metadata: {
name: "hcloud",
namespace: "kube-system",
},
stringData: {
token: hcloudToken,
},
}, { provider });
const csiChart = new k8s.helm.v3.Chart("hcloud-csi", {
chart: "hcloud-csi",
fetchOpts: { repo: "https://charts.hetzner.cloud" },
namespace: "kube-system",
values: {
controller: {
secret: {
enabled: false,
},
existingSecret: {
name: csiSecret.metadata.name,
}
},
node: {
existingSecret: {
name: csiSecret.metadata.name,
}
}
},
}, {
provider,
dependsOn: [csiSecret],
});
return { csiChart };
};

View File

@@ -0,0 +1,30 @@
const k8s = require("@pulumi/kubernetes");
const fs = require("fs");
const path = require("path");
const yaml = require("js-yaml");
/**
* Deploys HashiCorp Vault using the official Helm chart.
* @param {k8s.Provider} provider - The Kubernetes provider to deploy resources with.
*/
exports.deployVault = function(provider) {
const ns = new k8s.core.v1.Namespace("vault-ns", {
metadata: { name: "vault" }
}, { provider });
const valuesYamlPath = path.join(__dirname, 'values.yaml');
const valuesYaml = fs.readFileSync(valuesYamlPath, "utf8");
const helmValues = yaml.load(valuesYaml);
const vaultChart = new k8s.helm.v3.Chart("openbao", {
chart: "openbao",
fetchOpts: { repo: "https://openbao.github.io/openbao-helm" },
namespace: ns.metadata.name,
values: helmValues,
}, {
provider,
dependsOn: [ns],
});
return { vaultNamespace: ns.metadata.name };
};

View File

@@ -0,0 +1,17 @@
ui:
enabled: true
server:
standalone:
enabled: true
ha:
enabled: false
dataStorage:
enabled: true
size: 2Gi
storageClass: "hcloud-volumes"
nodeSelector:
location: "de"

View File

@@ -0,0 +1,93 @@
(ns infra.core
(:require ["@pulumi/pulumi" :as pulumi]
["@pulumi/hcloud" :as hcloud]
["@pulumi/command/remote" :as command]
["@pulumi/kubernetes" :as k8s]))
;; Pulumi stack configuration shared by all resources below.
(def config (pulumi/Config.))
;; Name of an SSH key already registered with Hetzner Cloud.
(def ssh-key-name (.require config "sshKeyName"))
;; Matching private SSH key (secret Output) used for remote commands.
(def private-key (.requireSecret config "privateKeySsh"))
(defn install-master-script
  "Builds the idempotent k3s server install script for the master node,
  pinning its external IP and enabling the WireGuard-native flannel backend."
  [public-ip]
  (let [exec-flags (str "--flannel-backend=wireguard-native --node-external-ip=" public-ip)]
    (str "if ! command -v k3s >/dev/null; then\n"
         " curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC=\"" exec-flags "\" sh -\n"
         "fi")))
(defn install-worker-script
  "Builds the k3s agent install script for a worker node, returned as a
  Pulumi Output of string.

  FIX: `master-ip` and `token` are Pulumi Outputs. The previous version
  concatenated them with `str` (embedding the Output object's toString, not
  its value) and misused `pulumi/interpolate`, which is a template tag, as a
  plain function. Resolve both values with `pulumi/all` + `.apply` instead."
  [master-ip token]
  (-> (pulumi/all #js [master-ip token])
      (.apply (fn [[ip tok]]
                (str "if ! command -v k3s >/dev/null; then\n"
                     " curl -sfL https://get.k3s.io | K3S_URL=https://" ip
                     ":6443 K3S_TOKEN=\"" tok "\" sh -\n"
                     "fi")))))
(defn hcloud-server
  "Creates a Hetzner Cloud server: Ubuntu 22.04 image of the given size in
  the given location, optionally seeded with a cloud-init user-data script."
  [name server-type location ssh-key & {:keys [user-data]}]
  (let [args #js {:serverType server-type
                  :image "ubuntu-22.04"
                  :location location
                  :sshKeys #js [ssh-key]
                  :userData user-data}]
    (hcloud/Server. name args)))
(defn ssh-connection
  "Connection descriptor for running remote commands on `ip` as root,
  authenticated with the stack's configured private key."
  [ip]
  (js-obj "host" ip
          "user" "root"
          "privateKey" private-key))
(defn main!
  "Entry point: provisions the Hetzner servers, bootstraps k3s on the
  master, joins the two workers, and returns the stack outputs (master and
  worker IPs plus the kubeconfig, marked secret)."
  []
  (let [;; Master node in Falkenstein; workers join it over the public net.
        master (hcloud-server "k3s-master-de" "cx22" "fsn1" ssh-key-name)
        master-conn (.apply (.-ipv4Address master) ssh-connection)
        ;; Install the k3s server on the master over SSH.
        install-master (command/Command. "install-master"
                                         #js {:connection master-conn
                                              :create (.apply (.-ipv4Address master)
                                                              install-master-script)})
        ;; Read the join token only after the install command ran.
        ;; NOTE(review): constructing a resource inside .apply hides it from
        ;; `pulumi preview` until the output resolves.
        token-cmd (-> (.-stdout install-master)
                      (.apply (fn [_]
                                (command/Command. "get-token"
                                                  #js {:connection master-conn
                                                       :create "cat /var/lib/rancher/k3s/server/node-token"}))))
        token-stdout (-> token-cmd (.apply (fn [cmd] (.-stdout cmd))))
        worker-script (install-worker-script (.-ipv4Address master) token-stdout)
        worker-de (hcloud-server "k3s-worker-de" "cx22" "fsn1" ssh-key-name :user-data worker-script)
        worker-us (hcloud-server "k3s-worker-us" "cpx11" "ash" ssh-key-name :user-data worker-script)
        ;; Fetch the kubeconfig, rewriting the loopback API address to the
        ;; master's public IP so it works from outside the node.
        kubeconfig-cmd (-> (pulumi/all #js [(.-stdout install-master) (.-ipv4Address master)])
                           (.apply (fn [[_ master-ip]]
                                     (command/Command. "get-kubeconfig"
                                                       #js {:connection master-conn
                                                            :create (str "sleep 10 &&" "sed 's/127.0.0.1/" master-ip "/' /etc/rancher/k3s/k3s.yaml")}))))
        kubeconfig-stdout (-> kubeconfig-cmd (.apply (fn [cmd] (.-stdout cmd))))
        ;; Gate the provider on both workers existing.
        all-workers-ready (pulumi/all #js [(.-urn worker-de) (.-urn worker-us)])
        ;; FIX: pulumi/all takes no callback — the original passed a selector
        ;; fn as a second argument, which Pulumi ignores, so the provider
        ;; would have received an Output of [kubeconfig urns] instead of the
        ;; kubeconfig string. Select the kubeconfig via .apply instead.
        ready-kubeconfig (-> (pulumi/all #js [kubeconfig-stdout all-workers-ready])
                             (.apply (fn [[kc _]] kc)))
        k8s-provider (k8s/Provider. "k8s-provider"
                                    #js {:kubeconfig ready-kubeconfig})]
    ;; Assemble the stack outputs once all values resolve.
    (-> (pulumi/all #js [(.-ipv4Address master)
                         (.-ipv4Address worker-de)
                         (.-ipv4Address worker-us)
                         kubeconfig-stdout])
        (.apply (fn [[master-ip worker-de-ip worker-us-ip kc]]
                  (js-obj
                   "masterIp" master-ip
                   "workerDeIp" worker-de-ip
                   "workerUsIp" worker-us-ip
                   "kubeconfig" (pulumi/secret kc)))))))
(set! (.-main js/module) main!)

18
iac/tsconfig.json Normal file
View File

@@ -0,0 +1,18 @@
{
"compilerOptions": {
"strict": true,
"outDir": "bin",
"target": "es2020",
"module": "commonjs",
"moduleResolution": "node",
"sourceMap": true,
"experimentalDecorators": true,
"pretty": true,
"noFallthroughCasesInSwitch": true,
"noImplicitReturns": true,
"forceConsistentCasingInFileNames": true
},
"files": [
"index.ts"
]
}