init dump

2025-08-21 19:26:04 -05:00
parent 5b305f82a4
commit 0c1a660aa8
11 changed files with 486 additions and 0 deletions

iac/src/js/core.js

@@ -0,0 +1,143 @@
const pulumi = require("@pulumi/pulumi");
const hcloud = require("@pulumi/hcloud");
const command = require("@pulumi/command/remote");
const k8s = require("@pulumi/kubernetes");
const local = require("@pulumi/command/local");
const fs = require("fs");
exports.createCluster = function() {
  const config = new pulumi.Config();
  const sshKeyName = config.require("sshKeyName");
  const privateKey = config.requireSecret("privateKeySsh");
  const installMasterScript = (publicIp) => `
# Install k3s if it's not already present.
if ! command -v k3s >/dev/null; then
  curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--flannel-backend=wireguard-native --node-external-ip=${publicIp}" sh -
fi
# Wait until the kubeconfig is readable and a kubectl command succeeds.
until sudo k3s kubectl get node > /dev/null 2>&1; do
  echo "Waiting for master node to be ready..."
  sleep 5
done
`;
  const installWorkerScript = (masterIp, token) => `#!/bin/bash
# Redirect all output (stdout and stderr) to a log file for debugging.
exec > /root/k3s-install.log 2>&1
set -x # Echo every command being executed to the log file.
echo "--- Starting k3s worker installation script at $(date) ---"
# Add a loop to wait for network connectivity to the master node.
until ping -c1 ${masterIp}; do
  echo "Waiting for network connectivity to master..."
  sleep 2
done
echo "Network is up. Discovering this node's public IP..."
# Use an external service to find the public IP address of this server.
WORKER_PUBLIC_IP=$(curl -s https://ifconfig.me/ip)
# The dollar sign is escaped (\$) so it's interpreted by the remote shell.
echo "Discovered public IP: \${WORKER_PUBLIC_IP}"
echo "Proceeding with k3s agent installation."
if ! command -v k3s >/dev/null; then
  echo "k3s not found, attempting installation..."
  # Pass the discovered public IP to the k3s installer.
  # The dollar sign for WORKER_PUBLIC_IP is escaped (\$) so that it's interpreted
  # by the bash script on the remote server, not by the local Node.js process.
  curl -sfL https://get.k3s.io | K3S_URL=https://${masterIp}:6443 K3S_TOKEN="${token}" INSTALL_K3S_EXEC="--node-external-ip=\${WORKER_PUBLIC_IP}" sh -
  echo "k3s installation script finished with exit code $?."
else
  echo "k3s is already installed."
fi
echo "--- Finished k3s worker installation script at $(date) ---"
`;
  const firewall = new hcloud.Firewall("k3s-firewall", {
    rules: [
      { direction: "in", protocol: "tcp", port: "22", sourceIps: ["0.0.0.0/0", "::/0"] },
      { direction: "in", protocol: "tcp", port: "6443", sourceIps: ["0.0.0.0/0", "::/0"] },
      { direction: "in", protocol: "udp", port: "51820", sourceIps: ["0.0.0.0/0", "::/0"] },
      { direction: "in", protocol: "icmp", sourceIps: ["0.0.0.0/0", "::/0"] },
    ],
  });
  const master = new hcloud.Server("k3s-master-de", {
    serverType: "cx22",
    image: "ubuntu-22.04",
    location: "fsn1",
    sshKeys: [sshKeyName],
    firewallIds: [firewall.id],
  });
  const masterConnection = {
    host: master.ipv4Address,
    user: "root",
    privateKey: privateKey,
  };
  const installMaster = new command.Command("install-master", {
    connection: masterConnection,
    create: master.ipv4Address.apply(installMasterScript),
  }, { dependsOn: [master] });
  const tokenCmd = new command.Command("get-token", {
    connection: masterConnection,
    create: "sudo cat /var/lib/rancher/k3s/server/node-token",
  }, { dependsOn: [installMaster] });
  const workerScript = pulumi.all([master.ipv4Address, tokenCmd.stdout]).apply(([ip, token]) =>
    installWorkerScript(ip, token.trim())
  );
  const workerDe = new hcloud.Server("k3s-worker-de", {
    serverType: "cx22",
    image: "ubuntu-22.04",
    location: "fsn1",
    sshKeys: [sshKeyName],
    userData: workerScript,
    firewallIds: [firewall.id],
  });
  const workerUs = new hcloud.Server("k3s-worker-us", {
    serverType: "cpx11",
    image: "ubuntu-22.04",
    location: "ash",
    sshKeys: [sshKeyName],
    userData: workerScript,
    firewallIds: [firewall.id],
  });
  const kubeconfigCmd = new command.Command("get-kubeconfig", {
    connection: masterConnection,
    create: master.ipv4Address.apply(ip =>
      `sudo sed 's/127.0.0.1/${ip}/' /etc/rancher/k3s/k3s.yaml`
    ),
  }, { dependsOn: [installMaster] });
  const labelNodeCmd = new local.Command("label-german-node", {
    create: pulumi.all([kubeconfigCmd.stdout, workerDe.name]).apply(([kubeconfig, workerName]) => {
      const tempKubeconfigFile = "./kubeconfig.yaml";
      fs.writeFileSync(tempKubeconfigFile, kubeconfig);
      return `kubectl --kubeconfig=${tempKubeconfigFile} label node ${workerName} location=de --overwrite`;
    }),
  }, { dependsOn: [kubeconfigCmd] });
  return {
    masterIp: master.ipv4Address,
    workerDeIp: workerDe.ipv4Address,
    workerUsIp: workerUs.ipv4Address,
    kubeconfig: pulumi.secret(kubeconfigCmd.stdout),
  };
}
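The Pulumi entry point that consumes `createCluster` is among the 11 changed files but is not shown in this dump. A minimal sketch of how its outputs might be wired up, assuming a sibling `index.js` (the file name, module paths, and resource name are assumptions, not part of the commit):

// index.js -- hypothetical consumer of core.js, not shown in this dump.
const k8s = require("@pulumi/kubernetes");
const core = require("./core"); // assumed relative path

const cluster = core.createCluster();

// A provider built from the fetched kubeconfig; the Helm-based modules further
// down (CSI driver, OpenBao) would be deployed through it.
const provider = new k8s.Provider("k3s", { kubeconfig: cluster.kubeconfig });

// Expose the cluster endpoints as stack outputs.
exports.masterIp = cluster.masterIp;
exports.workerDeIp = cluster.workerDeIp;
exports.workerUsIp = cluster.workerUsIp;
exports.kubeconfig = cluster.kubeconfig; // already wrapped in pulumi.secret()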


@@ -0,0 +1,46 @@
const pulumi = require("@pulumi/pulumi");
const k8s = require("@pulumi/kubernetes");
/**
 * Deploys the Hetzner CSI driver to the cluster.
 * @param {k8s.Provider} provider - The Kubernetes provider to deploy resources with.
 */
exports.deployCsiDriver = function(provider) {
  const hcloudConfig = new pulumi.Config("hcloud");
  const hcloudToken = hcloudConfig.requireSecret("token");
  const csiSecret = new k8s.core.v1.Secret("hcloud-csi-secret", {
    metadata: {
      name: "hcloud",
      namespace: "kube-system",
    },
    stringData: {
      token: hcloudToken,
    },
  }, { provider });
  const csiChart = new k8s.helm.v3.Chart("hcloud-csi", {
    chart: "hcloud-csi",
    fetchOpts: { repo: "https://charts.hetzner.cloud" },
    namespace: "kube-system",
    values: {
      controller: {
        secret: {
          enabled: false,
        },
        existingSecret: {
          name: csiSecret.metadata.name,
        },
      },
      node: {
        existingSecret: {
          name: csiSecret.metadata.name,
        },
      },
    },
  }, {
    provider,
    dependsOn: [csiSecret],
  });
  return { csiChart };
};
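A usage sketch (not part of this dump; the module path and the `provider` from the index.js sketch above are assumptions): the function only needs the provider built from the cluster kubeconfig, and the chart it installs is what provides the `hcloud-volumes` StorageClass referenced by the OpenBao values below.

// Hypothetical wiring, module path assumed.
const { deployCsiDriver } = require("./csi");
const { csiChart } = deployCsiDriver(provider); // provider from the sketch above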


@@ -0,0 +1,30 @@
const k8s = require("@pulumi/kubernetes");
const fs = require("fs");
const path = require("path");
const yaml = require("js-yaml");
/**
 * Deploys OpenBao (an open-source fork of HashiCorp Vault) using the openbao Helm chart.
 * @param {k8s.Provider} provider - The Kubernetes provider to deploy resources with.
 */
exports.deployVault = function(provider) {
  const ns = new k8s.core.v1.Namespace("vault-ns", {
    metadata: { name: "vault" }
  }, { provider });
  const valuesYamlPath = path.join(__dirname, "values.yaml");
  const valuesYaml = fs.readFileSync(valuesYamlPath, "utf8");
  const helmValues = yaml.load(valuesYaml);
  const vaultChart = new k8s.helm.v3.Chart("openbao", {
    chart: "openbao",
    fetchOpts: { repo: "https://openbao.github.io/openbao-helm" },
    namespace: ns.metadata.name,
    values: helmValues,
  }, {
    provider,
    dependsOn: [ns],
  });
  return { vaultNamespace: ns.metadata.name };
};


@@ -0,0 +1,17 @@
ui:
  enabled: true
server:
  standalone:
    enabled: true
  ha:
    enabled: false
  dataStorage:
    enabled: true
    size: 2Gi
    storageClass: "hcloud-volumes"
  nodeSelector:
    location: "de"
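These values tie the JavaScript modules together: `storageClass: "hcloud-volumes"` expects the class the hcloud-csi chart ships by default, and the `location: "de"` nodeSelector matches the `location=de` label applied by `label-german-node` in core.js. For reference, `yaml.load` in the Vault module parses this file into the following plain object (illustration only, not part of the commit):

const helmValues = {
  ui: { enabled: true },
  server: {
    standalone: { enabled: true },
    ha: { enabled: false },
    dataStorage: { enabled: true, size: "2Gi", storageClass: "hcloud-volumes" },
    nodeSelector: { location: "de" },
  },
};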


@@ -0,0 +1,93 @@
(ns infra.core
  (:require ["@pulumi/pulumi" :as pulumi]
            ["@pulumi/hcloud" :as hcloud]
            ["@pulumi/command/remote" :as command]
            ["@pulumi/kubernetes" :as k8s]))
(def config (pulumi/Config.))
(def ssh-key-name (.require config "sshKeyName"))
(def private-key (.requireSecret config "privateKeySsh"))
(defn install-master-script [public-ip]
  (str "if ! command -v k3s >/dev/null; then\n"
       "  curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC=\"--flannel-backend=wireguard-native --node-external-ip="
       public-ip
       "\" sh -\n"
       "fi"))
(defn install-worker-script [master-ip token]
  ;; Resolve the Outputs before building the string: `str` on raw Output objects
  ;; would not interpolate their values, and pulumi/interpolate expects a tagged
  ;; template, so mirror the JS version with pulumi/all + .apply.
  (-> (pulumi/all #js [master-ip token])
      (.apply (fn [[ip tok]]
                (str "if ! command -v k3s >/dev/null; then\n"
                     "  curl -sfL https://get.k3s.io | K3S_URL=https://" ip
                     ":6443 K3S_TOKEN=\"" (.trim tok) "\" sh -\n"
                     "fi")))))
(defn hcloud-server [name server-type location ssh-key & {:keys [user-data]}]
  (hcloud/Server. name
                  #js {:serverType server-type
                       :image "ubuntu-22.04"
                       :location location
                       :sshKeys #js [ssh-key]
                       :userData user-data}))
(defn ssh-connection [ip]
  #js {:host ip
       :user "root"
       :privateKey private-key})
(defn main! []
  (let [master (hcloud-server "k3s-master-de" "cx22" "fsn1" ssh-key-name)
        master-conn (.apply (.-ipv4Address master) ssh-connection)
        install-master (command/Command. "install-master"
                                         #js {:connection master-conn
                                              :create (.apply (.-ipv4Address master)
                                                              install-master-script)})
        token-cmd (-> (.-stdout install-master)
                      (.apply (fn [_]
                                (command/Command. "get-token"
                                                  #js {:connection master-conn
                                                       :create "cat /var/lib/rancher/k3s/server/node-token"}))))
        token-stdout (-> token-cmd (.apply (fn [cmd] (.-stdout cmd))))
        worker-script (install-worker-script (.-ipv4Address master) token-stdout)
        worker-de (hcloud-server "k3s-worker-de" "cx22" "fsn1" ssh-key-name :user-data worker-script)
        worker-us (hcloud-server "k3s-worker-us" "cpx11" "ash" ssh-key-name :user-data worker-script)
        kubeconfig-cmd (-> (pulumi/all #js [(.-stdout install-master) (.-ipv4Address master)])
                           (.apply (fn [[_ master-ip]]
                                     (command/Command. "get-kubeconfig"
                                                       #js {:connection master-conn
                                                            :create (str "sleep 10 && sed 's/127.0.0.1/" master-ip "/' /etc/rancher/k3s/k3s.yaml")}))))
        kubeconfig-stdout (-> kubeconfig-cmd (.apply (fn [cmd] (.-stdout cmd))))
        all-workers-ready (pulumi/all #js [(.-urn worker-de) (.-urn worker-us)])
        ;; pulumi/all only takes the outputs; the projection goes through .apply.
        ready-kubeconfig (-> (pulumi/all #js [kubeconfig-stdout all-workers-ready])
                             (.apply (fn [[kc _]] kc)))
        k8s-provider (k8s/Provider. "k8s-provider"
                                    #js {:kubeconfig ready-kubeconfig})]
    (-> (pulumi/all #js [(.-ipv4Address master)
                         (.-ipv4Address worker-de)
                         (.-ipv4Address worker-us)
                         kubeconfig-stdout])
        (.apply (fn [[master-ip worker-de-ip worker-us-ip kc]]
                  (js-obj
                   "masterIp" master-ip
                   "workerDeIp" worker-de-ip
                   "workerUsIp" worker-us-ip
                   "kubeconfig" (pulumi/secret kc)))))))
(set! (.-main js/module) main!)