From 77d02481beefd976c60fb5cdaa1367fe7f87b967 Mon Sep 17 00:00:00 2001 From: Moritz Graf Date: Sat, 7 Feb 2026 11:55:17 +0100 Subject: [PATCH] First draft of openclaw. --- README.md | 19 +++++ k8s/llm/llama_cpp_hosting.yaml | 135 ------------------------------ k8s/llm/ollama.yaml | 101 ++++++++++++++++++++++ k8s/openclaw/AGENTS.md | 22 +++++ k8s/openclaw/namespace.yaml | 4 + k8s/openclaw/openclaw.secret.yaml | Bin 0 -> 2571 bytes 6 files changed, 146 insertions(+), 135 deletions(-) delete mode 100644 k8s/llm/llama_cpp_hosting.yaml create mode 100644 k8s/llm/ollama.yaml create mode 100644 k8s/openclaw/AGENTS.md create mode 100644 k8s/openclaw/namespace.yaml create mode 100644 k8s/openclaw/openclaw.secret.yaml diff --git a/README.md b/README.md index 63cae4e..84c9607 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,25 @@ Infrapuzzle is the newly restructured way of implementing my private infrastruct [Documentation in subfolder](./k8s/README.md). The services themselfes. +## AI & Agents + +The cluster hosts local AI capabilities and agents. + +### LLM (Ollama) +Hosts a local LLM (Llama 3 8B) for inference. +* **Deploy**: `kubectl apply -f k8s/llm/ollama.yaml` +* **Verification**: Check pods in `llm` namespace. + +### OpenClaw +An autonomous AI agent platform. +1. **Create Namespace**: `kubectl apply -f k8s/openclaw/namespace.yaml` +2. **Configure Secrets**: + * Edit `k8s/openclaw/openclaw.secret.yaml`. + * Replace `change-me` with your Gemini API Key. + * **Encrypt**: Ensure the file is encrypted with `git crypt` before committing! +3. **Deploy**: `kubectl apply -f k8s/openclaw/openclaw.secret.yaml` +4. 
**Access**: `https://openclaw.haumdaucher.de` + ## Links used * [ingress via host network](https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network) diff --git a/k8s/llm/llama_cpp_hosting.yaml b/k8s/llm/llama_cpp_hosting.yaml deleted file mode 100644 index 8d4f4af..0000000 --- a/k8s/llm/llama_cpp_hosting.yaml +++ /dev/null @@ -1,135 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: llm - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: llama-cpp-server - namespace: llm -spec: - replicas: 1 - selector: - matchLabels: - app: llama-cpp-server - strategy: - type: Recreate - template: - metadata: - labels: - app: llama-cpp-server - spec: - initContainers: - - name: download-model - image: curlimages/curl - command: - - /bin/sh - - -c - - | - MODEL_URL="https://huggingface.co/QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct.Q8_0.gguf?download=true" - MODEL_FILE="/models/Meta-Llama-3-8B-Instruct.Q8_0.gguf" - # Purge everything except the desired model file - find /models -type f ! -name "$(basename $MODEL_FILE)" -delete - # Check if the model file does not exist and then download it - if [ ! 
-f $MODEL_FILE ]; then - curl -L -o $MODEL_FILE $MODEL_URL - fi - volumeMounts: - - name: model-storage - mountPath: /models - containers: - - name: llama-cpp-server - image: ghcr.io/ggerganov/llama.cpp:server - command: - - /server - - -m - - "/models/Meta-Llama-3-8B-Instruct.Q8_0.gguf" - - --port - - "8000" - - --host - - "0.0.0.0" - - -n - - "512" - resources: - requests: - memory: "18Gi" - cpu: 0.1 - volumeMounts: - - name: model-storage - mountPath: /models - volumes: - - name: model-storage - persistentVolumeClaim: - claimName: llama-model-pvc - ---- - -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: llama-model-pvc - namespace: llm -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi ---- -apiVersion: v1 -kind: Service -metadata: - name: llama-server-service - namespace: llm -spec: - type: ClusterIP - selector: - app: llama-cpp-server - ports: - - protocol: TCP - port: 8000 - targetPort: 8000 ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: llama-server-service - namespace: llm - annotations: - nginx.ingress.kubernetes.io/force-ssl-redirect: "true" - nginx.ingress.kubernetes.io/auth-realm: Authentication Required - llama webui - nginx.ingress.kubernetes.io/auth-secret: llama-auth - nginx.ingress.kubernetes.io/auth-type: basic - cert-manager.io/cluster-issuer: "letsencrypt-prod" - kubernetes.io/ingress.class: nginx -spec: - ingressClassName: nginx - tls: - - hosts: - - "llama.moritzgraf.de" - secretName: llama-moritzgraf-de - rules: - - host: llama.moritzgraf.de - http: - paths: - - backend: - service: - name: llama-server-service - port: - number: 8000 - path: / - pathType: Prefix ---- -apiVersion: v1 -data: - # fabian:stinkt - # $htpasswd -c auth fabian - # -> Creates file auth with creds, does not work in git repo. unkn why. 
- auth: ZmFiaWFuOiRhcHIxJHRTV3YzU3hOJHJPZEJ5WXhYdG4vbVJtSzhtaENWZy4K -kind: Secret -metadata: - name: llama-auth - namespace: llm -type: Opaque diff --git a/k8s/llm/ollama.yaml b/k8s/llm/ollama.yaml new file mode 100644 index 0000000..00f3fd5 --- /dev/null +++ b/k8s/llm/ollama.yaml @@ -0,0 +1,101 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ollama + namespace: llm + labels: + app: ollama +spec: + replicas: 1 + selector: + matchLabels: + app: ollama + template: + metadata: + labels: + app: ollama + spec: + initContainers: + - name: pull-model + image: curlimages/curl + command: ["/bin/sh", "-c"] + args: + - | + echo "Waiting for Ollama service..." + # Simple wait loop (naive check, better to use readiness probe/postStart but init runs before app) + # Actually, init container runs BEFORE the main container, so it can't interact with the main container's localhost. + # We need to perform the model pull *after* Ollama starts. + # Changing strategy: Use a postStart hook or sidecar. + # Or simpler: Just let it start, and rely on user/execution time pull, or use an entrypoint script wrapper in main container. + # Best approach for k8s simplicity: Use a command wrapper. + echo "Init container cannot pull because main container is not up. Skipping pre-pull in init." + echo "Model pull will require manual trigger or standard entrypoint behavior." + # To automate: We can run a sidecar that waits for port 11434 and then pulls. 
+ containers: + - name: ollama + image: ollama/ollama:latest + env: + - name: OLLAMA_KEEP_ALIVE + value: "-1" + - name: OLLAMA_HOST + value: "0.0.0.0" + resources: + requests: + memory: "8Gi" + cpu: "2" + limits: + memory: "12Gi" + cpu: "4" + ports: + - containerPort: 11434 + name: http + volumeMounts: + - name: ollama-storage + mountPath: /root/.ollama + livenessProbe: + httpGet: + path: /api/version + port: http + initialDelaySeconds: 60 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /api/version + port: http + initialDelaySeconds: 30 + periodSeconds: 5 + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "sleep 10; ollama pull llama3.1:8b-instruct-q8_0"] + volumes: + - name: ollama-storage + persistentVolumeClaim: + claimName: ollama-storage +--- +apiVersion: v1 +kind: Service +metadata: + name: ollama + namespace: llm +spec: + type: ClusterIP + selector: + app: ollama + ports: + - port: 11434 + targetPort: 11434 + protocol: TCP +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ollama-storage + namespace: llm +spec: + accessModes: + - ReadWriteOnce + storageClassName: openebs-hostpath + resources: + requests: + storage: 50Gi diff --git a/k8s/openclaw/AGENTS.md b/k8s/openclaw/AGENTS.md new file mode 100644 index 0000000..16f3ef3 --- /dev/null +++ b/k8s/openclaw/AGENTS.md @@ -0,0 +1,22 @@ +# k8s/openclaw/AGENTS.md + +> [!NOTE] +> This directory contains the deployment configuration for **OpenClaw**, an open-source AI agent platform. + +## Overview +* **Namespace**: `openclaw` +* **Workload**: `openclaw` (Deployment) +* **Dependencies**: + * **LLM**: Connects to `ollama` in `llm` namespace. + * **Secrets**: Requires `GEMINI_API_KEY`. + +## Deployment +1. **Apply Namespace**: `kubectl apply -f namespace.yaml` +2. **Secrets**: + * Edit `openclaw.secret.yaml` to set `api-key`. + * Ensure `openclaw.secret.yaml` is encrypted with `git-crypt`. +3. 
**Apply Workload**: `kubectl apply -f openclaw.secret.yaml` + +## Configuration +* **LLM Provider**: `ollama` +* **Ollama URL**: `http://ollama.llm.svc.cluster.local:11434` diff --git a/k8s/openclaw/namespace.yaml b/k8s/openclaw/namespace.yaml new file mode 100644 index 0000000..394432e --- /dev/null +++ b/k8s/openclaw/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: openclaw diff --git a/k8s/openclaw/openclaw.secret.yaml b/k8s/openclaw/openclaw.secret.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0eae2c0f954060d51c6f578768a285e305b2fe71 GIT binary patch literal 2571 zcmV+m3iR~=M@dveQdv+`0LoIkaXqi_(y~BD*Ia^*2#&)BZ!s1lXUN|Oleqk9*3M~ODGe=G-h<5lsQ0*Q zcqyfc&5tw_4CPaaf6)=@3EdMd;lWIM{oq&Uo7Ju`xLD7%+H;wn3Dv6=I)&g$HIm3>&Wp zHBy4lYUR$br_kUIOpm6sFu%oxR%5mKockr)%Z^}2TS~e(@bR)yQDsklHCqC?VH{|c zlSLh?g?|gL$$NHnm|ICvfNT$KtD7k9qK=D=!k-0m3kiVaBoK~(_1J#>wJM%;ANu>* z-K;*clkIaMsW}79-4Eyk(OFJg%aeNT=w6#`t;s7Fl@xlV1A3$w)>T>h!8eMcR&VCn zJ;gy+q9CQMjSl7Pa-AB9+}AL~8|w~T!fQMgRS!Jt99|U6e>2_QE^>UC?XP2YH9aat zZ}qj5=@UeG2QuO6;-_dgA>l~ShfvtOHl|7-z!|8{t>Zwbyo@ITin_qlF8JzeJR289 zJ5Nb%3)?=ILkunmkVc!55lPeCMi@4WJPKCT$kP9lT)45_Q*S;jg2j|%%0u#$FQAIue5+yx`je#883=I zy{$nrKM5s#+e92GDTd=i2t5Z!R+P;jP&8<=i9EPcF^qtpEotQ!K*lt9=_FBT0q%$P zk+wvbq=q-;EeQ3L0gbh!g)Ytd02W9;ZOp71@4ykjF&3|MTx!JG54mPopd_;6 zK-4Qbab2pFK-d+ck_4J^m?b=Frs5E((rBjNyPqpjfOSeh%*A}gth;7O#n<;r`Ts); zzT90VX~iMCb+c9c%%1oN--8)^E?|;PMgyf(6)-)k6)so((uw1VbDA}Kd(jKR?NLDP zUJ%oxza#Xj&D9Dtlr+g3qr=C`*KZWmi;tcKKTy|CvdbF0f zdU2gZXJ@m9{@AV*H$Uf9By(5AXqOeK1ub~$#2P$c|EZU*3);+pUw@&Qs$kPncVxt+ zK`dcw_iJW$_Pv9mhhiEw1%IvA{lN}{eLX!G)D$KTfN=`qW6(*2I%73CEDn?o0fw%s zBAmD(3XUtnDj1~9Wx)uGzHb5=)nP%e8vUw2GT|vD-zV{4(eN9LUNRLjKQU-X_;Xv; zAhNEapqg11RZx7r2tN04%jv-J2;fMCVN=ciOOMs2JAj>C8`2^({h*nV76hh)yZ5lX`(!@$1ixv-|V-zgoAsw|J`DJ)$ zV2)1em}sS1Q~H{7x(fh8a}6I~NU6Prl~At9L^lun5u>SVBwXwp;|>hO8X#197RM+Y zzsQOIyo@$Mldm7@qqFrLL=mj7vI#lSNP2(;z; zqt3Tez_1IvDmpK8v#C6*C7cbFFZZdKk8Y_5RnTmh=eW&W7ETQXaNn54IY7J}vHs9K 
z&dmu%X{fqwXF{_|4|g>2CQPzbP;56(WGHT~yCS{=nin~E4mg!u&(1PLp-#3PSY{Sw z`-{5T3$IEyDyV-rwdEn~hf-^IrobacsPM>|0z$_Y&=Uf;2sZ!r!YeV(CVMhl?mSR! z8rn+Gf3P>b{wK|Ag#P+wVXx*@WqQI|mLoa$KAeX88ibBv+*(l}=CAY^{a=V?V0F!(|8UP8s;n>u0JM zH&1vn?%vySxBHR_dYgX7!JFOIG#XR*?{a*g{9H|PHuCU`(|z5AHwp<$h_br~9vi0tq9m6Z89 zR0ifWANm8rO!3yfkGBu=rrrTEDK{)fz_ORGCTIyix;w;bnh5KLFz=%?HlQM-EoN)B zojSgv0>31UItMPNO?ehZrh+CB*g}9l#ii!n26Sv*h6C8$;I9jw(@80#Es&~~GMr~4 zIeE7IpL=aUq65F}c`pC${;)BLEKxC@YSZ5@ULH(f9BZ4zB#kMAi=zm+Ug?B-o+vWgBjMIh+-twV>dycG literal 0 HcmV?d00001