diff --git a/k8s/README.md b/k8s/README.md
index eea6d09..cb9a0c4 100644
--- a/k8s/README.md
+++ b/k8s/README.md
@@ -778,7 +778,9 @@ An autonomous AI agent platform.
 1. **Create Namespace**: `kubectl apply -f openclaw/namespace.yaml`
 2. **Configure Secrets**:
     * Edit `openclaw/openclaw.secret.yaml`.
-    * Replace `change-me` with your Gemini API Key.
+    * **Gemini**: Replace `change-me` with your Gemini API Key.
+    * **Telegram**: Replace `telegram-bot-token` with your Bot Token.
+    * **Gateway**: Token is pre-filled (randomly generated). Change if desired.
     * **Encrypt**: Ensure the file is encrypted with `git crypt` before committing!
 3. **Deploy**: `kubectl apply -f openclaw/openclaw.secret.yaml`
 4. **Access**: `https://openclaw.haumdaucher.de`
diff --git a/k8s/llm/ollama.yaml b/k8s/llm/ollama.yaml
index 00f3fd5..6d36362 100644
--- a/k8s/llm/ollama.yaml
+++ b/k8s/llm/ollama.yaml
@@ -64,10 +64,18 @@ spec:
               port: http
             initialDelaySeconds: 30
             periodSeconds: 5
-          lifecycle:
-            postStart:
-              exec:
-                command: ["/bin/sh", "-c", "sleep 10; ollama pull llama3.1:8b-instruct-q8_0"]
+          command: ["/bin/sh", "-c"]
+          args:
+            - |
+              # Start Ollama in background
+              /bin/ollama serve &
+              PID=$!
+              echo "Waiting for Ollama..."
+              sleep 10
+              echo "Pulling model..."
+              ollama pull llama3.1:8b-instruct-q8_0
+              echo "Model pulled. Keeping container alive."
+              wait $PID
       volumes:
         - name: ollama-storage
           persistentVolumeClaim:
diff --git a/k8s/openclaw/openclaw.secret.yaml b/k8s/openclaw/openclaw.secret.yaml
index 0eae2c0..0862bf8 100644
Binary files a/k8s/openclaw/openclaw.secret.yaml and b/k8s/openclaw/openclaw.secret.yaml differ