First hetzner changes
commit 2dc555a9a3, parent 460accd206

@@ -0,0 +1,2 @@
kubespray
venv

@@ -0,0 +1,103 @@
# Bootstrap

The following lines document how to initialize a fresh cluster, either on real servers or using Vagrant. It assumes kubespray is cloned into this folder; the clone is excluded via *.gitignore*, so all files stay in this folder.

Use a [kubespray tag](https://github.com/kubernetes-sigs/kubespray/releases) as the parameter:

```sh
. ./init.sh "release-2.14"
```

See [kubespray.io](https://kubespray.io/) for detailed information about kubespray, though the site seems to be a little outdated.

## Vagrant

```sh
cd kubespray
vagrant up
# once up and able to ssh in:
vagrant ssh k8s-1
```
## Prod

Prepare the server:

* deactivate swap! (see the sketch below)
* sudoers entry for the admin user: `moritz ALL=(ALL) NOPASSWD:ALL`
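
Swap can be switched off as sketched below (a minimal sketch, assuming a CentOS host with its swap entry in /etc/fstab):

```sh
sudo swapoff -a                            # disable swap immediately
sudo sed -i '/ swap / s/^/#/' /etc/fstab   # comment out swap entries so it stays off after reboot
```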
```sh
ssh centos@<ip>
# auth via password
sudo su - root
adduser moritz
visudo  # add moritz as sudo user
su - moritz
sudo yum -y install vim python3
ssh-keygen
vim .ssh/authorized_keys  # paste your public key
chmod 644 .ssh/authorized_keys
# check that login works with the ssh key before proceeding
sudo vim /etc/ssh/sshd_config  # disable password auth & root login
sudo yum upgrade -y && sudo reboot
```
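
The sshd_config edit boils down to two standard OpenSSH directives (a sketch; keep a second session open so a lockout can be caught):

```sh
sudo sed -i 's/^#\?PasswordAuthentication .*/PasswordAuthentication no/' /etc/ssh/sshd_config
sudo sed -i 's/^#\?PermitRootLogin .*/PermitRootLogin no/' /etc/ssh/sshd_config
sudo systemctl restart sshd   # apply the hardened configuration
```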

Install Kubernetes:

```sh
. ./init.sh
# follow the instructions from the output, something like:
cd kubespray
ansible-playbook -i inventory/prod/inventory.ini cluster.yml
```

And get the credentials:

```sh
ssh <ip>
sudo su - root
cd
cp -r .kube /home/moritz/
chown -R moritz: /home/moritz/.kube
# ctrl+d (back to moritz)
kubectl get ns  # test the connection
# ctrl+d (back to the local machine)
scp haumdaucher.de:/home/moritz/.kube/config .kube/config
```

Continue in the *k8s* directory.

## Upgrade cluster

Check the current default value of `kube_version` in the cloned repository.

```sh
cd kubespray
ansible-playbook -i inventory/prod/inventory.ini -e kube_version=v1.18.8 -e upgrade_cluster_setup=true cluster.yml
# or just the newest version
ansible-playbook -i inventory/prod/inventory.ini -e upgrade_cluster_setup=true cluster.yml
# upgrade to a specific calico version (did not trigger / failed)
ansible-playbook -i inventory/prod/inventory.ini -e upgrade_cluster_setup=true -e calico_version=v3.15.2 cluster.yml --tags=network
```
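
After the playbook finishes, the rollout can be verified per node (a quick check, independent of kubespray):

```sh
kubectl get nodes   # the VERSION column should show the new kubelet version on every node
```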

History:

* 2020-04-18 kube_version=v1.16.8 kubespray_branch=release-2.12

## Add node

See the [documentation](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/nodes.md).

Note: this was more or less trial and error; running the different playbooks over and over again got it right at some point.

```sh
ansible-playbook -i inventory/prod/inventory.ini --limit=ns3088070.ip-37-59-40.eu,ns3100058.ip-37-59-61.eu scale.yml
ansible-playbook -i inventory/prod/inventory.ini --limit=etcd,kube-master -e ignore_assert_errors=yes cluster.yml
```
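
Whether the new workers actually registered can be checked directly:

```sh
kubectl get nodes -o wide   # the new hosts should appear and eventually report Ready
```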

This runs everything and is more or less idempotent:

```sh
ansible-playbook -i inventory/prod/inventory.ini cluster.yml
```

@@ -0,0 +1,36 @@
# ## Configure 'ip' variable to bind kubernetes services on a
# ## different ip than the default iface
# ## Set etcd_member_name for the etcd cluster. Nodes that are not etcd members do not need to set the value, or can set it to the empty string.
[all]
# node1 ansible_host=95.54.0.12 # ip=10.3.0.1 etcd_member_name=etcd1
# node2 ansible_host=95.54.0.13 # ip=10.3.0.2 etcd_member_name=etcd2
# node3 ansible_host=95.54.0.14 # ip=10.3.0.3 etcd_member_name=etcd3
# node4 ansible_host=95.54.0.15 # ip=10.3.0.4 etcd_member_name=etcd4
# node5 ansible_host=95.54.0.16 # ip=10.3.0.5 etcd_member_name=etcd5
# node6 ansible_host=95.54.0.17 # ip=10.3.0.6 etcd_member_name=etcd6

# ## configure a bastion host if your nodes are not directly reachable
# bastion ansible_host=x.x.x.x ansible_user=some_user

[kube-master]
# node1
# node2

[etcd]
# node1
# node2
# node3

[kube-node]
# node2
# node3
# node4
# node5
# node6

[calico-rr]

[k8s-cluster:children]
kube-master
#kube-node
#calico-rr

@@ -0,0 +1,52 @@
#!/bin/bash

# check whether the script is sourced (it must be, so the pyenv venv stays active in the caller's shell)
[[ "${BASH_SOURCE[0]}" == "${0}" ]] && echo "script is executed, but needs to be sourced" && exit 1

echo "######################################################################################"
echo "## Reinit repository"
rm -rf kubespray
VERSION=${1:-release-2.14}
git clone --branch "$VERSION" https://github.com/kubernetes-sigs/kubespray.git

echo "######################################################################################"
echo "## Activating pyenv venv"
eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
pyenv virtualenv 3.8.3 infrapuzzle-bootstrap
pyenv activate infrapuzzle-bootstrap
python -m pip install -r kubespray/requirements.txt

echo "######################################################################################"
echo "## Customizing vagrant dev env"
mkdir -p kubespray/vagrant
cat << EOF > kubespray/vagrant/config.rb
\$instance_name_prefix = "k8s"
\$vm_cpus = 4
\$num_instances = 1
\$os = "centos"
\$subnet = "10.0.20"
\$network_plugin = "calico"
\$shared_folders = { 'temp/docker_rpms' => "/var/cache/yum/x86_64/7/docker-ce/packages" }
\$kube_node_instances_with_disks_number = 0
EOF

# create the rpm cache directory that is shared into the vagrant boxes
mkdir -p kubespray/temp/docker_rpms

echo "###############"
echo "Now cd to kubespray and 'vagrant up'"
echo ""
echo "export KUBECONFIG=\"$( pwd )/kubespray/inventory/sample/artifacts/admin.conf\""

echo "######################################################################################"
echo "## Preparing real prod environment"
cp -r kubespray/inventory/sample kubespray/inventory/prod
rm kubespray/inventory/prod/inventory.ini
cp ./prod.ini kubespray/inventory/prod/inventory.ini
# gsed is GNU sed (e.g. installed via homebrew on macOS)
#gsed -i "s/kube_network_plugin: .*/kube_network_plugin: flannel/" ./kubespray/inventory/prod/group_vars/k8s-cluster/k8s-cluster.yml
gsed -i "s/# calico_iptables_backend: \"Legacy\"/calico_iptables_backend: \"NFT\"/" ./kubespray/inventory/prod/group_vars/k8s-cluster/k8s-net-calico.yml
#echo 'calico_iptables_backend: "NFT"' >> ./kubespray/inventory/prod/group_vars/k8s-cluster/k8s-net-cluster.yml
#gsed -i "s/metrics_server_enabled: .*/metrics_server_enabled: true/" ./kubespray/inventory/prod/group_vars/k8s-cluster/addons.yml

@@ -0,0 +1,33 @@
# ## Configure 'ip' variable to bind kubernetes services on a
# ## different ip than the default iface
# ## Set etcd_member_name for the etcd cluster. Nodes that are not etcd members do not need to set the value, or can set it to the empty string.
[all]
haumdaucher ansible_host=136.243.23.215 etcd_member_name=etcd1 ansible_become=yes ansible_become_method=sudo ansible_python_interpreter=/usr/bin/python3 metrics_server_enabled=false
#ns3088070.ip-37-59-40.eu ansible_host=37.59.40.95 ansible_become=yes ansible_become_method=sudo ansible_python_interpreter=/usr/bin/python3
#ns3100058.ip-37-59-61.eu ansible_host=37.59.61.198 ansible_become=yes ansible_become_method=sudo ansible_python_interpreter=/usr/bin/python3
# node1 ansible_host=95.54.0.12 # ip=10.3.0.1 etcd_member_name=etcd1
# node2 ansible_host=95.54.0.13 # ip=10.3.0.2 etcd_member_name=etcd2
# node3 ansible_host=95.54.0.14 # ip=10.3.0.3 etcd_member_name=etcd3
# node4 ansible_host=95.54.0.15 # ip=10.3.0.4 etcd_member_name=etcd4
# node5 ansible_host=95.54.0.16 # ip=10.3.0.5 etcd_member_name=etcd5
# node6 ansible_host=95.54.0.17 # ip=10.3.0.6 etcd_member_name=etcd6

# ## configure a bastion host if your nodes are not directly reachable
# bastion ansible_host=x.x.x.x ansible_user=some_user

[kube-master]
haumdaucher
#ns3088070.ip-37-59-40.eu

[etcd]
haumdaucher

[kube-node]
haumdaucher

[calico-rr]

[k8s-cluster:children]
kube-master
kube-node
calico-rr

@@ -83,10 +83,14 @@ $ kubectl delete -f kuard

Update with the following command. The chart can be found [here](https://github.com/openebs/charts/tree/master/charts/openebs).

Pitfall:

* On a fresh installation: activate *ndmOperator*, so that the CRDs are installed correctly. It may be deactivated afterwards.

```sh
helm repo add openebs https://openebs.github.io/charts
helm repo update
helm upgrade --install --create-namespace -f openebs/openebs.yml openebs --namespace openebs openebs/openebs
k apply -f openebs/storageclass.yml
```
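
Whether openebs came up can be sanity-checked with plain kubectl (names as used above):

```sh
kubectl get pods -n openebs       # provisioner and ndm pods should reach Running
kubectl get sc openebs-hostpath   # the storage class applied via storageclass.yml
```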

## minio

@@ -118,7 +122,8 @@ kubectl delete ns velero
A backup may be created using:

```sh
velero backup create full-backup --default-volumes-to-restic --include-namespaces datalab,development,nextcloud,tt-rss,zebrium --wait
DATE=$( date +%Y%m%d )
velero backup create $DATE --default-volumes-to-restic --include-namespaces datalab,development,nextcloud,tt-rss,zebrium --wait
```
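
The result can be inspected with the standard velero subcommands:

```sh
velero backup get                # list backups and their completion status
velero backup describe "$DATE"   # details for the dated backup created above
```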

## Add private docker registry

@@ -129,7 +134,7 @@ USER='moritz'
PASSWORD='xxx'
docker run --entrypoint htpasswd --rm registry:2 -Bbn $USER $PASSWORD
helm upgrade --install --create-namespace docker-registry stable/docker-registry -n development -f development/registry.secret.yaml
##kubectl apply -f development/registry.secret.yaml
```
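
Pushing to the registry then works as usual (a sketch; `registry.example.com` stands in for whatever hostname the chart is exposed under, and `myimage` is a made-up local image):

```sh
docker login registry.example.com -u "$USER"                    # placeholder registry host
docker tag myimage:latest registry.example.com/myimage:latest   # hypothetical image name
docker push registry.example.com/myimage:latest
```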

@@ -198,8 +203,19 @@ EOF

## metrics-server

Getting the resources (this was already done):
```sh
# old approach via the helm chart (replaced by the raw manifest below):
#helm upgrade --install -f kube-system/metrics-server.yaml metrics-server -n kube-system stable/metrics-server
cd kube-system
curl -L -o metrics-server.yml https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# add parameters to the deployment:
# - --kubelet-preferred-address-types=InternalIP
# - --v=2
# - --kubelet-insecure-tls
```

```sh
kubectl apply -n kube-system -f kube-system/metrics-server.yml
```
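
Once the deployment is ready, the metrics API can be exercised directly:

```sh
kubectl top nodes     # node cpu/memory, served by metrics-server
kubectl top pods -A   # per-pod usage across all namespaces
```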

## rstudio

@@ -1,4 +0,0 @@
args:
- "--v=2"
- "--kubelet-insecure-tls"
- "--kubelet-preferred-address-types=InternalIP"

@@ -0,0 +1,189 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-use-node-status-port
        # customized parameters (see README): prefer InternalIP, verbose logs, skip kubelet TLS verification
        - --kubelet-preferred-address-types=InternalIP
        - --v=2
        - --kubelet-insecure-tls
        image: k8s.gcr.io/metrics-server/metrics-server:v0.4.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          periodSeconds: 10
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100

@@ -1,4 +1,5 @@
apiserver:
  enabled: true
analytics:
  enabled: false

@@ -8,3 +9,5 @@ ndm:
  enabled: false
snapshotOperator:
  enabled: true
webhook:
  enabled: true

@@ -0,0 +1,12 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    cas.openebs.io/config: |
      - name: StorageType
        value: "hostpath"
      - name: BasePath
        value: /var/openebs/local
    openebs.io/cas-type: local
    storageclass.kubernetes.io/is-default-class: "true"
  name: openebs-hostpath
provisioner: openebs.io/local
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer