Upgrade completed successfully

Moritz Graf 2025-12-01 22:46:51 +01:00
parent 295e1a2e68
commit 9d4585e7fb
8 changed files with 9 additions and 246 deletions

@@ -103,3 +103,12 @@ This runs everything and is kind of idempotent:
ansible-playbook -i inventory/prod/inventory.ini cluster.yml
```
## Upgrade to 2.31.3
Run the following to upgrade:
```
ansible-playbook -i inventory/prod/inventory.ini -e upgrade_cluster_setup=true -e drain_nodes=false upgrade-cluster.yml
```
+ set a feature flag as described in https://github.com/kubernetes-sigs/kubespray/issues/11887
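
The flag is not named here; as a minimal sketch, such an extra variable could also be set once in the cluster group_vars instead of on the command line (the file path and variable name below are placeholders, not the actual flag from the issue):
```
# inventory/prod/group_vars/k8s_cluster/k8s-cluster.yml  (path assumed)
# Placeholder name: substitute the feature flag referenced in kubespray issue 11887
some_feature_flag: true
```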

@@ -1,78 +0,0 @@
# see roles/network_plugin/calico/defaults/main.yml
## With calico it is possible to distribute routes with the border routers of the datacenter.
## Warning: enabling router peering will disable calico's default behavior ('node mesh').
## The subnets of each node will be distributed by the datacenter router
# peer_with_router: false
# Enables Internet connectivity from containers
# nat_outgoing: true
# add default ippool name
# calico_pool_name: "default-pool"
# add default ippool blockSize (defaults to kube_network_node_prefix)
# calico_pool_blocksize: 24
# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
# calico_pool_cidr: 1.2.3.4/5
# Global as_num (/calico/bgp/v1/global/as_num)
# global_as_num: "64512"
# You can set the MTU value here. If left undefined or empty, it will
# not be specified in calico CNI config, so Calico will use built-in
# defaults. The value should be a number, not a string.
# calico_mtu: 1500
# Configure the MTU to use for workload interfaces and tunnels.
# - If Wireguard is enabled, set to your network MTU - 60
# - Otherwise, if VXLAN or BPF mode is enabled, set to your network MTU - 50
# - Otherwise, if IPIP is enabled, set to your network MTU - 20
# - Otherwise, if not using any encapsulation, set to your network MTU.
# calico_veth_mtu: 1440
# Advertise Cluster IPs
# calico_advertise_cluster_ips: true
# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
# calico_datastore: "etcd"
# Choose Calico iptables backend: "Legacy", "Auto" or "NFT"
# calico_iptables_backend: "NFT"
# Use typha (only with kdd)
# typha_enabled: false
# Generate TLS certs for secure typha<->calico-node communication
# typha_secure: false
# Scaling typha: 1 replica per 100 nodes is adequate
# Number of typha replicas
# typha_replicas: 1
# Set max typha connections
# typha_max_connections_lower_limit: 300
# Set calico network backend: "bird", "vxlan" or "none"
# bird enables BGP routing, which is required for ipip mode.
# calico_network_backend: bird
# IP in IP and VXLAN are mutually exclusive modes.
# set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never"
# calico_ipip_mode: 'Always'
# set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
# calico_vxlan_mode: 'Never'
# If you want to use a non-default IP_AUTODETECTION_METHOD for the calico node, set this option to one of:
# * can-reach=DESTINATION
# * interface=INTERFACE-REGEX
# see https://docs.projectcalico.org/reference/node/configuration
# calico_ip_auto_method: "interface=eth.*"
# Choose the iptables insert mode for Calico: "Insert" or "Append".
# calico_felix_chaininsertmode: Insert
# If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2)
# see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS
# calico_use_default_route_src_ipaddr: false
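For reference, a minimal sketch of this file with a few of the options above uncommented (illustrative values only, assuming a 1500-byte network MTU and VXLAN encapsulation):
```
# k8s-net-calico.yml -- illustrative, not a recommendation
calico_pool_name: "default-pool"
calico_pool_blocksize: 24
# IP in IP and VXLAN are mutually exclusive: enable one, disable the other
calico_vxlan_mode: 'CrossSubnet'
calico_ipip_mode: 'Never'
# VXLAN enabled, so workload/tunnel MTU = network MTU (1500) - 50
calico_veth_mtu: 1450
calico_ip_auto_method: "interface=eth.*"
```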

@@ -1,10 +0,0 @@
# see roles/network_plugin/canal/defaults/main.yml
# The interface used by canal for host <-> host communication.
# If left blank, the interface is chosen using the node's
# default route.
# canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
# canal_masquerade: "true"
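A minimal sketch of the same file with both options set (the interface name is illustrative):
```
# k8s-net-canal.yml -- illustrative values only
canal_iface: "eth0"        # pin host <-> host traffic to one interface
canal_masquerade: "true"   # masquerade traffic leaving the pod network
```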

@@ -1 +0,0 @@
# see roles/network_plugin/cilium/defaults/main.yml

@@ -1,20 +0,0 @@
# see roles/network_plugin/contiv/defaults/main.yml
# Forwarding mode: bridge or routing
# contiv_fwd_mode: routing
## With contiv, L3 BGP mode is possible by setting contiv_fwd_mode to "routing".
## In this case, you may need to peer with an uplink
## NB: The hostvars must contain a key "contiv" whose value is a dict containing "router_ip", "as" (defaults to contiv_global_as), "neighbor_as" (defaults to contiv_global_neighbor_as), and "neighbor"
# contiv_peer_with_uplink_leaf: false
# contiv_global_as: "65002"
# contiv_global_neighbor_as: "500"
# Fabric mode: aci, aci-opflex or default
# contiv_fabric_mode: default
# Default netmode: vxlan or vlan
# contiv_net_mode: vxlan
# Dataplane interface
# contiv_vlan_interface: ""
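A minimal sketch of BGP-mode settings built from the options above (the AS numbers are the file's own examples; the per-host "contiv" dict mentioned in the NB must still be provided in hostvars):
```
# k8s-net-contiv.yml -- illustrative values only
contiv_fwd_mode: routing            # L3 BGP mode
contiv_peer_with_uplink_leaf: true
contiv_global_as: "65002"
contiv_global_neighbor_as: "500"
```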

@@ -1,18 +0,0 @@
# see roles/network_plugin/flannel/defaults/main.yml
## interface that should be used for flannel operations
## This is actually an inventory cluster-level item
# flannel_interface:
## Select interface that should be used for flannel operations by regexp on Name or IP
## This is actually an inventory cluster-level item
## example: select interface with ip from net 10.0.0.0/23
## use single quotes and escape backslashes
# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
# You can choose which flannel backend type to use: 'vxlan' or 'host-gw'
# For experimental backends, please refer to flannel's docs: https://github.com/coreos/flannel/blob/master/README.md
# flannel_backend_type: "vxlan"
# flannel_vxlan_vni: 1
# flannel_vxlan_port: 8472
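A minimal sketch combining the options above (the regexp is the file's own 10.0.0.0/23 example):
```
# k8s-net-flannel.yml -- illustrative values only
# single-quote the regexp and escape backslashes
flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
flannel_backend_type: "vxlan"
flannel_vxlan_vni: 1
flannel_vxlan_port: 8472
```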

@@ -1,61 +0,0 @@
# see roles/network_plugin/kube-router/defaults/main.yml
# Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
# kube_router_run_router: true
# Enables Network Policy -- sets up iptables to provide ingress firewall for pods
# kube_router_run_firewall: true
# Enables Service Proxy -- sets up IPVS for Kubernetes Services
# see docs/kube-router.md "Caveats" section
# kube_router_run_service_proxy: false
# Add the Cluster IP of the service to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_cluster_ip: false
# Add the External IP of the service to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_external_ip: false
# Add the LoadBalancer IP of the service status, as set by the LB provider, to the RIB so that it gets advertised to the BGP peers.
# kube_router_advertise_loadbalancer_ip: false
# Adjust manifest of kube-router daemonset template with DSR needed changes
# kube_router_enable_dsr: false
# Array of arbitrary extra arguments to kube-router, see
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
# kube_router_extra_args: []
# ASNs of the BGP peers to which cluster nodes will advertise the cluster IP and each node's pod CIDR.
# kube_router_peer_router_asns: ~
# The IP addresses of the external routers to which all nodes will peer and advertise the cluster IP and pod CIDRs.
# kube_router_peer_router_ips: ~
# The remote port of the external BGP peer to which all nodes will connect. If not set, the default BGP port (179) will be used.
# kube_router_peer_router_ports: ~
# Sets up node CNI to allow hairpin mode; requires node reboots, see
# https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode
# kube_router_support_hairpin_mode: false
# Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc.
# kube_router_dns_policy: ClusterFirstWithHostNet
# Array of annotations for master
# kube_router_annotations_master: []
# Array of annotations for every node
# kube_router_annotations_node: []
# Array of common annotations for every node
# kube_router_annotations_all: []
# Enables scraping kube-router metrics with Prometheus
# kube_router_enable_metrics: false
# Path to serve Prometheus metrics on
# kube_router_metrics_path: /metrics
# Prometheus metrics port to use
# kube_router_metrics_port: 9255
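A minimal sketch of a BGP-peering setup using the options above (the ASN and peer address are illustrative; 192.0.2.1 is a documentation address):
```
# k8s-net-kube-router.yml -- illustrative values only
kube_router_run_router: true
kube_router_run_firewall: true
kube_router_advertise_cluster_ip: true
# external router to peer with (illustrative)
kube_router_peer_router_asns: "64512"
kube_router_peer_router_ips: "192.0.2.1"
```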

@@ -1,58 +0,0 @@
# see roles/network_plugin/weave/defaults/main.yml
# Weave's network password for encryption; if null, network encryption is disabled.
# weave_password: ~
# If set to 1, disables checking for new Weave Net versions (default is blank,
# i.e. check is enabled)
# weave_checkpoint_disable: false
# Soft limit on the number of connections between peers. Defaults to 100.
# weave_conn_limit: 100
# Weave Net defaults to enabling hairpin on the bridge side of the veth pair
# for containers attached. If you need to disable hairpin, e.g. your kernel is
# one of those that can panic if hairpin is enabled, then you can disable it by
# setting `HAIRPIN_MODE=false`.
# weave_hairpin_mode: true
# The range of IP addresses used by Weave Net and the subnet they are placed in
# (CIDR format; default 10.32.0.0/12)
# weave_ipalloc_range: "{{ kube_pods_subnet }}"
# Set to 0 to disable Network Policy Controller (default is on)
# weave_expect_npc: "{{ enable_network_policy }}"
# List of addresses of peers in the Kubernetes cluster (default is to fetch the
# list from the api-server)
# weave_kube_peers: ~
# Set the initialization mode of the IP Address Manager (defaults to consensus
# amongst the KUBE_PEERS)
# weave_ipalloc_init: ~
# Set the IP address used as a gateway from the Weave network to the host
# network - this is useful if you are configuring the addon as a static pod.
# weave_expose_ip: ~
# Address and port that the Weave Net daemon will serve Prometheus-style
# metrics on (defaults to 0.0.0.0:6782)
# weave_metrics_addr: ~
# Address and port that the Weave Net daemon will serve status requests on
# (defaults to disabled)
# weave_status_addr: ~
# Weave Net defaults to 1376 bytes, but you can set a smaller size if your
# underlying network has a tighter limit, or set a larger size for better
# performance if your network supports jumbo frames (e.g. 8916)
# weave_mtu: 1376
# Set to 1 to preserve the client source IP address when accessing Services
# annotated with `service.spec.externalTrafficPolicy=Local`. The feature works
# only with Weave IPAM (the default).
# weave_no_masq_local: true
# Extra variables that are passed to launch.sh, useful for enabling seed mode, see
# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
# weave_extra_args: ~
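A minimal sketch with encryption and jumbo frames enabled (the password and MTU are illustrative; only raise the MTU if the underlay actually supports jumbo frames):
```
# k8s-net-weave.yml -- illustrative values only
weave_password: "use-a-long-random-secret"  # enables network encryption
weave_mtu: 8916                             # jumbo frames, per the note above
weave_no_masq_local: true                   # preserve client source IPs for externalTrafficPolicy=Local
```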