Compare commits


6 Commits
v3.4.2 ... main

Author SHA1 Message Date
paradon 19c24bd503
Add scan for running control nodes when choosing primary control node (#219)
Signed-off-by: Thomas Matysik <thomas@matysik.co.nz>
2024-01-26 15:15:15 -05:00
fragpit 0c0d3bb38d
kubectl commands on node must use short name (#220)
Co-authored-by: Igor Tretyak <itretyak@ptsecurity.com>
2024-01-26 15:09:58 -05:00
davidg cfd9400edf
Containerd registries config not live (#222)
I found a bug where my custom containerd registries config wasn't live,
despite the correct `notify` handlers being specified in the
'Ensure containerd registries file exists' task.

This change fixes that by ensuring the handlers get triggered.
2024-01-26 15:08:18 -05:00
Devin Buhl 6b258763be
Update k3s killall and uninstall scripts (#217)
* Update k3s killall and uninstall scripts

* Update k3s-uninstall.sh.j2

* Update k3s-uninstall.sh.j2
2023-12-09 09:01:16 -05:00
fragpit b87991cc28
Compare `kubectl get nodes` with ansible_hostname, not ansible_fqdn/i… (#212)
Co-authored-by: Igor Tretyak <itretyak@ptsecurity.com>
2023-12-09 08:32:47 -05:00
matteyeux 37fda0a953
add support for experimental option "prefer-bundled-bin" (#214)
2023-10-27 11:22:57 -04:00
6 changed files with 90 additions and 21 deletions

View File

@@ -26,7 +26,7 @@
 - name: Ensure uninstalled nodes are drained # noqa no-changed-when
   ansible.builtin.command:
     cmd: >-
-      {{ k3s_install_dir }}/kubectl drain {{ item }}
+      {{ k3s_install_dir }}/kubectl drain {{ hostvars[item].ansible_hostname }}
       --ignore-daemonsets
       --{{ k3s_drain_command[ansible_version.string is version_compare('1.22', '>=')] }}
       --force
@@ -34,7 +34,7 @@
   run_once: true
   when:
     - kubectl_get_nodes_result.stdout is defined
-    - item in kubectl_get_nodes_result.stdout
+    - hostvars[item].ansible_hostname in kubectl_get_nodes_result.stdout
     - hostvars[item].k3s_state is defined
     - hostvars[item].k3s_state == 'uninstalled'
   loop: "{{ ansible_play_hosts }}"
@@ -42,12 +42,12 @@
 - name: Ensure uninstalled nodes are removed # noqa no-changed-when
   ansible.builtin.command:
-    cmd: "{{ k3s_install_dir }}/kubectl delete node {{ item }}"
+    cmd: "{{ k3s_install_dir }}/kubectl delete node {{ hostvars[item].ansible_hostname }}"
   delegate_to: "{{ k3s_control_delegate }}"
   run_once: true
   when:
     - kubectl_get_nodes_result.stdout is defined
-    - item in kubectl_get_nodes_result.stdout
+    - hostvars[item].ansible_hostname in kubectl_get_nodes_result.stdout
    - hostvars[item].k3s_state is defined
    - hostvars[item].k3s_state == 'uninstalled'
  loop: "{{ ansible_play_hosts }}"
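
The change above matters because kubectl registers nodes under their short hostname, while Ansible inventory names are often FQDNs, so draining or deleting by `item` can silently miss nodes. A minimal illustrative task, not part of the role (the host node1.example.com is hypothetical):

- name: Show old vs new node reference # illustrative only
  ansible.builtin.debug:
    msg:
      - "old: kubectl delete node {{ item }}" # e.g. node1.example.com (inventory name)
      - "new: kubectl delete node {{ hostvars[item].ansible_hostname }}" # e.g. node1 (the name kubectl knows)
  loop: "{{ ansible_play_hosts }}"
  run_once: true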

View File

@@ -55,6 +55,42 @@
     - hostvars[item].k3s_control_node
   loop: "{{ ansible_play_hosts }}"
+
+- name: Ensure an existing primary k3s control node is defined if multiple are found and at least one is running
+  when:
+    - k3s_controller_list | length >= 1
+    - k3s_build_cluster is defined
+    - k3s_build_cluster
+    - k3s_control_delegate is not defined
+  block:
+    - name: Test if control plane is running
+      ansible.builtin.wait_for:
+        port: "{{ k3s_runtime_config['https-listen-port'] | default('6443') }}"
+        host: "{{ k3s_runtime_config['bind-address'] | default('127.0.0.1') }}"
+        timeout: 5
+      register: k3s_control_node_running
+      ignore_errors: true
+      when: k3s_control_node
+
+    - name: List running control planes
+      ansible.builtin.set_fact:
+        k3s_running_controller_list: "{{ k3s_running_controller_list + [item] }}"
+      when:
+        - hostvars[item].k3s_control_node_running is not skipped
+        - hostvars[item].k3s_control_node_running is succeeded
+      loop: "{{ ansible_play_hosts }}"
+
+    - name: Choose first running node as delegate
+      ansible.builtin.set_fact:
+        k3s_control_delegate: "{{ k3s_running_controller_list[0] }}"
+      when: k3s_running_controller_list | length >= 1
+
+    - name: Ensure k3s_primary_control_node is set on the delegate
+      ansible.builtin.set_fact:
+        k3s_primary_control_node: true
+      when:
+        - k3s_control_delegate is defined
+        - inventory_hostname == k3s_control_delegate
+
 - name: Ensure a primary k3s control node is defined if multiple are found in ansible_play_hosts
   ansible.builtin.set_fact:
     k3s_primary_control_node: true
@@ -63,6 +99,7 @@
     - inventory_hostname == k3s_controller_list[0]
     - k3s_build_cluster is defined
     - k3s_build_cluster
+    - k3s_control_delegate is not defined
 - name: Ensure ansible_host is mapped to inventory_hostname
   ansible.builtin.blockinfile:
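
The added block follows a probe-then-collect pattern: every control node runs a local wait_for against the API port, then a pass over hostvars gathers the hosts whose probe succeeded. A standalone sketch of the same pattern, with hypothetical task and variable names:

- name: Probe the local API port on every control node
  ansible.builtin.wait_for:
    port: 6443 # assumed default; the role reads it from k3s_runtime_config
    host: 127.0.0.1
    timeout: 5
  register: api_probe
  ignore_errors: true

- name: Collect the hosts whose probe succeeded
  ansible.builtin.set_fact:
    running_nodes: "{{ running_nodes | default([]) + [item] }}"
  when:
    - hostvars[item].api_probe is not skipped
    - hostvars[item].api_probe is succeeded
  loop: "{{ ansible_play_hosts }}"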

View File

@@ -44,3 +44,6 @@
     - k3s_build_cluster is defined
     - k3s_build_cluster
     - k3s_registration_address is defined
+
+- name: Flush Handlers
+  ansible.builtin.meta: flush_handlers
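
meta: flush_handlers runs any handlers notified so far immediately, instead of at the end of the play; that is what makes the containerd registries fix from #222 take effect before later tasks depend on the new config. A minimal sketch (the handler name and paths are hypothetical; only the task name comes from the commit message):

- name: Ensure containerd registries file exists
  ansible.builtin.template:
    src: registries.yaml.j2 # hypothetical template name
    dest: /etc/rancher/k3s/registries.yaml # hypothetical destination
  notify: Restart k3s # hypothetical handler name

- name: Flush Handlers
  ansible.builtin.meta: flush_handlers
# tasks after this point see the reloaded registries config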

View File

@@ -40,6 +40,27 @@ killtree() {
     ) 2>/dev/null
 }

+remove_interfaces() {
+    # Delete network interface(s) that match 'master cni0'
+    ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do
+        iface=${iface%%@*}
+        [ -z "$iface" ] || ip link delete $iface
+    done
+
+    # Delete cni related interfaces
+    ip link delete cni0
+    ip link delete flannel.1
+    ip link delete flannel-v6.1
+    ip link delete kube-ipvs0
+    ip link delete flannel-wg
+    ip link delete flannel-wg-v6
+
+    # Restart tailscale
+    if [ -n "$(command -v tailscale)" ]; then
+        tailscale set --advertise-routes=
+    fi
+}
+
 getshims() {
     ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1
 }
@@ -47,7 +68,11 @@ getshims() {
 killtree $({ set +x; } 2>/dev/null; getshims; set -x)

 do_unmount_and_remove() {
-    awk -v path="$1" '$2 ~ ("^" path) { print $2 }' /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount "$0" && rm -rf "$0"'
+    set +x
+    while read -r _ path _; do
+        case "$path" in $1*) echo "$path" ;; esac
+    done < /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount -f "$0" && rm -rf "$0"'
+    set -x
 }

 do_unmount_and_remove '/run/k3s'
@@ -59,12 +84,8 @@ do_unmount_and_remove '/run/netns/cni-'
 # Remove CNI namespaces
 ip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns delete

-# Delete network interface(s) that match 'master cni0'
-ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do
-    iface=${iface%%@*}
-    [ -z "$iface" ] || ip link delete $iface
-done
-
-ip link delete cni0
-ip link delete flannel.1
+remove_interfaces

 rm -rf /var/lib/cni/
-iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore
+iptables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | iptables-restore
+ip6tables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | ip6tables-restore

View File

@@ -4,16 +4,17 @@ set -x
 /usr/local/bin/k3s-killall.sh

-if which systemctl; then
+if command -v systemctl; then
     systemctl disable k3s
     systemctl reset-failed k3s
     systemctl daemon-reload
 fi
-if which rc-update; then
+if command -v rc-update; then
     rc-update delete k3s default
 fi

 rm -f {{ k3s_systemd_unit_dir }}/k3s.service
+rm -f {{ k3s_systemd_unit_dir }}/k3s.env

 remove_uninstall() {
     rm -f /usr/local/bin/k3s-uninstall.sh
@@ -31,20 +32,25 @@ for cmd in kubectl crictl ctr; do
     fi
 done

+for bin in {{ k3s_install_dir }}/k3s*; do
+    if [ -f "${bin}" ]; then
+        rm -f "${bin}"
+    fi
+done
+
 rm -rf {{ k3s_config_dir }}
 rm -rf /run/k3s
 rm -rf /run/flannel
 rm -rf {{ k3s_runtime_config['data-dir'] | default(k3s_data_dir) }}
 rm -rf /var/lib/kubelet
-rm -f {{ k3s_install_dir }}/k3s
 rm -f /usr/local/bin/k3s-killall.sh

 if type yum >/dev/null 2>&1; then
     yum remove -y k3s-selinux
     rm -f /etc/yum.repos.d/rancher-k3s-common*.repo
+elif type rpm-ostree >/dev/null 2>&1; then
+    rpm-ostree uninstall k3s-selinux
+    rm -f /etc/yum.repos.d/rancher-k3s-common*.repo
 elif type zypper >/dev/null 2>&1; then
     uninstall_cmd="zypper remove -y k3s-selinux"
     if [ "${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then
         uninstall_cmd="transactional-update --no-selfupdate -d run $uninstall_cmd"
     fi
     $uninstall_cmd
     rm -f /etc/zypp/repos.d/rancher-k3s-common*.repo
 fi

View File

@@ -77,6 +77,7 @@ k3s_conf_build_cluster: "{{
 # Empty array for counting the number of control plane nodes
 k3s_controller_list: []
+k3s_running_controller_list: []

 # Control plane port default
 k3s_control_plane_port: "{{ k3s_runtime_config['https-listen-port'] | default(6443) }}"
@@ -176,6 +177,7 @@ k3s_experimental_config:
   - setting: agent-token-file
   - setting: cluster-reset
     until: 1.19.5
+  - setting: prefer-bundled-bin

 # Config items that should be marked as deprecated
 k3s_deprecated_config:
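
With prefer-bundled-bin on the experimental allow-list, enabling it should follow the role's usual pattern for experimental flags; a minimal group_vars sketch, assuming the role's documented k3s_use_experimental gate:

k3s_use_experimental: true # required before the role accepts experimental settings
k3s_server:
  prefer-bundled-bin: true # prefer k3s' bundled userspace binaries over the host's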