Add HA option, change to yaml inventory, cleanup
- HA option for multiple server nodes using embedded etcd - Switch to yaml inventory file for easier editing and combining vars - Update to full ansible module names - Change master/node names to server/agent - Cleanup small linting errors - Add reboot playbook which staggers reboot to keep HA cluster up - Move playbooks to playbook directory Signed-off-by: Derek Nola <derek.nola@suse.com>
This commit is contained in:
parent
1031ea3ce2
commit
df67c61ba5
37
README.md
37
README.md
|
@ -23,32 +23,37 @@ Master and nodes must have passwordless SSH access
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
|
|
||||||
First create a new directory based on the `sample` directory within the `inventory` directory:
|
First copy the sample inventory to `inventory.yml`.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cp -R inventory/sample inventory/my-cluster
|
cp inventory-sample.yml inventory.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
Second, edit `inventory/my-cluster/hosts.ini` to match the system information gathered above. For example:
|
Second edit the inventory file to match your cluster setup. For example:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
[master]
|
k3s_cluster:
|
||||||
192.16.35.12
|
children:
|
||||||
|
server:
|
||||||
[node]
|
hosts:
|
||||||
192.16.35.[10:11]
|
192.16.35.11
|
||||||
|
agent:
|
||||||
[k3s_cluster:children]
|
hosts:
|
||||||
master
|
192.16.35.12
|
||||||
node
|
192.16.35.13
|
||||||
```
|
```
|
||||||
|
|
||||||
If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match your environment.
|
If needed, you can also edit `vars` section at the bottom to match your environment.
|
||||||
|
|
||||||
|
If multiple hosts are in the server group the playbook will automatically setup k3s in HA mode with embedded etcd.
|
||||||
|
An odd number of server nodes is recommended (3, 5, 7). Read the official documentation below for more information and options.
|
||||||
|
https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/
|
||||||
|
Using a loadbalancer or VIP as the API endpoint is preferred but not covered here.
|
||||||
|
|
||||||
|
|
||||||
Start provisioning of the cluster using the following command:
|
Start provisioning of the cluster using the following command:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ansible-playbook site.yml -i inventory/my-cluster/hosts.ini
|
ansible-playbook playbook/site.yml -i inventory.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
## Kubeconfig
|
## Kubeconfig
|
||||||
|
@ -56,5 +61,5 @@ ansible-playbook site.yml -i inventory/my-cluster/hosts.ini
|
||||||
To get access to your **Kubernetes** cluster just
|
To get access to your **Kubernetes** cluster just
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
scp debian@master_ip:~/.kube/config ~/.kube/config
|
scp debian@server_ip:~/.kube/config ~/.kube/config
|
||||||
```
|
```
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
[defaults]
|
[defaults]
|
||||||
nocows = True
|
nocows = True
|
||||||
roles_path = ./roles
|
roles_path = ./roles
|
||||||
inventory = ./hosts.ini
|
inventory = ./inventory.yml
|
||||||
|
|
||||||
remote_tmp = $HOME/.ansible/tmp
|
remote_tmp = $HOME/.ansible/tmp
|
||||||
local_tmp = $HOME/.ansible/tmp
|
local_tmp = $HOME/.ansible/tmp
|
||||||
|
|
|
@ -0,0 +1,21 @@
|
||||||
|
---
|
||||||
|
k3s_cluster:
|
||||||
|
children:
|
||||||
|
server:
|
||||||
|
hosts:
|
||||||
|
192.16.35.11
|
||||||
|
agent:
|
||||||
|
hosts:
|
||||||
|
192.16.35.12
|
||||||
|
192.16.35.13
|
||||||
|
|
||||||
|
vars:
|
||||||
|
ansible_port: 22
|
||||||
|
ansible_user: debian
|
||||||
|
k3s_version: v1.25.5+k3s2
|
||||||
|
systemd_dir: /etc/systemd/system
|
||||||
|
api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}"
|
||||||
|
api_port: 6443
|
||||||
|
extra_server_args: ""
|
||||||
|
extra_server_init_args: ""
|
||||||
|
extra_agent_args: ""
|
|
@ -1,3 +0,0 @@
|
||||||
*
|
|
||||||
!.gitignore
|
|
||||||
!sample/
|
|
|
@ -1,7 +0,0 @@
|
||||||
---
|
|
||||||
k3s_version: v1.22.3+k3s1
|
|
||||||
ansible_user: debian
|
|
||||||
systemd_dir: /etc/systemd/system
|
|
||||||
master_ip: "{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}"
|
|
||||||
extra_server_args: ""
|
|
||||||
extra_agent_args: ""
|
|
|
@ -1,12 +0,0 @@
|
||||||
[master]
|
|
||||||
192.168.1.26
|
|
||||||
|
|
||||||
[node]
|
|
||||||
192.168.1.34
|
|
||||||
192.168.1.39
|
|
||||||
192.168.1.16
|
|
||||||
192.168.1.32
|
|
||||||
|
|
||||||
[k3s_cluster:children]
|
|
||||||
master
|
|
||||||
node
|
|
|
@ -0,0 +1,8 @@
|
||||||
|
---
|
||||||
|
- name: Reboot cluster
|
||||||
|
hosts: k3s_cluster
|
||||||
|
become: true
|
||||||
|
gather_facts: true
|
||||||
|
tasks:
|
||||||
|
- name: Reboot
|
||||||
|
ansible.builtin.reboot:
|
|
@ -0,0 +1,7 @@
|
||||||
|
---
|
||||||
|
- name: Undo cluster setup
|
||||||
|
hosts: k3s_cluster
|
||||||
|
gather_facts: true
|
||||||
|
become: true
|
||||||
|
roles:
|
||||||
|
- role: reset
|
|
@ -0,0 +1,21 @@
|
||||||
|
---
|
||||||
|
- name: Cluster prep
|
||||||
|
hosts: k3s_cluster
|
||||||
|
gather_facts: true
|
||||||
|
become: true
|
||||||
|
roles:
|
||||||
|
- role: prereq
|
||||||
|
- role: download
|
||||||
|
- role: raspberrypi
|
||||||
|
|
||||||
|
- name: Setup K3S server
|
||||||
|
hosts: server
|
||||||
|
become: true
|
||||||
|
roles:
|
||||||
|
- role: k3s/server
|
||||||
|
|
||||||
|
- name: Setup K3S agent
|
||||||
|
  hosts: agent
|
||||||
|
become: true
|
||||||
|
roles:
|
||||||
|
- role: k3s/agent
|
|
@ -1,8 +0,0 @@
|
||||||
---
|
|
||||||
|
|
||||||
- name: Reset K3s cluster
|
|
||||||
hosts: k3s_cluster
|
|
||||||
gather_facts: yes
|
|
||||||
become: yes
|
|
||||||
roles:
|
|
||||||
- role: reset
|
|
|
@ -1,5 +1,4 @@
|
||||||
---
|
---
|
||||||
|
|
||||||
- name: Download k3s binary x64
|
- name: Download k3s binary x64
|
||||||
ansible.builtin.get_url:
|
ansible.builtin.get_url:
|
||||||
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
|
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
|
||||||
|
|
|
@ -1,16 +1,15 @@
|
||||||
---
|
---
|
||||||
|
|
||||||
- name: Copy K3s service file
|
- name: Copy K3s service file
|
||||||
ansible.builtin.template:
|
ansible.builtin.template:
|
||||||
src: "k3s.service.j2"
|
src: "k3s.service.j2"
|
||||||
dest: "{{ systemd_dir }}/k3s-node.service"
|
dest: "{{ systemd_dir }}/k3s-agent.service"
|
||||||
owner: root
|
owner: root
|
||||||
group: root
|
group: root
|
||||||
mode: 0755
|
mode: 0755
|
||||||
|
|
||||||
- name: Enable and check K3s service
|
- name: Enable and check K3s service
|
||||||
ansible.builtin.systemd:
|
ansible.builtin.systemd:
|
||||||
name: k3s-node
|
name: k3s-agent
|
||||||
daemon_reload: yes
|
daemon_reload: yes
|
||||||
state: restarted
|
state: restarted
|
||||||
enabled: yes
|
enabled: yes
|
|
@ -7,7 +7,7 @@ After=network-online.target
|
||||||
Type=notify
|
Type=notify
|
||||||
ExecStartPre=-/sbin/modprobe br_netfilter
|
ExecStartPre=-/sbin/modprobe br_netfilter
|
||||||
ExecStartPre=-/sbin/modprobe overlay
|
ExecStartPre=-/sbin/modprobe overlay
|
||||||
ExecStart=/usr/local/bin/k3s agent --server https://{{ master_ip }}:6443 --token {{ hostvars[groups['master'][0]]['token'] }} {{ extra_agent_args | default("") }}
|
ExecStart=/usr/local/bin/k3s agent --server https://{{ api_endpoint }}:{{ api_port }} --token {{ hostvars[groups['server'][0]]['token'] }} {{ extra_agent_args | default("") }}
|
||||||
KillMode=process
|
KillMode=process
|
||||||
Delegate=yes
|
Delegate=yes
|
||||||
# Having non-zero Limit*s causes performance problems due to accounting overhead
|
# Having non-zero Limit*s causes performance problems due to accounting overhead
|
|
@ -1,2 +0,0 @@
|
||||||
---
|
|
||||||
k3s_server_location: /var/lib/rancher/k3s
|
|
|
@ -0,0 +1,12 @@
|
||||||
|
---
|
||||||
|
k3s_server_location: /var/lib/rancher/k3s
|
||||||
|
server_init_args: >-
|
||||||
|
{% if groups['server'] | length > 1 %}
|
||||||
|
--token {{ k3s_token }}
|
||||||
|
{% if ansible_host == hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) %}
|
||||||
|
--cluster-init
|
||||||
|
{% else %}
|
||||||
|
--server https://{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}:{{ api_port }}
|
||||||
|
{% endif %}
|
||||||
|
{% endif %}
|
||||||
|
{{ extra_server_args | default('') }}
|
|
@ -1,4 +1,25 @@
|
||||||
---
|
---
|
||||||
|
- name: Init cluster using temporary service
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: "systemd-run -p RestartSec=2 -p Restart=on-failure --unit=k3s-init k3s server {{ server_init_args }}"
|
||||||
|
creates: "{{ systemd_dir }}/k3s.service"
|
||||||
|
|
||||||
|
- name: Verification
|
||||||
|
block:
|
||||||
|
- name: Verify that all nodes joined
|
||||||
|
ansible.builtin.command:
|
||||||
|
cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/control-plane=true" -o=jsonpath="{.items[*].metadata.name}"
|
||||||
|
register: nodes
|
||||||
|
until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['server'] | length)
|
||||||
|
retries: 20
|
||||||
|
delay: 10
|
||||||
|
changed_when: false
|
||||||
|
always:
|
||||||
|
- name: Kill the temporary init service
|
||||||
|
ansible.builtin.systemd:
|
||||||
|
name: k3s-init
|
||||||
|
state: stopped
|
||||||
|
failed_when: false
|
||||||
|
|
||||||
- name: Copy K3s service file
|
- name: Copy K3s service file
|
||||||
register: k3s_service
|
register: k3s_service
|
||||||
|
@ -30,12 +51,12 @@
|
||||||
path: "{{ k3s_server_location }}/server/node-token"
|
path: "{{ k3s_server_location }}/server/node-token"
|
||||||
mode: "g+rx,o+rx"
|
mode: "g+rx,o+rx"
|
||||||
|
|
||||||
- name: Read node-token from master
|
- name: Read node-token from server
|
||||||
ansible.builtin.slurp:
|
ansible.builtin.slurp:
|
||||||
path: "{{ k3s_server_location }}/server/node-token"
|
path: "{{ k3s_server_location }}/server/node-token"
|
||||||
register: node_token
|
register: node_token
|
||||||
|
|
||||||
- name: Store Master node-token
|
- name: Store server node-token
|
||||||
ansible.builtin.set_fact:
|
ansible.builtin.set_fact:
|
||||||
token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"
|
token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"
|
||||||
|
|
||||||
|
@ -59,10 +80,10 @@
|
||||||
owner: "{{ ansible_user }}"
|
owner: "{{ ansible_user }}"
|
||||||
mode: "u=rw,g=,o="
|
mode: "u=rw,g=,o="
|
||||||
|
|
||||||
- name: Replace https://localhost:6443 by https://master-ip:6443
|
- name: Change server to API endpoint instead of localhost
|
||||||
ansible.builtin.command: >-
|
ansible.builtin.command: >-
|
||||||
/usr/local/bin/k3s kubectl config set-cluster default
|
/usr/local/bin/k3s kubectl config set-cluster default
|
||||||
--server=https://{{ master_ip }}:6443
|
--server=https://{{ api_endpoint }}:{{ api_port }}
|
||||||
--kubeconfig ~{{ ansible_user }}/.kube/config
|
--kubeconfig ~{{ ansible_user }}/.kube/config
|
||||||
changed_when: true
|
changed_when: true
|
||||||
|
|
|
@ -9,14 +9,14 @@
|
||||||
name: net.ipv4.ip_forward
|
name: net.ipv4.ip_forward
|
||||||
value: "1"
|
value: "1"
|
||||||
state: present
|
state: present
|
||||||
reload: yes
|
reload: true
|
||||||
|
|
||||||
- name: Enable IPv6 forwarding
|
- name: Enable IPv6 forwarding
|
||||||
ansible.posix.sysctl:
|
ansible.posix.sysctl:
|
||||||
name: net.ipv6.conf.all.forwarding
|
name: net.ipv6.conf.all.forwarding
|
||||||
value: "1"
|
value: "1"
|
||||||
state: present
|
state: present
|
||||||
reload: yes
|
reload: true
|
||||||
when: ansible_all_ipv6_addresses
|
when: ansible_all_ipv6_addresses
|
||||||
|
|
||||||
- name: Add br_netfilter to /etc/modules-load.d/
|
- name: Add br_netfilter to /etc/modules-load.d/
|
||||||
|
@ -37,7 +37,7 @@
|
||||||
name: "{{ item }}"
|
name: "{{ item }}"
|
||||||
value: "1"
|
value: "1"
|
||||||
state: present
|
state: present
|
||||||
reload: yes
|
reload: true
|
||||||
when: ansible_distribution in ['CentOS', 'Red Hat Enterprise Linux','RedHat']
|
when: ansible_distribution in ['CentOS', 'Red Hat Enterprise Linux','RedHat']
|
||||||
loop:
|
loop:
|
||||||
- net.bridge.bridge-nf-call-iptables
|
- net.bridge.bridge-nf-call-iptables
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
---
|
---
|
||||||
- name: Reboot
|
- name: Reboot Pis
|
||||||
ansible.builtin.reboot:
|
ansible.builtin.reboot:
|
||||||
|
|
|
@ -8,8 +8,9 @@
|
||||||
with_items:
|
with_items:
|
||||||
- k3s
|
- k3s
|
||||||
- k3s-node
|
- k3s-node
|
||||||
|
- k3s-init
|
||||||
|
|
||||||
- name: Pkill k3s container runtimes"
|
- name: Kill container shim
|
||||||
register: pkill_containerd_shim_runc
|
register: pkill_containerd_shim_runc
|
||||||
ansible.builtin.command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
|
ansible.builtin.command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
|
||||||
changed_when: "pkill_containerd_shim_runc.rc == 0"
|
changed_when: "pkill_containerd_shim_runc.rc == 0"
|
||||||
|
@ -37,6 +38,6 @@
|
||||||
- /var/lib/kubelet
|
- /var/lib/kubelet
|
||||||
- /var/lib/rancher/k3s
|
- /var/lib/rancher/k3s
|
||||||
|
|
||||||
- name: Daemon_reload
|
- name: Systemd daemon reload
|
||||||
ansible.builtin.systemd:
|
ansible.builtin.systemd:
|
||||||
daemon_reload: yes
|
daemon_reload: yes
|
||||||
|
|
22
site.yml
22
site.yml
|
@ -1,22 +0,0 @@
|
||||||
---
|
|
||||||
|
|
||||||
- name: "Setup K3s Cluster"
|
|
||||||
hosts: k3s_cluster
|
|
||||||
gather_facts: yes
|
|
||||||
become: yes
|
|
||||||
roles:
|
|
||||||
- role: prereq
|
|
||||||
- role: download
|
|
||||||
- role: raspberrypi
|
|
||||||
|
|
||||||
- name: "Server Setup"
|
|
||||||
hosts: master
|
|
||||||
become: yes
|
|
||||||
roles:
|
|
||||||
- role: k3s/master # noqa: role-name[path]
|
|
||||||
|
|
||||||
- name: "Agent Setup"
|
|
||||||
hosts: node
|
|
||||||
become: yes
|
|
||||||
roles:
|
|
||||||
- role: k3s/node # noqa: role-name[path]
|
|
Loading…
Reference in New Issue