[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
{% for requires_unit in k3s_service_requires %}
Requires={{ requires_unit }}
{% endfor %}
Wants=network-online.target
{% for wants_unit in k3s_service_wants %}
Wants={{ wants_unit }}
{% endfor %}
{% for before_unit in k3s_service_before %}
Before={{ before_unit }}
{% endfor %}
After=network-online.target
{% for after_unit in k3s_service_after %}
After={{ after_unit }}
{% endfor %}
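{#
  The Requires/Wants/Before/After loops above expect plain lists of systemd
  unit names. Illustrative values only, not role defaults:
    k3s_service_wants:
      - containerd.service
    k3s_service_after:
      - containerd.service
#}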
[Service]
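{# As in the upstream k3s unit: a server signals readiness via sd_notify, so
   control-plane nodes use Type=notify, while agents run as a plain Type=exec
   service. #}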
Type={{ 'notify' if k3s_control_node else 'exec' }}
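{#
  k3s_service_env_vars is treated as a mapping of environment variable names
  to values, and k3s_service_env_file as the path to an existing environment
  file. Illustrative values only, not role defaults:
    k3s_service_env_vars:
      HTTP_PROXY: http://proxy.example.com:3128
    k3s_service_env_file: /etc/k3s/k3s.env
#}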
{% if k3s_service_env_vars is defined and k3s_service_env_vars is iterable %}
{% for env_var in k3s_service_env_vars %}
Environment={{ env_var }}={{ k3s_service_env_vars[env_var] }}
{% endfor %}
{% endif %}
{% if k3s_service_env_file is defined and k3s_service_env_file %}
EnvironmentFile={{ k3s_service_env_file }}
{% endif %}
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
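{# The nested filters below collapse the multi-line ExecStart definition into
   a single space-separated command line in the rendered unit file. #}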
{% filter regex_replace('\s+', ' ') %}
{% filter replace('\n', ' ') %}
ExecStart={{ k3s_install_dir }}/k3s
{% if k3s_debug is defined and k3s_debug %}
--debug
{% endif %}
{% if k3s_control_node %}
server
{% if (k3s_etcd_datastore is defined and k3s_etcd_datastore) and (k3s_primary_control_node is not defined or not k3s_primary_control_node) and k3s_controller_list | length > 1 %}
--server https://{{ k3s_registration_address }}:{{ k3s_control_plane_port | default(6443) | string }}
{% endif %}
{% if k3s_server is defined %}
--config {{ k3s_config_file }}
{% endif %}
{% if k3s_control_node and not k3s_primary_control_node %}
--token-file {{ k3s_token_location }}
{% endif %}
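{# Non-primary control-plane nodes read the cluster join token from
   k3s_token_location; with the embedded etcd datastore and more than one
   controller they also register against k3s_registration_address via
   --server, while the primary node starts without these flags. #}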
{% else %}
agent
--server https://{{ k3s_registration_address }}:{{ k3s_control_plane_port | default(6443) | string }}
--token-file {{ k3s_token_location }}
{% if k3s_agent is defined %}
--config {{ k3s_config_file }}
{% endif %}
{% endif %}
{% endfilter %}
{% endfilter %}
KillMode=process
Delegate=yes
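{# KillMode=process and Delegate=yes follow the upstream k3s unit: systemd
   kills only the main k3s process and delegates cgroup management to it, so
   containers it started are not torn down when the service stops or
   restarts. #}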
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s
[Install]
WantedBy=multi-user.target