Compare commits
412 Commits
Author | SHA1 | Date |
---|---|---|
paradon | 19c24bd503 | |
fragpit | 0c0d3bb38d | |
davidg | cfd9400edf | |
Devin Buhl | 6b258763be | |
fragpit | b87991cc28 | |
matteyeux | 37fda0a953 | |
Xan Manning | 37cca2e487 | |
Xan Manning | 41b938c8e7 | |
matteyeux | cc64737bdc | |
Devin Buhl | 3f1d2da21b | |
Xan Manning | 44635027ce | |
Daniel Brennand | de1bd094e5 | |
Daniel Brennand | 0cc1e48902 | |
Xan Manning | 13db5d26f8 | |
Xan Manning | 3f200f2bd7 | |
Xan Manning | 404491c938 | |
Jonaprince | 75b40675d8 | |
Xan Manning | 80e4debcd4 | |
Xan Manning | c28e03b97f | |
Xan Manning | 01616dcd96 | |
Xan Manning | 8410d2c402 | |
Xan Manning | a6b209abdb | |
Xan Manning | e9ddc8738a | |
Xan Manning | 1d29570fc9 | |
Xan Manning | 561d67cd08 | |
Xan Manning | dae3eb928e | |
Xan Manning | 21fe3bccbf | |
Xan Manning | 25a17b8511 | |
Xan Manning | d38f344937 | |
Xan Manning | 78cf2c1866 | |
Xan Manning | e774918812 | |
Xan Manning | 6f1cb8e904 | |
Xan Manning | e6cb2a91e8 | |
Xan Manning | 5bebced657 | |
Xan Manning | c1341eb62c | |
Xan Manning | 13ed1336d9 | |
Xan Manning | 5f560137f4 | |
Xan Manning | 910b611058 | |
Xan Manning | f3640e5c9f | |
Xan Manning | 291b7763b4 | |
Xan Manning | 86a9f25325 | |
Niklas Weimann | 503e3ccc3f | |
Xan Manning | 818676e449 | |
Xan Manning | 87551613d4 | |
Xan Manning | 03bc3aec5b | |
Xan Manning | e20195fe56 | |
Xan Manning | 4387b3d12e | |
Xan Manning | dc0f8c3a83 | |
Vladimir Romashchenko | d1f61bf866 | |
Xan Manning | 6550071e43 | |
Xan Manning | 594606d420 | |
Karsten Kosmala | 1475d1724d | |
Karsten Kosmala | 80eca60031 | |
Karsten Kosmala | 424145881c | |
Xan Manning | 3be9eff967 | |
Alejo Diaz | 410a5bf009 | |
Xan Manning | 252b87bf65 | |
Xan Manning | 1fa910f931 | |
Xan Manning | 2e5dd3cc07 | |
Xan Manning | e7693c5d2f | |
Alejo Diaz | 4f0bb3f9a7 | |
Xan Manning | 473f3943d2 | |
Xan Manning | 7e9292c01b | |
Xan Manning | a88d27d2ae | |
Xan Manning | 377565de96 | |
Xan Manning | 3be75a8296 | |
Xan Manning | b9b2a8e054 | |
Andrew Chen | 59af276c72 | |
Xan Manning | 2f7d6af51d | |
Xan Manning | 20468734a0 | |
Xan Manning | e983629167 | |
Xan Manning | 0873fc4977 | |
Xan Manning | 0fa1ef29a9 | |
Xan Manning | e457854046 | |
Xan Manning | cc8ba00de2 | |
Xan Manning | 592b294ad8 | |
Xan Manning | 9349c9456d | |
Xan Manning | 582a696918 | |
Xan Manning | 987bc700a1 | |
janar153 | d9d8bbeece | |
janar153 | 5288de9db1 | |
Xan Manning | df51a8aaec | |
Xan Manning | a4cbc4d68d | |
Xan Manning | 41a13ca2f7 | |
Andrew Chen | ce4ad4dc0b | |
Xan Manning | 58f4de5481 | |
Xan Manning | c287bef9cd | |
Xan Manning | 59f0a2152e | |
Xan Manning | 191d51bce6 | |
Xan Manning | 2a282c0ae2 | |
Xan Manning | 677db09b4a | |
Xan Manning | 4c20fd3f0b | |
Xan Manning | 1eaeba67b5 | |
Xan Manning | 09abfd2cba | |
Xan Manning | ccfa561be0 | |
Xan Manning | 0c77eb143d | |
Xan Manning | 4269e25e6b | |
Xan Manning | dd341f6f10 | |
Xan Manning | 01b914985a | |
Xan Manning | 0f143962a1 | |
Xan Manning | 80f591cba4 | |
Curtis John | dd3c460bfa | |
Curtis John | 825ed3ad37 | |
Curtis John | f7c0c8783a | |
Curtis John | 8243baa3d9 | |
Curtis John | 25d40cec52 | |
Curtis John | 779968ca0a | |
Curtis John | b8727a1c92 | |
Curtis John | 4bcf3ea9c4 | |
Curtis John | e88f3bb056 | |
janar153 | 29658aeb2e | |
janar153 | 33a18bb517 | |
Xan Manning | ea413afa3a | |
Xan Manning | da13cc696a | |
Xan Manning | db3f7da362 | |
Xan Manning | 765fbf2e9b | |
Xan Manning | c47688e05c | |
Xan Manning | 3274c7e6e0 | |
Xan Manning | 25ca0ed8f7 | |
Devin Buhl | 0384dfcb4f | |
Devin Buhl | 207fbbd41a | |
Devin Buhl | 9db46b536d | |
Xan Manning | 83290e050c | |
Xan Manning | 189f2baf23 | |
Xan Manning | 077c9a3fd6 | |
Xan Manning | 1780b5a20f | |
Xan Manning | cc86f35d9b | |
Xan Manning | dc2bd28e10 | |
Xan Manning | f198b45d58 | |
Anes Belfodil | c0ec5ca930 | |
Xan Manning | 8c0c586607 | |
Xan Manning | 3b26d24212 | |
Xan Manning | ba113bcd05 | |
Xan Manning | e90448f40b | |
Xan Manning | 4e713918a7 | |
Xan Manning | 3b5c6e6ff5 | |
Xan Manning | d2968d5f42 | |
Yajo | 4b42a9bf49 | |
Jairo Llopis | 142b40f428 | |
Yajo | 05e62b6344 | |
Xan Manning | 0c084531d2 | |
Jairo Llopis | b8539cd82e | |
Xan Manning | 2da5738452 | |
Xan Manning | 8dab5e6f26 | |
Xan Manning | 7607bfb7a9 | |
Xan Manning | f46450319b | |
Xan Manning | 10d11c63ec | |
Michael Robinson | 3006716f66 | |
Xan Manning | 730edbf6cb | |
Xan Manning | e5b9e5a78a | |
Xan Manning | c36c026783 | |
ᗪєνιη ᗷυнʟ | e7374757fa | |
ᗪєνιη ᗷυнʟ | 51de880c0f | |
Devin Buhl | b7210af4e9 | |
Devin Buhl | 2e629838f1 | |
Xan Manning | 7f0eb60a14 | |
Bᴇʀɴᴅ Sᴄʜᴏʀɢᴇʀs | 32c68ea949 | |
Bᴇʀɴᴅ Sᴄʜᴏʀɢᴇʀs | d834ca15b0 | |
Xan Manning | 6bff9b9981 | |
Bᴇʀɴᴅ Sᴄʜᴏʀɢᴇʀs | da7d8c67d9 | |
Bᴇʀɴᴅ Sᴄʜᴏʀɢᴇʀs | 1bbba04230 | |
ᗪєνιη ᗷυнʟ | 82085cb80b | |
ᗪєνιη ᗷυнʟ | 07fe0e2964 | |
ᗪєνιη ᗷυнʟ | 2243766695 | |
ᗪєνιη ᗷυнʟ | ef99954177 | |
Xan Manning | 50fa321e7e | |
Xan Manning | 4d5d5b2838 | |
Xan Manning | 7bb9f6d8b4 | |
Xan Manning | f220fce08f | |
Xan Manning | 2b7fd373f0 | |
赵安家 | d563dcca05 | |
赵安家 | 075ef165c5 | |
赵安家 | c9e2b619d1 | |
赵安家 | 21fa8b048f | |
Xan Manning | a298ea0985 | |
Xan Manning | ea03eaa9dd | |
Vegetto | 5305eb3758 | |
Xan Manning | 87c56dbe64 | |
Xan Manning | d2ca503432 | |
AnJia | 91d456ccad | |
Xan Manning | 2432a7d25f | |
Xan Manning | f4fcd2897d | |
Xan Manning | 60da06e137 | |
Bastien Dronneau | 9a13d67468 | |
Xan Manning | 03b29cb09d | |
Xan Manning | 265b529bb6 | |
Xan Manning | 55f1f09f3a | |
Xan Manning | 23054c76f6 | |
Xan Manning | e5c69ec894 | |
Xan Manning | a3c4d9cfae | |
Xan Manning | efca6fcbbc | |
Michael Williams | 6a70a85ef2 | |
Xan Manning | 4326f4497d | |
Xan Manning | 85576d62ed | |
Xan Manning | 94a153892e | |
Xan Manning | a8c5cd4407 | |
Xan Manning | 15141e9d86 | |
Xan Manning | 1d93c2115d | |
Xan Manning | 62b2d7cb36 | |
Xan Manning | 05242ba232 | |
Xan Manning | c2348df1ea | |
Xan Manning | 5b6242ecca | |
Xan Manning | f6e009f1fd | |
Xan Manning | 7e4a16e167 | |
Xan Manning | c80898d92a | |
Xan Manning | 5555bd3d9b | |
Xan Manning | 2c12436226 | |
Xan Manning | d4d24aec79 | |
Xan Manning | 43b5359160 | |
Xan Manning | e026d2a4a7 | |
Xan Manning | fc1149ac9e | |
Xan Manning | 3716774cc9 | |
Xan Manning | 1b4d3dd9dd | |
Antoine Bertin | c169cb8937 | |
Xan Manning | e954ba13c4 | |
Xan Manning | 8f0e9f22af | |
Xan Manning | 216af14fe1 | |
Xan Manning | a2e035cd1c | |
Xan Manning | 6d1a5f812b | |
Xan Manning | 75504b08b4 | |
Xan Manning | e7c714424c | |
Xan Manning | ef6c579336 | |
Xan Manning | 99c22dceab | |
Xan Manning | 151d36d19b | |
Xan Manning | 06fac01266 | |
Xan Manning | 01a8313dd9 | |
Xan Manning | e25edbef3c | |
Xan Manning | a067a97f38 | |
Xan Manning | e7ba779c91 | |
Xan Manning | e4059661ab | |
Xan Manning | 1d40c4d2c9 | |
Xan Manning | 34e2af3d47 | |
Xan Manning | 5d3524d729 | |
Xan Manning | 4afc2c8a5a | |
Xan Manning | 21adf94627 | |
Xan Manning | fa73be4921 | |
Xan Manning | 976fe8c0ca | |
Xan Manning | ebf32dbd99 | |
Xan Manning | cc59955b28 | |
Xan Manning | ddbf7a71a8 | |
Xan Manning | 603cabdb39 | |
Xan Manning | aea68db6c5 | |
Martin Friedrich | f9461f1951 | |
Xan Manning | 58db02a967 | |
Xan Manning | 66ee539862 | |
Xan Manning | a2075a7a76 | |
Martin Friedrich | dd40e73d6c | |
Martin Friedrich | dc571c375b | |
Xan Manning | 8c791cb611 | |
Xan Manning | a99087c7f6 | |
Xan Manning | 29c4936807 | |
Xan Manning | 1f74a599ee | |
Martin Friedrich | 4ed0727411 | |
Martin Friedrich | edc98a6d6e | |
Martin Friedrich | 04375f5e39 | |
Xan Manning | 170bf5995f | |
Xan Manning | a8dd9acdb9 | |
Xan Manning | e473064f61 | |
Xan Manning | 35b037c7ee | |
Martin Friedrich | e5133c1f73 | |
Xan Manning | 3d2b74c816 | |
Xan Manning | 57b9a2a0be | |
Xan Manning | 61f706acb9 | |
Xan Manning | 93b95a9813 | |
Xan Manning | 292c726b07 | |
Xan Manning | f3173f193f | |
Xan Manning | 6e29200d9a | |
Xan Manning | 9b800d9fba | |
Xan Manning | 36a2f24a9d | |
Xan Manning | 23cdd3edda | |
Xan Manning | a93403d312 | |
Xan Manning | 45a41f895b | |
Xan Manning | c63d984301 | |
Xan Manning | 72638e8e3d | |
Xan Manning | 9a15d8eddf | |
Xan Manning | 062c459b00 | |
Xan Manning | d52cda1d10 | |
Xan Manning | 57f9631265 | |
ᗪєνιη ᗷυнʟ | 6cf09c8efa | |
ᗪєνιη ᗷυнʟ | f39f228f39 | |
ᗪєνιη ᗷυнʟ | 2bb556f1da | |
ᗪєνιη ᗷυнʟ | 564d693e9d | |
ᗪєνιη ᗷυнʟ | d4c38f59cc | |
ᗪєνιη ᗷυнʟ | b06d1635f1 | |
ᗪєνιη ᗷυнʟ | 647d6026e4 | |
ᗪєνιη ᗷυнʟ | 7dd8a3f8ff | |
ᗪєνιη ᗷυнʟ | ddfc73586c | |
ᗪєνιη ᗷυнʟ | b16f142c21 | |
Xan Manning | 4b4a49bdd5 | |
ᗪєνιη ᗷυнʟ | c447fcec39 | |
Xan Manning | 4dd827c2a7 | |
Xan Manning | 1438ddde69 | |
Xan Manning | d0e209d866 | |
ᗪєνιη ᗷυнʟ | c99c9bf67f | |
ᗪєνιη ᗷυнʟ | 36d44bc1af | |
ᗪєνιη ᗷυнʟ | cc0c686e61 | |
ᗪєνιη ᗷυнʟ | 7ea82ed749 | |
ᗪєνιη ᗷυнʟ | 0129ec3e5c | |
Xan Manning | ab48e3a173 | |
Xan Manning | 175b90ecb0 | |
Xan Manning | c743df868b | |
Xan Manning | 230aaa110c | |
Xan Manning | 1f8429a77b | |
Xan Manning | b412858b30 | |
Xan Manning | d8a348923a | |
Xan Manning | 0bfbaa302e | |
Xan Manning | d53102dda3 | |
Xan Manning | 809e9cd73c | |
Xan Manning | d2a34546cf | |
Xan Manning | 504b84a8b6 | |
Xan Manning | 3a6b411430 | |
Xan Manning | f454334b42 | |
Xan Manning | 2c0afbca42 | |
Xan Manning | 9d04e315ae | |
Michael | f90cc5ca18 | |
Xan Manning | 848a5457ff | |
Xan Manning | 6090071982 | |
Xan Manning | 23ba527bc2 | |
Xan Manning | 9524b07df0 | |
Xan Manning | 141b6f2018 | |
Xan Manning | 5ce8dec6ff | |
Xan Manning | e3301a59e4 | |
Xan Manning | 02e12e61a8 | |
Xan Manning | b42ffade29 | |
Xan Manning | 26467de186 | |
Xan Manning | aa1a0a9620 | |
Xan Manning | 9b8cf85489 | |
nolte | df44053349 | |
Xan Manning | 681cd981ab | |
Xan Manning | c5a8f03b35 | |
SimonHeimberg | acedb08a1f | |
Xan Manning | bcb81e7c7d | |
Thibault Nélis | 9bace4a62f | |
Xan Manning | e93b438ee0 | |
Xan Manning | f684f6d907 | |
Xan Manning | f709caf371 | |
Xan Manning | 2c09d4711b | |
Xan Manning | 9dcfa954f9 | |
Xan Manning | 554fada914 | |
Xan Manning | 12d01c2a60 | |
Xan Manning | 84bf657f1c | |
Xan Manning | 241dc24d59 | |
Ryan Holt | 3f6ce99369 | |
Ryan Holt | db96168491 | |
Ryan Holt | c473f932c4 | |
Xan Manning | 56b2d7bc03 | |
Xan Manning | 75fd17aac8 | |
Devin Buhl | 5f7ff27f17 | |
Devin Buhl | a1e52fb660 | |
Devin Buhl | e7c787e10f | |
Devin Buhl | 8d0ee69012 | |
Devin Buhl | fd7498303d | |
Devin Buhl | be85c9ccc5 | |
Devin Buhl | 9bbf5fd746 | |
Devin Buhl | c4547306ce | |
Xan Manning | 31debb2f5d | |
Xan Manning | f82f90aae0 | |
Xan Manning | 5517671477 | |
Xan Manning | 1f19e2b302 | |
Xan Manning | 218b9d64c9 | |
Xan Manning | 3da7599eab | |
James D. Marble | 044ed5512c | |
Xan Manning | e07903a5cf | |
Xan Manning | 04a92ee956 | |
Xan Manning | 927fd41036 | |
Xan Manning | df253b504a | |
Xan Manning | c5b6dcd7fa | |
Xan Manning | e3ce213bc0 | |
Xan Manning | c8fb27ecd1 | |
Xan Manning | 3ef36b841f | |
Xan Manning | 3a1c7e7b35 | |
Xan Manning | 7e7cf2b97d | |
nolte | 5331e22425 | |
Xan Manning | 09fc37e6ec | |
Xan Manning | c3ae2b79eb | |
nolte | 2d0dc8db69 | |
nolte | a73a1fbdef | |
nolte | b896e90704 | |
nolte | 2e03ea2e6f | |
nolte | 227b24c117 | |
nolte | 1dd9297de4 | |
nolte | cb13c5b473 | |
nolte | 2aedce0359 | |
nolte | b89f2f3acd | |
nolte | 2b646e4e4f | |
nolte | 2307546be2 | |
Xan Manning | 734e49a7e5 | |
Xan Manning | da427f1518 | |
Xan Manning | f2a3f75f08 | |
Xan Manning | fe688dfc70 | |
Xan Manning | 717de81c7f | |
Xan Manning | e8e5dbf45a | |
Miika Kankare | c5cdc745e5 | |
Xan Manning | 99c103a14f | |
Xan Manning | ec61e0b4ce | |
Xan Manning | 26a3b2eef0 | |
Xan Manning | 8f3b2428c8 | |
Xan Manning | 2b8f354a88 | |
Xan Manning | d81d41e709 | |
Xan Manning | 9295347b6d | |
Xan Manning | 5e39160ed9 | |
Xan Manning | 1282da8cfa | |
Xan Manning | 6e9566d5eb | |
Xan Manning | efc703541c | |
Xan Manning | 2327d0433d | |
Xan Manning | f077120580 | |
Xan Manning | 43275f5d63 | |
abdennour | 07661f7df8 | |
Xan Manning | 389974d7d3 | |
James D. Marble | 3e83e3c301 | |
Xan Manning | 27083e1d5b | |
Xan Manning | 728dd1ff12 |
|
@ -0,0 +1,5 @@
|
|||
---
|
||||
|
||||
skip_list:
|
||||
- role-name
|
||||
- name[template]
|
|
@ -0,0 +1,26 @@
|
|||
ARG VARIANT=focal
|
||||
FROM ubuntu:${VARIANT}
|
||||
|
||||
COPY molecule/requirements.txt /tmp/molecule/requirements.txt
|
||||
COPY requirements.txt /tmp/requirements.txt
|
||||
|
||||
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
|
||||
&& apt-get -y install curl git python3-dev python3-pip \
|
||||
python3-venv shellcheck sudo unzip docker.io jq \
|
||||
&& curl -L \
|
||||
"https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \
|
||||
-o /usr/bin/kubectl \
|
||||
&& chmod +x /usr/bin/kubectl \
|
||||
&& python3 -m pip install pip --upgrade \
|
||||
&& python3 -m pip install -r /tmp/molecule/requirements.txt
|
||||
|
||||
RUN useradd -s /bin/bash -m vscode && \
|
||||
usermod -aG docker vscode && \
|
||||
echo 'vscode ALL=(ALL:ALL) NOPASSWD: ALL' > /etc/sudoers.d/vscode && \
|
||||
echo 'source /etc/bash_completion.d/git-prompt' >> /home/vscode/.bashrc && \
|
||||
echo 'sudo chown vscode /var/run/docker-host.sock' >> /home/vscode/.bashrc && \
|
||||
echo 'export PS1="${PS1:0:-1}\[\033[38;5;196m\]$(__git_ps1)\[$(tput sgr0)\] "' >> /home/vscode/.bashrc
|
||||
|
||||
RUN ln -s /var/run/docker-host.sock /var/run/docker.sock
|
||||
|
||||
USER vscode
|
|
@ -0,0 +1,28 @@
|
|||
{
|
||||
"name": "Ubuntu",
|
||||
"build": {
|
||||
"context": "..",
|
||||
"dockerfile": "Dockerfile",
|
||||
"args": { "VARIANT": "focal" }
|
||||
},
|
||||
|
||||
"settings": {
|
||||
"terminal.integrated.profiles.linux": {
|
||||
"bash (login)": {
|
||||
"path": "/bin/bash",
|
||||
"args": ["-l"]
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
"extensions": [
|
||||
"ms-azuretools.vscode-docker",
|
||||
"redhat.vscode-yaml"
|
||||
],
|
||||
|
||||
"mounts": [
|
||||
"source=/var/run/docker.sock,target=/var/run/docker-host.sock,type=bind"
|
||||
],
|
||||
|
||||
"remoteUser": "vscode"
|
||||
}
|
|
@ -0,0 +1,55 @@
|
|||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
---
|
||||
|
||||
<!-- Please first verify that your issue is not already reported on GitHub -->
|
||||
<!-- Complete *all* sections as described. -->
|
||||
|
||||
### Summary
|
||||
|
||||
<!-- Explain the problem briefly below -->
|
||||
|
||||
### Issue Type
|
||||
|
||||
- Bug Report
|
||||
|
||||
### Controller Environment and Configuration
|
||||
|
||||
<!-- Please re-run your playbook with: `-e "pyratlabs_issue_controller_dump=true"` -->
|
||||
<!-- Example: `ansible-playbook -e "pyratlabs_issue_controller_dump=true" /path/to/playbook.yml` -->
|
||||
<!-- Then please copy-and-paste the contents (or attach) to this issue. -->
|
||||
|
||||
<!-- Please also include information about the version of the role you are using -->
|
||||
|
||||
```text
|
||||
|
||||
```
|
||||
|
||||
### Steps to Reproduce
|
||||
|
||||
<!-- Describe exactly how to reproduce the problem, using a minimal test-case -->
|
||||
|
||||
<!-- Paste example playbooks or commands between quotes below -->
|
||||
|
||||
```yaml
|
||||
|
||||
```
|
||||
|
||||
### Expected Result
|
||||
|
||||
<!-- Describe what you expected to happen when running the steps above -->
|
||||
|
||||
```text
|
||||
|
||||
```
|
||||
|
||||
### Actual Result
|
||||
|
||||
<!-- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
|
||||
|
||||
<!-- Paste verbatim command output between quotes -->
|
||||
|
||||
```text
|
||||
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
---
|
||||
|
||||
blank_issues_enabled: true
|
|
@ -0,0 +1,33 @@
|
|||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
---
|
||||
|
||||
<!-- Please first verify that your feature was not already discussed on GitHub -->
|
||||
<!-- Complete *all* sections as described, this form is processed automatically -->
|
||||
|
||||
### Summary
|
||||
|
||||
<!-- Describe the new feature/improvement briefly below -->
|
||||
|
||||
### Issue Type
|
||||
|
||||
- Feature Request
|
||||
|
||||
### User Story
|
||||
|
||||
<!-- If you can, please provide a user story, if you don't know what this is don't worry, it will be refined by PyratLabs. -->
|
||||
<!-- Describe who would use it, why it is needed and the benefit -->
|
||||
|
||||
_As a_ <!-- (Insert Persona) --> \
|
||||
_I want to_ <!-- (Insert Action) --> \
|
||||
_So that_ <!-- (Insert Benefit) -->
|
||||
|
||||
### Additional Information
|
||||
|
||||
<!-- Please include any relevant documentation, URLs, etc. -->
|
||||
<!-- Paste example playbooks or commands between quotes below -->
|
||||
|
||||
```yaml
|
||||
|
||||
```
|
|
@ -0,0 +1,37 @@
|
|||
## TITLE
|
||||
|
||||
### Summary
|
||||
|
||||
<!-- Describe the change below, including rationale and design decisions -->
|
||||
|
||||
<!-- HINT: Include "Fixes #nnn" if you are fixing an existing issue -->
|
||||
|
||||
### Issue type
|
||||
|
||||
<!-- Pick one below and delete the rest -->
|
||||
- Bugfix
|
||||
- Documentation
|
||||
- Feature
|
||||
|
||||
### Test instructions
|
||||
|
||||
<!-- Please provide instructions for testing this PR -->
|
||||
|
||||
### Acceptance Criteria
|
||||
|
||||
<!-- Please list criteria required to ensure this change has been sufficiently reviewed. -->
|
||||
|
||||
<!-- Example ticklist:
|
||||
- [ ] GitHub Actions Build passes.
|
||||
- [ ] Documentation updated.
|
||||
-->
|
||||
|
||||
### Additional Information
|
||||
|
||||
<!-- Include additional information to help people understand the change here -->
|
||||
|
||||
<!-- Paste verbatim command output below, e.g. before and after your change -->
|
||||
|
||||
```text
|
||||
|
||||
```
|
|
@ -0,0 +1,18 @@
|
|||
---
|
||||
# Number of days of inactivity before an issue becomes stale
|
||||
daysUntilStale: 60
|
||||
# Number of days of inactivity before a stale issue is closed
|
||||
daysUntilClose: 7
|
||||
# Issues with these labels will never be considered stale
|
||||
exemptLabels:
|
||||
- pinned
|
||||
- security
|
||||
# Label to use when marking an issue as stale
|
||||
staleLabel: wontfix
|
||||
# Comment to post when marking an issue as stale. Set to `false` to disable
|
||||
markComment: >
|
||||
This issue has been automatically marked as stale because it has not had
|
||||
recent activity. It will be closed if no further activity occurs. Thank you
|
||||
for your contributions.
|
||||
# Comment to post when closing a stale issue. Set to `false` to disable
|
||||
closeComment: false
|
|
@ -0,0 +1,98 @@
|
|||
---
|
||||
|
||||
name: CI
|
||||
'on':
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
- v1_release
|
||||
schedule:
|
||||
- cron: "0 1 1 * *"
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: "xanmanning.k3s"
|
||||
|
||||
jobs:
|
||||
ansible-lint:
|
||||
name: Ansible Lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout codebase
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: "xanmanning.k3s"
|
||||
|
||||
- name: Set up Python 3
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: "3.x"
|
||||
|
||||
- name: Install test dependencies
|
||||
run: pip3 install -r molecule/lint-requirements.txt
|
||||
|
||||
- name: Run yamllint
|
||||
run: yamllint -s .
|
||||
|
||||
- name: Run ansible-lint
|
||||
run: ansible-lint --exclude molecule/ --exclude meta/
|
||||
|
||||
molecule:
|
||||
name: Molecule
|
||||
runs-on: ubuntu-20.04
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- distro: geerlingguy/docker-debian11-ansible:latest
|
||||
scenario: default
|
||||
prebuilt: 'true'
|
||||
- distro: geerlingguy/docker-ubuntu2204-ansible:latest
|
||||
scenario: default
|
||||
prebuilt: 'true'
|
||||
- distro: geerlingguy/docker-amazonlinux2-ansible:latest
|
||||
scenario: default
|
||||
prebuilt: 'true'
|
||||
- distro: geerlingguy/docker-ubuntu2004-ansible:latest
|
||||
scenario: default
|
||||
prebuilt: 'true'
|
||||
- distro: geerlingguy/docker-fedora35-ansible:latest
|
||||
scenario: nodeploy
|
||||
prebuilt: 'true'
|
||||
- distro: geerlingguy/docker-fedora34-ansible:latest
|
||||
scenario: highavailabilitydb
|
||||
prebuilt: 'true'
|
||||
- distro: geerlingguy/docker-fedora33-ansible:latest
|
||||
scenario: autodeploy
|
||||
- distro: xanmanning/docker-alpine-ansible:3.16
|
||||
scenario: highavailabilityetcd
|
||||
prebuilt: 'false'
|
||||
- distro: geerlingguy/docker-rockylinux9-ansible:latest
|
||||
scenario: highavailabilityetcd
|
||||
prebuilt: 'true'
|
||||
|
||||
steps:
|
||||
- name: Checkout codebase
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: "xanmanning.k3s"
|
||||
|
||||
- name: Set up Python 3
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: "3.x"
|
||||
|
||||
- name: Install test dependencies
|
||||
run: pip3 install -r molecule/requirements.txt
|
||||
|
||||
- name: Run Molecule tests
|
||||
run: molecule test --scenario-name "${{ matrix.scenario }}"
|
||||
# continue-on-error: true
|
||||
env:
|
||||
PY_COLORS: '1'
|
||||
ANSIBLE_FORCE_COLOR: '1'
|
||||
MOLECULE_DISTRO: ${{ matrix.distro }}
|
||||
MOLECULE_PREBUILT: ${{ matrix.prebuilt }}
|
||||
MOLECULE_DOCKER_COMMAND: ${{ matrix.command }}
|
|
@ -0,0 +1,32 @@
|
|||
---
|
||||
|
||||
name: Release
|
||||
'on':
|
||||
push:
|
||||
tags:
|
||||
- '*'
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: "xanmanning.k3s"
|
||||
|
||||
jobs:
|
||||
release:
|
||||
name: Release
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout codebase
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: "xanmanning.k3s"
|
||||
|
||||
- name: Set up Python 3
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: "3.x"
|
||||
|
||||
- name: Install Ansible
|
||||
run: pip3 install -r requirements.txt
|
||||
|
||||
- name: Trigger a new import on Galaxy
|
||||
run: ansible-galaxy role import --api-key ${{ secrets.GALAXY_API_KEY }} $(echo ${{ github.repository }} | cut -d/ -f1) $(echo ${{ github.repository }} | cut -d/ -f2)
|
|
@ -4,5 +4,10 @@ VAULT_PASSWORD
|
|||
VAULT_PASS
|
||||
.vault_pass
|
||||
.vault_pass.asc
|
||||
tests/fetch
|
||||
tests/ubuntu-*.log
|
||||
vagramt/fetch
|
||||
vagrant/ubuntu-*.log
|
||||
__pycache__
|
||||
ansible.cfg
|
||||
pyratlabs-issue-dump.txt
|
||||
.cache
|
||||
/.idea/
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
---
|
||||
# Based on ansible-lint config
|
||||
extends: default
|
||||
|
||||
rules:
|
||||
braces:
|
||||
max-spaces-inside: 1
|
||||
level: error
|
||||
brackets:
|
||||
max-spaces-inside: 1
|
||||
level: error
|
||||
colons:
|
||||
max-spaces-after: -1
|
||||
level: error
|
||||
commas:
|
||||
max-spaces-after: -1
|
||||
level: error
|
||||
comments: disable
|
||||
comments-indentation: disable
|
||||
document-start: disable
|
||||
empty-lines:
|
||||
max: 3
|
||||
level: error
|
||||
hyphens:
|
||||
level: error
|
||||
indentation: disable
|
||||
key-duplicates: enable
|
||||
line-length: disable
|
||||
new-line-at-end-of-file: disable
|
||||
new-lines:
|
||||
type: unix
|
||||
trailing-spaces: disable
|
||||
truthy: disable
|
|
@ -0,0 +1,640 @@
|
|||
# Change Log
|
||||
|
||||
<!--
|
||||
## DATE, vx.x.x
|
||||
|
||||
### Notable changes
|
||||
|
||||
### Breaking changes
|
||||
|
||||
### Known issues
|
||||
|
||||
### Contributors
|
||||
|
||||
---
|
||||
-->
|
||||
|
||||
## 2023-05-17, v3.4.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- fix: resolve ansible lint warnings and fix molecule tests in github actions
|
||||
|
||||
### Contributors
|
||||
|
||||
- [dbrennand](https://github.com/dbrennand)
|
||||
|
||||
---
|
||||
|
||||
## 2023-03-11, v3.4.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- refactor: add `until: 1.23.15` to `secrets-encryption` from `k3s_experimental_config` as it is no longer experimental. Fixes #200.
|
||||
- docs(fix): typo in `CONTRIBUTING.md`
|
||||
|
||||
### Contributors
|
||||
|
||||
- [dbrennand](https://github.com/dbrennand)
|
||||
|
||||
---
|
||||
|
||||
## 2022-11-15, v3.3.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- fix: length indentation in registry.yaml
|
||||
|
||||
---
|
||||
|
||||
## 2022-09-11, v3.3.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- fix: `no_log` removed from `ansible.builtin.uri` tasks
|
||||
- feat: `k3s_skip_post_checks` option added
|
||||
|
||||
---
|
||||
|
||||
## 2022-06-17, v3.2.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- feature: added support for alpine #182
|
||||
- fix: `k3s_control_token` not working #187
|
||||
|
||||
## 2022-05-02, v3.1.2
|
||||
|
||||
### Notable changes
|
||||
|
||||
- fix: molecule tests
|
||||
|
||||
---
|
||||
|
||||
## 2022-02-18, v3.1.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- fix: support nftables for debian 11
|
||||
|
||||
### Contributors
|
||||
|
||||
- [eaglesemanation](https://github.com/eaglesemanation)
|
||||
|
||||
---
|
||||
|
||||
## 2022-01-30, v3.1.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- feat: use basename of url for items in `k3s_server_manifests_urls` and
|
||||
`k3s_server_pod_manifests_urls` if filename is not provided #177
|
||||
|
||||
### Contributors
|
||||
|
||||
- [kossmac](https://github.com/kossmac)
|
||||
|
||||
---
|
||||
|
||||
## 2022-01-06, v3.0.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- fix: adding become to pre checks packages #173
|
||||
|
||||
### Contributors
|
||||
|
||||
- [xlejo](https://github.com/xlejo)
|
||||
|
||||
---
|
||||
|
||||
## 2022-01-02, v3.0.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- feat: Flattened task filesystem
|
||||
- feat: Moved some tasks into `vars/` as templated variables
|
||||
- feat: Airgap installation method added #165
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- Minimum `python` version on targets is 3.6
|
||||
- `k3s_become_for_all` renamed to `k3s_become`
|
||||
- `k3s_become_for_*` removed.
|
||||
|
||||
### Contributors
|
||||
|
||||
- [crutonjohn](https://github.com/crutonjohn)
|
||||
|
||||
---
|
||||
|
||||
## 2021-12-23, v2.12.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Fix typo in systemd unit file
|
||||
|
||||
### Contributors
|
||||
|
||||
- [andrewchen5678](https://github.com/andrewchen5678)
|
||||
|
||||
---
|
||||
|
||||
## 2021-12-20, v2.12.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Fix RockyLinux HA etcd tests
|
||||
- add Debian 11 test
|
||||
- Fix Snapshotter in Molecule tests
|
||||
- Added missing documentation for `k3s_api_url`
|
||||
- Added option to change K3s updates API url
|
||||
- Custom environment variables in systemd unit files
|
||||
- Debian Bullseye support
|
||||
- Fix HA etcd cluster startup
|
||||
- Fix rootless for Debian
|
||||
|
||||
### Contributors
|
||||
|
||||
- [janar153](https://github.com/janar153)
|
||||
|
||||
---
|
||||
|
||||
## 2021-10-10, v2.11.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- docs: fixed references to `write-kubeconfig-mode` to set correct permissions #157
|
||||
- fix: Flag --delete-local-data has been deprecated #159
|
||||
|
||||
---
|
||||
|
||||
## 2021-09-08, v2.11.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- docs: example of IPv6 configuration
|
||||
- feat: checks for s3 backup configuration
|
||||
- feat: implement config.yaml.d
|
||||
|
||||
### Contributors
|
||||
|
||||
- [onedr0p](https://github.com/onedr0p)
|
||||
|
||||
---
|
||||
|
||||
## 2021-08-18, v2.10.6
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Fix: Define registration address from node-ip #142
|
||||
|
||||
---
|
||||
|
||||
## 2021-08-14, v2.10.5
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Add advertised address #139
|
||||
|
||||
### Contributors
|
||||
|
||||
- [@abelfodil](https://github.com/abelfodil)
|
||||
|
||||
---
|
||||
|
||||
## 2021-07-24, v2.10.4
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Updated systemd template to use token when joining a cluster #138
|
||||
|
||||
---
|
||||
|
||||
## 2021-07-21, v2.10.3
|
||||
|
||||
### Notable changes
|
||||
|
||||
- fix: typo #133
|
||||
- fix: restore clustering and avoid failure with jinja2_native=true #135
|
||||
- fix: do ignore etcd member count when uninstalling #136
|
||||
|
||||
### Contributors
|
||||
|
||||
- [@Yaro](https://github.com/Yajo)
|
||||
|
||||
---
|
||||
|
||||
## 2021-06-22, v2.10.2
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Role is now tested against RockyLinux
|
||||
|
||||
---
|
||||
|
||||
## 2021-05-30, v2.10.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Case insensitive control node lookup #126
|
||||
|
||||
### Contributors
|
||||
|
||||
- [@mrobinsn](https://github.com/mrobinsn)
|
||||
|
||||
---
|
||||
|
||||
## 2021-05-27, v2.10.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Only deploy templates on primary controller #119
|
||||
- Allow control plane static pods #120
|
||||
- Add support for specifying URLs in templates #124
|
||||
|
||||
### Contributors
|
||||
|
||||
- [@bjw-s](https://github.com/bjw-s)
|
||||
- [@onedr0p](https://github.com/onedr0p)
|
||||
|
||||
---
|
||||
|
||||
## 2021-05-14, v2.9.1
|
||||
|
||||
<!-- Today was a better day... <3 -->
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Documentation, remove references to deprecated configuration techniques #115
|
||||
- Bugfix: Templating issue.
|
||||
|
||||
---
|
||||
|
||||
## 2021-05-13, v2.9.0
|
||||
|
||||
<!-- a shit day... -->
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Feature: Support k3s private registry configuration #114
|
||||
|
||||
### Contributors
|
||||
|
||||
- [@anjia0532](https://github.com/anjia0532)
|
||||
|
||||
---
|
||||
|
||||
## 2021-05-06, v2.8.5
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Bugfix: Unmount CSI plugin folder to avoid data lost on uninstall #113
|
||||
|
||||
### Contributors
|
||||
|
||||
- [@angelnu](https://github.com/angelnu)
|
||||
|
||||
---
|
||||
|
||||
## 2021-05-01, v2.8.4
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Fixed issue with draining nodes #112
|
||||
|
||||
### Contributors
|
||||
|
||||
- [@anjia0532](https://github.com/anjia0532)
|
||||
|
||||
---
|
||||
|
||||
## 2021-04-18, v2.8.3
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Typo fix in README.md #110
|
||||
- Fixed check mode for cgroup test #111
|
||||
- Added check mode into molecule test sequence
|
||||
- `inventory.yml` is now `blockinfile`
|
||||
|
||||
### Contributors
|
||||
|
||||
- [@bdronneau](https://github.com/bdronneau)
|
||||
|
||||
---
|
||||
|
||||
## 2021-04-10, v2.8.2
|
||||
|
||||
### Notable changes
|
||||
|
||||
- #105 - Added Ansible v2.9.16 support
|
||||
- #102 - Pre-check for cgroup status
|
||||
|
||||
### Known issues
|
||||
|
||||
- As per README.md, you require `ansible` >= 2.9.16
|
||||
or `ansible-base` >= 2.10.4. See [#105(comment)](https://github.com/PyratLabs/ansible-role-k3s/issues/105#issuecomment-817182233)
|
||||
|
||||
---
|
||||
|
||||
## 2021-03-22, v2.8.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- #100 - Fixed typo in README.md
|
||||
|
||||
### Contributors
|
||||
|
||||
- [@mbwmbw1337](https://github.com/mbwmbw1337)
|
||||
|
||||
---
|
||||
|
||||
## 2021-03-14, v2.8.0
|
||||
|
||||
Happy π day!
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Updated GitHub Actions, resolved linting errors.
|
||||
- Renamed `k3s_control_node_address` -> `k3s_registration_address`
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- A task has been added to rename `k3s_control_node_address` to
|
||||
`k3s_registration_address` for any users still using this variable name,
|
||||
however this might still break something.
|
||||
|
||||
---
|
||||
|
||||
## 2021-02-28, v2.7.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Bugfix, missing become on cluster token check.
|
||||
|
||||
---
|
||||
|
||||
## 2021-02-27, v2.7.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Cluster init checks added.
|
||||
- Tidy up of tasks, failed checks.
|
||||
- Possible fix for #93 - force draining of nodes added.
|
||||
|
||||
---
|
||||
|
||||
## 2021-02-27, v2.6.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Bugfix: Templating error for single control plane nodes using Etcd.
|
||||
- Bugfix: a number of typos fixed.
|
||||
|
||||
---
|
||||
|
||||
## 2021-02-16, v2.6.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Tidy up of `when` params and `assert` tasks to be more readable.
|
||||
- Added feature to tweak K3S service dependencies.
|
||||
- Updated documentation:
|
||||
- Node labels and component arguments
|
||||
- systemd config
|
||||
- Use alternate CNI (Calico example)
|
||||
|
||||
---
|
||||
|
||||
## 2021-01-31, v2.5.3
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Bugfix, missing update to minimum ansible version var #91.
|
||||
|
||||
---
|
||||
|
||||
## 2021-01-30, v2.5.2
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Bugfix, missing `k3s_start_on_boot` to control `systemd.enabled` added.
|
||||
|
||||
---
|
||||
|
||||
## 2021-01-30, v2.5.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Added uninstall task to remove hard-linked files #88
|
||||
- Fixed missing become for `systemd` operations tasks. #89
|
||||
- Added `k3s_start_on_boot` to control `systemd.enabled`.
|
||||
|
||||
---
|
||||
|
||||
## 2021-01-24, v2.5.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Added support for Ansible >= 2.9.17 #83
|
||||
|
||||
---
|
||||
|
||||
## 2021-01-23, v2.4.3
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Bugfix: Installation hangs on "Check that all nodes to be ready" #84
|
||||
|
||||
---
|
||||
|
||||
## 2021-01-10, v2.4.2
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Bugfix: Docker check still failing on "false"
|
||||
|
||||
---
|
||||
|
||||
## 2021-01-02, v2.4.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Fixed issue with armv6l (Raspberry Pi Zero W)
|
||||
- Added path for private repositories config to directory creation list.
|
||||
|
||||
---
|
||||
|
||||
## 2020-12-21, v2.4.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- `k3s_config_dir` derived from `k3s_config_file`, reused throughout the role
|
||||
to allow for easy removal of "Rancher" references #73.
|
||||
- `k3s_token_location` has moved to be in `k3s_config_dir`.
|
||||
- Tasks for creating directories now looped to capture configuration from
|
||||
`k3s_server` and `k3s_agent` and ensure directories exist before k3s
|
||||
starts, see #75.
|
||||
- Server token collected directly from token file, not symlinked file
|
||||
(node-token).
|
||||
- `k3s_runtime_config` defined in `vars/` for validation and overwritten in
|
||||
tasks for control plane and workers.
|
||||
- Removed unused references to GitHub API.
|
||||
- `set_fact` and `command` tasks now use FQCN.
|
||||
- Check of `ansible_version` in environment check.
|
||||
- Introduction of target environment checks for #72.
|
||||
- Fixed bug with non-default listening port not being passed to workers.
|
||||
- Added ability to put documentation links into validation checks #76.
|
||||
- Removed the requirement for `jmespath` on the Ansible controller.
|
||||
- Fixed bug with issue data collection tasks.
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- Ansible minimum version is hard set to v2.10.4
|
||||
- `k3s_token_location` has moved to be in `k3s_config_dir` so re-running the
|
||||
role will create a duplicate file here.
|
||||
|
||||
---
|
||||
|
||||
## 2020-12-19, v2.3.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Updated k3s uninstall scripts #74
|
||||
- Started moving Rancher references to `vars/` as per #73
|
||||
|
||||
---
|
||||
|
||||
## 2020-12-19, v2.2.2
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Fixed typos in documentation.
|
||||
- Molecule testing pinned to v3.1 due to tests failing.
|
||||
|
||||
---
|
||||
|
||||
## 2020-12-16, v2.2.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Re-working documentation
|
||||
- Updated GitHub link, org changed from Rancher to k3s-io.
|
||||
- Replace deprecated `play_hosts` variable.
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- Moving git branch from `master` to `main`.
|
||||
|
||||
---
|
||||
|
||||
## 2020-12-12, v2.2.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Use of FQCNs enforced, minimum Ansible version now v2.10
|
||||
- `k3s_etcd_datastore` no longer experimental after K3s version v1.19.5+k3s1
|
||||
- Docker marked as deprecated for K3s > v1.20.0+k3s1
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- Use of FQCNs enforced, minimum Ansible version now v2.10
|
||||
- Use of Docker requires `k3s_use_unsupported_config` to be `true` after
|
||||
v1.20.0+k3s1
|
||||
|
||||
---
|
||||
|
||||
## 2020-12-05, v2.1.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Fixed link to documentation.
|
||||
|
||||
---
|
||||
|
||||
## 2020-12-05, v2.1.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Deprecated configuration check built into validation steps.
|
||||
- Removed duplicated tasks for single node cluster.
|
||||
- Added documentation providing quickstart examples and common operations.
|
||||
- Fixed data-dir configuration.
|
||||
- Some tweaks to rootless.
|
||||
- Fix draining and removing of nodes.
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- `k3s_token_location` now points to a file location, not a directory.
|
||||
- `k3s_systemd_unit_directory` renamed to `k3s_systemd_unit_dir`
|
||||
- Removed `k3s_node_data_dir` as this is now configured with `data-dir` in
|
||||
`k3s_server` and/or `k3s_agent`.
|
||||
|
||||
### Known issues
|
||||
|
||||
- Rootless is still broken, this is still not supported as a method for
|
||||
running k3s using this role.
|
||||
|
||||
---
|
||||
|
||||
## 2020-11-30, v2.0.2
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Updated issue template and information collection tasks.
|
||||
|
||||
---
|
||||
|
||||
## 2020-11-30, v2.0.1
|
||||
|
||||
### Notable changes
|
||||
|
||||
- Fixed a number of typos in the README.md
|
||||
- Updated the meta/main.yml to put quotes around minimum Ansible version.
|
||||
|
||||
---
|
||||
|
||||
## 2020-11-29, v2.0.0
|
||||
|
||||
### Notable changes
|
||||
|
||||
- #64 - Initial release of v2.0.0 of
|
||||
[ansible-role-k3s](https://github.com/PyratLabs/ansible-role-k3s).
|
||||
- Minimum supported k3s version now: v1.19.1+k3s1
|
||||
- Minimum supported Ansible version now: v2.10.0
|
||||
- #62 - Remove all references to the word "master".
|
||||
- #53 - Move to file-based configuration.
|
||||
- Refactored to avoid duplication in code and make contribution easier.
|
||||
- Validation checks moved to using variables defined in `vars/`
|
||||
|
||||
### Breaking changes
|
||||
|
||||
#### File based configuration
|
||||
|
||||
Issue #53
|
||||
|
||||
With the release of v1.19.1+k3s1, this role has moved to file-based
|
||||
configuration of k3s. This requires manual translation of v1 configuration
|
||||
variables into configuration file format.
|
||||
|
||||
Please see: https://rancher.com/docs/k3s/latest/en/installation/install-options/#configuration-file
|
||||
|
||||
#### Minimum supported k3s version
|
||||
|
||||
As this role now relies on file-based configuration, the v2.x release of this
|
||||
role will only support v1.19+ of k3s. If you are not in a position to update
|
||||
k3s you will need to continue using the v1.x release of this role, which will
|
||||
be supported until March 2021<!-- 1 year after k8s v1.18 release -->.
|
||||
|
||||
#### Minimum supported ansible version
|
||||
|
||||
This role now only supports Ansible v2.10+, this is because it has moved on to
|
||||
using FQCNs, with the exception of `set_fact` tasks which have
|
||||
[been broken](https://github.com/ansible/ansible/issues/72319) and the fixes
|
||||
have [not yet been backported to v2.10](https://github.com/ansible/ansible/pull/71824).
|
||||
|
||||
The use of FQCNs allows for custom modules to be introduced to override task
|
||||
behavior. If this role requires a custom ansible module to be introduced then
|
||||
this can be added as a dependency and targeted specifically by using the
|
||||
correct FQCN.
|
|
@ -0,0 +1,46 @@
|
|||
# Contribution Guidelines
|
||||
|
||||
Thank you for taking time to contribute to this Ansible role.
|
||||
|
||||
There are a number of ways that you can contribute to this project, not all of
|
||||
them requiring you to be able to write code. Below is a list of suggested
|
||||
contributions welcomed by the community:
|
||||
|
||||
- Submit bug reports in GitHub issues
|
||||
- Comment on bug reports with further information or suggestions
|
||||
- Suggest new features
|
||||
- Create Pull Requests fixing bugs or adding new features
|
||||
- Update and improve documentation
|
||||
- Review the role on Ansible Galaxy
|
||||
- Write a blog post reviewing the role
|
||||
- Sponsor me.
|
||||
|
||||
## Issue guidelines
|
||||
|
||||
Issues are the best way to capture a bug in the role, or suggest new features.
|
||||
This is due to issues being visible to the entire community and allows for
|
||||
other contributors to pick up the work, so is a better communication medium
|
||||
than email.
|
||||
|
||||
A good bug issue will include as much information as possible about the
|
||||
environment Ansible is running in, as well as the role configuration. If there
|
||||
are any relevant pieces of documentation from upstream projects, this should
|
||||
be included.
|
||||
|
||||
New feature requests are also best captured in issues, these should include
|
||||
as much relevant information as possible and if possible include a "user story"
|
||||
(don't sweat if you don't know how to write one). If there are any relevant
|
||||
pieces of documentation from upstream projects, this should be included.
|
||||
|
||||
## Pull request guidelines
|
||||
|
||||
PRs should only contain 1 issue fix at a time to limit the scope of testing
|
||||
required. The smaller the scope of the PR, the easier it is for it to be
|
||||
reviewed.
|
||||
|
||||
PRs should include the keyword `Fixes` before an issue number if the PR will
|
||||
completely close the issue. This is because automation will close the issue
|
||||
once the PR is merged.
|
||||
|
||||
PRs are preferred to be merged in as a single commit, so rebasing before
|
||||
pushing is recommended, however this isn't a strict rule.
|
|
@ -1,22 +1,25 @@
|
|||
Copyright 2019 Xan Manning
|
||||
BSD 3-Clause License
|
||||
|
||||
Copyright (c) 2020, Xan Manning
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its contributors
|
||||
may be used to endorse or promote products derived from this software without
|
||||
specific prior written permission.
|
||||
3. Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
|
@ -24,3 +27,4 @@ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
|||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
407
README.md
407
README.md
|
@ -1,83 +1,412 @@
|
|||
# Ansible Role: k3s
|
||||
# Ansible Role: k3s (v3.x)
|
||||
|
||||
Ansible role for installing [Rancher Labs k3s](https://k3s.io/) ("Lightweight
|
||||
Ansible role for installing [K3S](https://k3s.io/) ("Lightweight
|
||||
Kubernetes") as either a standalone server or cluster.
|
||||
|
||||
[![CI](https://github.com/PyratLabs/ansible-role-k3s/workflows/CI/badge.svg?event=push)](https://github.com/PyratLabs/ansible-role-k3s/actions?query=workflow%3ACI)
|
||||
|
||||
## Help Wanted!
|
||||
|
||||
Hi! :wave: [@xanmanning](https://github.com/xanmanning) is looking for a new
|
||||
maintainer to work on this Ansible role. This is because I don't have as much
|
||||
free time any more and I no longer write Ansible regularly as part of my day
|
||||
job. If you're interested, get in touch.
|
||||
|
||||
## Release notes
|
||||
|
||||
Please see [Releases](https://github.com/PyratLabs/ansible-role-k3s/releases)
|
||||
and [CHANGELOG.md](CHANGELOG.md).
|
||||
|
||||
## Requirements
|
||||
|
||||
This role has been tested on Ansible 2.6.0+ against the following Linux Distributions:
|
||||
The host you're running Ansible from requires the following Python dependencies:
|
||||
|
||||
- CentOS 7
|
||||
- Debian 9
|
||||
- Ubuntu 18.04 LTS
|
||||
- `python >= 3.6.0` - [See Notes below](#important-note-about-python).
|
||||
- `ansible >= 2.9.16` or `ansible-base >= 2.10.4`
|
||||
|
||||
## Disclaimer
|
||||
You can install dependencies using the requirements.txt file in this repository:
|
||||
`pip3 install -r requirements.txt`.
|
||||
|
||||
:warning: Not suitable for production use.
|
||||
This role has been tested against the following Linux Distributions:
|
||||
|
||||
Whilst Rancher Labs are awesome, k3s is a fairly new project and not yet a v1.0
|
||||
release so extreme caution and operational rigor is recommended before using
|
||||
this role for any serious development.
|
||||
- Alpine Linux
|
||||
- Amazon Linux 2
|
||||
- Archlinux
|
||||
- CentOS 8
|
||||
- Debian 11
|
||||
- Fedora 31
|
||||
- Fedora 32
|
||||
- Fedora 33
|
||||
- openSUSE Leap 15
|
||||
- RockyLinux 8
|
||||
- Ubuntu 20.04 LTS
|
||||
|
||||
:warning: The v3 releases of this role only supports `k3s >= v1.19`, for
|
||||
`k3s < v1.19` please consider updating or use the v1.x releases of this role.
|
||||
|
||||
Before upgrading, see [CHANGELOG](CHANGELOG.md) for notifications of breaking
|
||||
changes.
|
||||
|
||||
## Role Variables
|
||||
|
||||
### Group Variables
|
||||
Since K3s [v1.19.1+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.19.1%2Bk3s1)
|
||||
you can now configure K3s using a
|
||||
[configuration file](https://rancher.com/docs/k3s/latest/en/installation/install-options/#configuration-file)
|
||||
rather than environment variables or command line arguments. The v2 release of
|
||||
this role has moved to the configuration file method rather than populating a
|
||||
systemd unit file with command-line arguments. There may be exceptions that are
|
||||
defined in [Global/Cluster Variables](#globalcluster-variables), however you will
|
||||
mostly be configuring k3s by configuration files using the `k3s_server` and
|
||||
`k3s_agent` variables.
|
||||
|
||||
See "_Server (Control Plane) Configuration_" and "_Agent (Worker) Configuration_"
|
||||
below.
|
||||
|
||||
### Global/Cluster Variables
|
||||
|
||||
Below are variables that are set against all of the play hosts for environment
|
||||
consistency.
|
||||
consistency. These are generally cluster-level configuration.
|
||||
|
||||
| Variable | Description | Default Value |
|
||||
|--------------------------------|--------------------------------------------------------------------------|--------------------------------|
|
||||
| `k3s_release_version` | Use a specific version of k3s, eg. `v0.2.0`. Specify `false` for latest. | `false` |
|
||||
| `k3s_github_url` | Set the GitHub URL to install k3s from. | https://github.com/rancher/k3s |
|
||||
| `k3s_install_dir` | Installation directory for k3s. | `/usr/local/bin` |
|
||||
| `k3s_control_workers` | Are control hosts also workers? | `true` |
|
||||
| `k3s_ensure_docker_installed`  | Use Docker rather than Containerd? | `false` |
|
||||
| Variable | Description | Default Value |
|
||||
|--------------------------------------|--------------------------------------------------------------------------------------------|--------------------------------|
|
||||
| `k3s_state` | State of k3s: installed, started, stopped, downloaded, uninstalled, validated. | installed |
|
||||
| `k3s_release_version` | Use a specific version of k3s, eg. `v0.2.0`. Specify `false` for stable. | `false` |
|
||||
| `k3s_airgap` | Boolean to enable air-gapped installations | `false` |
|
||||
| `k3s_config_file` | Location of the k3s configuration file. | `/etc/rancher/k3s/config.yaml` |
|
||||
| `k3s_build_cluster` | When multiple play hosts are available, attempt to cluster. Read notes below. | `true` |
|
||||
| `k3s_registration_address` | Fixed registration address for nodes. IP or FQDN. | NULL |
|
||||
| `k3s_github_url` | Set the GitHub URL to install k3s from. | https://github.com/k3s-io/k3s |
|
||||
| `k3s_api_url` | URL for K3S updates API. | https://update.k3s.io |
|
||||
| `k3s_install_dir` | Installation directory for k3s. | `/usr/local/bin` |
|
||||
| `k3s_install_hard_links` | Install using hard links rather than symbolic links. | `false` |
|
||||
| `k3s_server_config_yaml_d_files` | A flat list of templates to supplement the `k3s_server` configuration. | [] |
|
||||
| `k3s_agent_config_yaml_d_files` | A flat list of templates to supplement the `k3s_agent` configuration. | [] |
|
||||
| `k3s_server_manifests_urls` | A list of URLs to deploy on the primary control plane. Read notes below. | [] |
|
||||
| `k3s_server_manifests_templates` | A flat list of templates to deploy on the primary control plane. | [] |
|
||||
| `k3s_server_pod_manifests_urls` | A list of URLs for installing static pod manifests on the control plane. Read notes below. | [] |
|
||||
| `k3s_server_pod_manifests_templates` | A flat list of templates for installing static pod manifests on the control plane. | [] |
|
||||
| `k3s_use_experimental` | Allow the use of experimental features in k3s. | `false` |
|
||||
| `k3s_use_unsupported_config` | Allow the use of unsupported configurations in k3s. | `false` |
|
||||
| `k3s_etcd_datastore` | Enable etcd embedded datastore (read notes below). | `false` |
|
||||
| `k3s_debug` | Enable debug logging on the k3s service. | `false` |
|
||||
| `k3s_registries` | Registries configuration file content. | `{ mirrors: {}, configs:{} }` |
|
||||
|
||||
### K3S Service Configuration
|
||||
|
||||
The below variables change how and when the systemd service unit file for K3S
|
||||
is run. Use this with caution, please refer to the [systemd documentation](https://www.freedesktop.org/software/systemd/man/systemd.unit.html#%5BUnit%5D%20Section%20Options)
|
||||
for more information.
|
||||
|
||||
| Variable | Description | Default Value |
|
||||
|------------------------|----------------------------------------------------------------------|---------------|
|
||||
| `k3s_start_on_boot` | Start k3s on boot. | `true` |
|
||||
| `k3s_service_requires` | List of required systemd units to k3s service unit. | [] |
|
||||
| `k3s_service_wants` | List of "wanted" systemd unit to k3s (weaker than "requires"). | []\* |
|
||||
| `k3s_service_before` | Start k3s before a defined list of systemd units. | [] |
|
||||
| `k3s_service_after` | Start k3s after a defined list of systemd units. | []\* |
|
||||
| `k3s_service_env_vars` | Dictionary of environment variables to use within systemd unit file. | {} |
|
||||
| `k3s_service_env_file` | Location on host of an environment file to include. | `false`\*\* |
|
||||
|
||||
\* The systemd unit template **always** specifies `network-online.target` for
|
||||
`wants` and `after`.
|
||||
|
||||
\*\* The file must already exist on the target host, this role will not create
|
||||
nor manage the file. You can manage this file outside of the role with
|
||||
pre-tasks in your Ansible playbook.
|
||||
|
||||
### Group/Host Variables
|
||||
|
||||
Below are variables that are set against individual or groups of play hosts.
|
||||
Typically you'd set these at group level for the control plane or worker nodes.
|
||||
|
||||
| Variable | Description | Default Value |
|
||||
|--------------------|-------------------------------------------------------------------|---------------------------------------------------|
|
||||
| `k3s_control_node` | Specify if a host (or host group) are part of the control plane. | `false` (role will automatically delegate a node) |
|
||||
| `k3s_server` | Server (control plane) configuration, see notes below. | `{}` |
|
||||
| `k3s_agent` | Agent (worker) configuration, see notes below. | `{}` |
|
||||
|
||||
#### Server (Control Plane) Configuration
|
||||
|
||||
The control plane is configured with the `k3s_server` dict variable. Please
|
||||
refer to the below documentation for configuration options:
|
||||
|
||||
https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/
|
||||
|
||||
The `k3s_server` dictionary variable will contain flags from the above
|
||||
(removing the `--` prefix). Below is an example:
|
||||
|
||||
```yaml
|
||||
k3s_server:
|
||||
datastore-endpoint: postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable
|
||||
cluster-cidr: 172.20.0.0/16
|
||||
flannel-backend: 'none' # This needs to be in quotes
|
||||
disable:
|
||||
- traefik
|
||||
- coredns
|
||||
```
|
||||
|
||||
Alternatively, you can create a .yaml file and read it in to the `k3s_server`
|
||||
variable as per the below example:
|
||||
|
||||
```yaml
|
||||
k3s_server: "{{ lookup('file', 'path/to/k3s_server.yml') | from_yaml }}"
|
||||
```
|
||||
|
||||
Check out the [Documentation](documentation/README.md) for example
|
||||
configuration.
|
||||
|
||||
#### Agent (Worker) Configuration
|
||||
|
||||
Workers are configured with the `k3s_agent` dict variable. Please refer to the
|
||||
below documentation for configuration options:
|
||||
|
||||
https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config
|
||||
|
||||
The `k3s_agent` dictionary variable will contain flags from the above
|
||||
(removing the `--` prefix). Below is an example:
|
||||
|
||||
```yaml
|
||||
k3s_agent:
|
||||
with-node-id: true
|
||||
node-label:
|
||||
- "foo=bar"
|
||||
- "hello=world"
|
||||
```
|
||||
|
||||
Alternatively, you can create a .yaml file and read it in to the `k3s_agent`
|
||||
variable as per the below example:
|
||||
|
||||
```yaml
|
||||
k3s_agent: "{{ lookup('file', 'path/to/k3s_agent.yml') | from_yaml }}"
|
||||
```
|
||||
|
||||
Check out the [Documentation](documentation/README.md) for example
|
||||
configuration.
|
||||
|
||||
### Ansible Controller Configuration Variables
|
||||
|
||||
The below variables are used to change the way the role executes in Ansible,
|
||||
particularly with regards to privilege escalation.
|
||||
|
||||
| Variable | Description | Default Value |
|
||||
|------------------------|----------------------------------------------------------------|---------------|
|
||||
| `k3s_skip_validation` | Skip all tasks that validate configuration. | `false` |
|
||||
| `k3s_skip_env_checks` | Skip all tasks that check environment configuration. | `false` |
|
||||
| `k3s_skip_post_checks` | Skip all tasks that check post execution state. | `false` |
|
||||
| `k3s_become` | Escalate user privileges for tasks that need root permissions. | `false` |
|
||||
|
||||
#### Important note about Python
|
||||
|
||||
From v3 of this role, Python 3 is required on the target system as well as on
|
||||
the Ansible controller. This is to ensure consistent behaviour for Ansible
|
||||
tasks as Python 2 is now EOL.
|
||||
|
||||
If target systems have both Python 2 and Python 3 installed, it is most likely
|
||||
that Python 2 will be selected by default. To ensure Python 3 is used on a
|
||||
target with both versions of Python, ensure `ansible_python_interpreter` is
|
||||
set in your inventory. Below is an example inventory:
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_cluster:
|
||||
hosts:
|
||||
kube-0:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.2
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
kube-1:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.3
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
kube-2:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.4
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
```
|
||||
|
||||
#### Important note about `k3s_release_version`
|
||||
|
||||
If you do not set a `k3s_release_version` the latest version of k3s will be
|
||||
installed. If you are developing against a specific version of k3s you must
|
||||
ensure this is set in your Ansible configuration, eg:
|
||||
If you do not set a `k3s_release_version` the latest version from the stable
|
||||
channel of k3s will be installed. If you are developing against a specific
|
||||
version of k3s you must ensure this is set in your Ansible configuration, eg:
|
||||
|
||||
```yaml
|
||||
k3s_release_version: v0.2.0
|
||||
k3s_release_version: v1.19.3+k3s1
|
||||
```
|
||||
|
||||
### Host Variables
|
||||
It is also possible to install specific K3s "Channels", below are some
|
||||
examples for `k3s_release_version`:
|
||||
|
||||
Below are variables that are set against specific hosts in your inventory.
|
||||
```yaml
|
||||
k3s_release_version: false # defaults to 'stable' channel
|
||||
k3s_release_version: stable # latest 'stable' release
|
||||
k3s_release_version: testing # latest 'testing' release
|
||||
k3s_release_version: v1.19 # latest 'v1.19' release
|
||||
k3s_release_version: v1.19.3+k3s3 # specific release
|
||||
|
||||
| Variable | Description | Default Value |
|
||||
|--------------------|--------------------------------------------------------|---------------|
|
||||
| `k3s_control_node` | Define the host as a control plane node, (True/False). | `false` |
|
||||
# Specific commit
|
||||
# CAUTION - only used for testing - must be 40 characters
|
||||
k3s_release_version: 48ed47c4a3e420fa71c18b2ec97f13dc0659778b
|
||||
```
|
||||
|
||||
#### Important note about `k3s_control_node`
|
||||
#### Important note about `k3s_install_hard_links`
|
||||
|
||||
Currently only one host can be defined as a control node; if multiple hosts are
|
||||
set to true the play will fail.
|
||||
If you are using the [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller)
|
||||
you will need to use hard links rather than symbolic links as the controller
|
||||
will not be able to follow symbolic links. This option has been added however
|
||||
is not enabled by default to avoid breaking existing installations.
|
||||
|
||||
If you do not set a host as a control node, the role will automatically delegate
|
||||
the first play host as a control node.
|
||||
To enable the use of hard links, ensure `k3s_install_hard_links` is set
|
||||
to `true`.
|
||||
|
||||
```yaml
|
||||
k3s_install_hard_links: true
|
||||
```
|
||||
|
||||
The result of this can be seen by running the following in `k3s_install_dir`:
|
||||
|
||||
`ls -larthi | grep -E 'k3s|ctr|ctl' | grep -vE ".sh$" | sort`
|
||||
|
||||
Symbolic Links:
|
||||
|
||||
```text
|
||||
[root@node1 bin]# ls -larthi | grep -E 'k3s|ctr|ctl' | grep -vE ".sh$" | sort
|
||||
3277823 -rwxr-xr-x 1 root root 52M Jul 25 12:50 k3s-v1.18.4+k3s1
|
||||
3279565 lrwxrwxrwx 1 root root 31 Jul 25 12:52 k3s -> /usr/local/bin/k3s-v1.18.6+k3s1
|
||||
3279644 -rwxr-xr-x 1 root root 51M Jul 25 12:52 k3s-v1.18.6+k3s1
|
||||
3280079 lrwxrwxrwx 1 root root 31 Jul 25 12:52 ctr -> /usr/local/bin/k3s-v1.18.6+k3s1
|
||||
3280080 lrwxrwxrwx 1 root root 31 Jul 25 12:52 crictl -> /usr/local/bin/k3s-v1.18.6+k3s1
|
||||
3280081 lrwxrwxrwx 1 root root 31 Jul 25 12:52 kubectl -> /usr/local/bin/k3s-v1.18.6+k3s1
|
||||
```
|
||||
|
||||
Hard Links:
|
||||
|
||||
```text
|
||||
[root@node1 bin]# ls -larthi | grep -E 'k3s|ctr|ctl' | grep -vE ".sh$" | sort
|
||||
3277823 -rwxr-xr-x 1 root root 52M Jul 25 12:50 k3s-v1.18.4+k3s1
|
||||
3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 crictl
|
||||
3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 ctr
|
||||
3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 k3s
|
||||
3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 k3s-v1.18.6+k3s1
|
||||
3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 kubectl
|
||||
```
|
||||
|
||||
#### Important note about `k3s_build_cluster`
|
||||
|
||||
If you set `k3s_build_cluster` to `false`, this role will install each play
|
||||
host as a standalone node. An example of when you might use this would be
|
||||
when building a large number of standalone IoT devices running K3s. Below is a
|
||||
hypothetical situation where we are to deploy 25 Raspberry Pi devices, each a
|
||||
standalone system and not a cluster of 25 nodes. To do this we'd use a playbook
|
||||
similar to the below:
|
||||
|
||||
```yaml
|
||||
- hosts: k3s_nodes # eg. 25 RPi's defined in our inventory.
|
||||
vars:
|
||||
k3s_build_cluster: false
|
||||
roles:
|
||||
- xanmanning.k3s
|
||||
```
|
||||
|
||||
#### Important note about `k3s_control_node` and High Availability (HA)
|
||||
|
||||
By default only one host will be defined as a control node by Ansible. If you
|
||||
do not set a host as a control node, this role will automatically delegate
|
||||
the first play host as a control node. This is not suitable for use within
|
||||
a Production workload.
|
||||
|
||||
If multiple hosts have `k3s_control_node` set to `true`, you must also set
|
||||
`datastore-endpoint` in `k3s_server` as the connection string to a MySQL or
|
||||
PostgreSQL database, or external Etcd cluster else the play will fail.
|
||||
|
||||
If using TLS, the CA, Certificate and Key need to already be available on
|
||||
the play hosts.
|
||||
|
||||
See: [High Availability with an External DB](https://rancher.com/docs/k3s/latest/en/installation/ha/)
|
||||
|
||||
It is also possible, though not supported, to run a single K3s control node
|
||||
with a `datastore-endpoint` defined. As this is not a typically supported
|
||||
configuration you will need to set `k3s_use_unsupported_config` to `true`.
|
||||
|
||||
Since K3s v1.19.1 it is possible to use an embedded Etcd as the backend
|
||||
database, and this is done by setting `k3s_etcd_datastore` to `true`.
|
||||
The best practice for Etcd is to define at least 3 members to ensure quorum is
|
||||
established. In addition to this, an odd number of members is recommended to
|
||||
ensure a majority in the event of a network partition. If you want to use 2
|
||||
members or an even number of members, please set `k3s_use_unsupported_config`
|
||||
to `true`.
|
||||
|
||||
#### Important note about `k3s_server_manifests_urls` and `k3s_server_pod_manifests_urls`
|
||||
|
||||
To deploy server manifests and server pod manifests from URL, you need to
|
||||
specify a `url` and optionally a `filename` (if none is provided, the basename is used). Below is an example of how to deploy the
|
||||
Tigera operator for Calico and kube-vip.
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_server_manifests_urls:
|
||||
- url: https://docs.projectcalico.org/archive/v3.19/manifests/tigera-operator.yaml
|
||||
filename: tigera-operator.yaml
|
||||
|
||||
k3s_server_pod_manifests_urls:
|
||||
- url: https://raw.githubusercontent.com/kube-vip/kube-vip/main/example/deploy/0.1.4.yaml
|
||||
filename: kube-vip.yaml
|
||||
|
||||
```
|
||||
|
||||
#### Important note about `k3s_airgap`
|
||||
|
||||
When deploying k3s in an air-gapped environment you should provide the `k3s` binary in `./files/`. The binary will not be downloaded from GitHub and will subsequently not be verified using the provided sha256 sum, nor will the role be able to verify the version that you are running. All associated risks and burdens are assumed by the user in this scenario.
|
||||
|
||||
## Dependencies
|
||||
|
||||
No dependencies on other roles.
|
||||
|
||||
## Example Playbook
|
||||
## Example Playbooks
|
||||
|
||||
Example playbook:
|
||||
Example playbook, single control node running `testing` channel k3s:
|
||||
|
||||
```yaml
|
||||
- hosts: k3s_nodes
|
||||
vars:
|
||||
k3s_release_version: testing
|
||||
roles:
|
||||
- { role: xanmanning.k3s, k3s_release_version: v0.2.0 }
|
||||
- role: xanmanning.k3s
|
||||
```
|
||||
|
||||
Example playbook, Highly Available with PostgreSQL database running the latest
|
||||
stable release:
|
||||
|
||||
```yaml
|
||||
- hosts: k3s_nodes
|
||||
vars:
|
||||
k3s_registration_address: loadbalancer # Typically a load balancer.
|
||||
k3s_server:
|
||||
datastore-endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable"
|
||||
pre_tasks:
|
||||
- name: Set each node to be a control node
|
||||
ansible.builtin.set_fact:
|
||||
k3s_control_node: true
|
||||
when: inventory_hostname in ['node2', 'node3']
|
||||
roles:
|
||||
- role: xanmanning.k3s
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
BSD
|
||||
[BSD 3-clause](LICENSE.txt)
|
||||
|
||||
## Contributors
|
||||
|
||||
Contributions from the community are very welcome, but please read the
|
||||
[contribution guidelines](CONTRIBUTING.md) before doing so, this will help
|
||||
make things as streamlined as possible.
|
||||
|
||||
Also, please check out the awesome
|
||||
[list of contributors](https://github.com/PyratLabs/ansible-role-k3s/graphs/contributors).
|
||||
|
||||
## Author Information
|
||||
|
||||
[Xan Manning](https://xanmanning.co.uk/)
|
||||
[Xan Manning](https://xan.manning.io/)
|
||||
|
|
|
@ -1,17 +1,157 @@
|
|||
---
|
||||
|
||||
##
|
||||
# Global/Cluster Configuration
|
||||
##
|
||||
|
||||
# k3s state, options: installed, started, stopped, restarted, uninstalled, validated
|
||||
# (default: installed)
|
||||
k3s_state: installed
|
||||
|
||||
# Use a specific k3s version, if set to "false" we will get the latest
|
||||
# k3s_release_version: v0.1.0
|
||||
# k3s_release_version: v1.19.3
|
||||
k3s_release_version: false
|
||||
|
||||
# Location of the k3s configuration file
|
||||
k3s_config_file: "/etc/rancher/k3s/config.yaml"
|
||||
|
||||
# Location of the k3s configuration directory
|
||||
k3s_config_yaml_d_dir: "/etc/rancher/k3s/config.yaml.d"
|
||||
|
||||
# When multiple ansible_play_hosts are present, attempt to cluster the nodes.
|
||||
# Using false will create multiple standalone nodes.
|
||||
# (default: true)
|
||||
k3s_build_cluster: true
|
||||
|
||||
# URL for GitHub project
|
||||
k3s_github_url: https://github.com/rancher/k3s
|
||||
k3s_github_url: https://github.com/k3s-io/k3s
|
||||
|
||||
# URL for K3s updates API
|
||||
k3s_api_url: https://update.k3s.io
|
||||
|
||||
# Install K3s in Air Gapped scenarios
|
||||
k3s_airgap: false
|
||||
|
||||
# Skip all tasks that validate configuration
|
||||
k3s_skip_validation: false
|
||||
|
||||
# Skip all tasks that check environment configuration
|
||||
k3s_skip_env_checks: false
|
||||
|
||||
# Skip post-checks
|
||||
k3s_skip_post_checks: false
|
||||
|
||||
# Installation directory for k3s
|
||||
k3s_install_dir: /usr/local/bin
|
||||
|
||||
# Are control hosts also worker nodes?
|
||||
k3s_control_workers: true
|
||||
# Install using hard links rather than symbolic links
|
||||
k3s_install_hard_links: false
|
||||
|
||||
# Ensure Docker is installed on nodes
|
||||
k3s_ensure_docker_installed: false
|
||||
# A list of templates used for configuring the server.
|
||||
k3s_server_config_yaml_d_files: []
|
||||
|
||||
# A list of templates used for configuring the agent.
|
||||
k3s_agent_config_yaml_d_files: []
|
||||
|
||||
# A list of templates used for pre-configuring the cluster.
|
||||
k3s_server_manifests_templates: []
|
||||
|
||||
# A list of URLs used for pre-configuring the cluster.
|
||||
k3s_server_manifests_urls: []
|
||||
# - url: https://some/url/to/manifest.yml
|
||||
# filename: manifest.yml
|
||||
|
||||
# A list of templates used for installing static pod manifests on the control plane.
|
||||
k3s_server_pod_manifests_templates: []
|
||||
|
||||
# A list of URLs used for installing static pod manifests on the control plane.
|
||||
k3s_server_pod_manifests_urls: []
|
||||
# - url: https://some/url/to/manifest.yml
|
||||
# filename: manifest.yml
|
||||
|
||||
# Use experimental features in k3s?
|
||||
k3s_use_experimental: false
|
||||
|
||||
# Allow for unsupported configurations in k3s?
|
||||
k3s_use_unsupported_config: false
|
||||
|
||||
# Enable etcd embedded datastore
|
||||
k3s_etcd_datastore: false
|
||||
|
||||
##
|
||||
# Systemd config
|
||||
##
|
||||
|
||||
# Start k3s on system boot
|
||||
k3s_start_on_boot: true
|
||||
|
||||
# List of required systemd units to k3s service unit.
|
||||
k3s_service_requires: []
|
||||
|
||||
# List of "wanted" systemd unit to k3s (weaker than "requires").
|
||||
k3s_service_wants: []
|
||||
|
||||
# Start k3s before a defined list of systemd units.
|
||||
k3s_service_before: []
|
||||
|
||||
# Start k3s after a defined list of systemd units.
|
||||
k3s_service_after: []
|
||||
|
||||
# Dictionary of environment variables to use within systemd unit file
|
||||
# Some examples below
|
||||
k3s_service_env_vars: {}
|
||||
# PATH: /opt/k3s/bin
|
||||
# GOGC: 10
|
||||
|
||||
# Location on host of a environment file to include. This must already exist on
|
||||
# the target as this role will not populate this file.
|
||||
k3s_service_env_file: false
|
||||
|
||||
|
||||
##
|
||||
# Server Configuration
|
||||
##
|
||||
|
||||
k3s_server: {}
|
||||
# k3s_server:
|
||||
# listen-port: 6443
|
||||
|
||||
##
|
||||
# Agent Configuration
|
||||
##
|
||||
|
||||
k3s_agent: {}
|
||||
# k3s_agent:
|
||||
# node-label:
|
||||
# - "foo=bar"
|
||||
# - "bish=bosh"
|
||||
|
||||
##
|
||||
# Ansible Controller configuration
|
||||
##
|
||||
|
||||
# Use become privileges?
|
||||
k3s_become: false
|
||||
|
||||
# Private registry configuration.
|
||||
# Rancher k3s documentation: https://rancher.com/docs/k3s/latest/en/installation/private-registry/
|
||||
k3s_registries:
|
||||
|
||||
mirrors:
|
||||
# docker.io:
|
||||
# endpoint:
|
||||
# - "https://mycustomreg.com:5000"
|
||||
configs:
|
||||
# "mycustomreg:5000":
|
||||
# auth:
|
||||
# # this is the registry username
|
||||
# username: xxxxxx
|
||||
# # this is the registry password
|
||||
# password: xxxxxx
|
||||
# tls:
|
||||
# # path to the cert file used in the registry
|
||||
# cert_file:
|
||||
# # path to the key file used in the registry
|
||||
# key_file:
|
||||
# # path to the ca file used in the registry
|
||||
# ca_file:
|
||||
|
|
|
@ -0,0 +1,44 @@
|
|||
# ansible-role-k3s
|
||||
|
||||
This document describes a number of ways of consuming this Ansible role for use
|
||||
in your own k3s deployments. It will not be able to cover every use case
|
||||
scenario but will provide some common example configurations.
|
||||
|
||||
## Requirements
|
||||
|
||||
Before you start you will need an Ansible controller. This can either be your
|
||||
workstation, or a dedicated system that you have access to. The instructions
|
||||
in this documentation assume you are using `ansible` CLI, there are no
|
||||
instructions available for Ansible Tower at this time.
|
||||
|
||||
Follow the below guide to get Ansible installed.
|
||||
|
||||
https://docs.ansible.com/ansible/latest/installation_guide/index.html
|
||||
|
||||
## Quickstart
|
||||
|
||||
Below are quickstart examples for a single node k3s server, a k3s cluster
|
||||
with a single control node and HA k3s cluster. These represent the bare
|
||||
minimum configuration.
|
||||
|
||||
- [Single node k3s](quickstart-single-node.md)
|
||||
- [Simple k3s cluster](quickstart-cluster.md)
|
||||
- [HA k3s cluster using embedded etcd](quickstart-ha-cluster.md)
|
||||
|
||||
## Example configurations and operations
|
||||
|
||||
### Configuration
|
||||
|
||||
- [Setting up 2-node HA control plane with external datastore](configuration/2-node-ha-ext-datastore.md)
|
||||
- [Provision multiple standalone k3s nodes](configuration/multiple-standalone-k3s-nodes.md)
|
||||
- [Set node labels and component arguments](configuration/node-labels-and-component-args.md)
|
||||
- [Use an alternate CNI](configuration/use-an-alternate-cni.md)
|
||||
- [IPv4/IPv6 Dual-Stack config](configuration/ipv4-ipv6-dual-stack.md)
|
||||
- [Start K3S after another service](configuration/systemd-config.md)
|
||||
|
||||
### Operations
|
||||
|
||||
- [Stop/Start a cluster](operations/stop-start-cluster.md)
|
||||
- [Updating k3s](operations/updating-k3s.md)
|
||||
- [Extending a cluster](operations/extending-a-cluster.md)
|
||||
- [Shrinking a cluster](operations/shrinking-a-cluster.md)
|
|
@ -0,0 +1,79 @@
|
|||
# 2 Node HA Control Plane with external database
|
||||
|
||||
For this configuration we are deploying a highly available control plane
|
||||
composed of two control nodes. This can be achieved with embedded etcd, however
|
||||
etcd ideally has an odd number of nodes.
|
||||
|
||||
The example below will use an external PostgreSQL datastore to store the
|
||||
cluster state information.
|
||||
|
||||
Main guide: https://rancher.com/docs/k3s/latest/en/installation/ha/
|
||||
|
||||
## Architecture
|
||||
|
||||
```text
|
||||
+-------------------+
|
||||
| Load Balancer/VIP |
|
||||
+---------+---------+
|
||||
|
|
||||
|
|
||||
|
|
||||
|
|
||||
+------------+ | +------------+
|
||||
| | | | |
|
||||
+--------+ control-01 +<-----+----->+ control-02 |
|
||||
| | | | |
|
||||
| +-----+------+ +------+-----+
|
||||
| | |
|
||||
| +-------------+-------------+
|
||||
| | | |
|
||||
| +------v----+ +-----v-----+ +----v------+
|
||||
| | | | | | |
|
||||
| | worker-01 | | worker-02 | | worker-03 |
|
||||
| | | | | | |
|
||||
| +-----------+ +-----------+ +-----------+
|
||||
|
|
||||
| +-------+ +-------+
|
||||
| | | | |
|
||||
+-------------------> db-01 +--+ db-02 |
|
||||
| | | |
|
||||
+-------+ +-------+
|
||||
```
|
||||
|
||||
### Required Components
|
||||
|
||||
- Load balancer
|
||||
- 2 control plane nodes
|
||||
- 1 or more worker nodes
|
||||
- PostgreSQL Database (replicated, or Linux HA Cluster).
|
||||
|
||||
## Configuration
|
||||
|
||||
For your control nodes, you will need to instruct the control plane of the
|
||||
PostgreSQL datastore endpoint and set `k3s_registration_address` to be the
|
||||
hostname or IP of your load balancer or VIP.
|
||||
|
||||
Below is the example for PostgreSQL, it is possible to use MySQL or an Etcd
|
||||
cluster as well. Consult the below guide for using alternative datastore
|
||||
endpoints.
|
||||
|
||||
https://rancher.com/docs/k3s/latest/en/installation/datastore/#datastore-endpoint-format-and-functionality
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_server:
|
||||
datastore-endpoint: postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable
|
||||
node-taint:
|
||||
- "k3s-controlplane=true:NoExecute"
|
||||
```
|
||||
|
||||
Your worker nodes need to know how to connect to the control plane, this is
|
||||
defined by setting `k3s_registration_address` to the hostname or IP address of
|
||||
the load balancer.
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_registration_address: control.examplek3s.com
|
||||
```
|
|
@ -0,0 +1,21 @@
|
|||
# IPv4 and IPv6 Dual-stack config
|
||||
|
||||
If you need to run your K3S cluster with both IPv4 and IPv6 address ranges
|
||||
you will need to configure the `k3s_server.cluster-cidr` and
|
||||
`k3s_server.service-cidr` values specifying both ranges.
|
||||
|
||||
:hand: if you are using `k3s<1.23` you will need to use a different CNI as
|
||||
dual-stack support is not available in Flannel.
|
||||
|
||||
Below is a simple example:
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_server:
|
||||
# Using Calico on k3s<1.23 so Flannel needs to be disabled.
|
||||
flannel-backend: 'none'
|
||||
# Format: ipv4/cidr,ipv6/cidr
|
||||
cluster-cidr: 10.42.0.0/16,fc00:a0::/64
|
||||
service-cidr: 10.43.0.0/16,fc00:a1::/64
|
||||
```
|
|
@ -0,0 +1,71 @@
|
|||
# Multiple standalone K3s nodes
|
||||
|
||||
This is an example of when you might want to configure multiple standalone
|
||||
k3s nodes simultaneously. For this we will assume a hypothetical situation
|
||||
where we are configuring 25 Raspberry Pis to deploy to our shop floors.
|
||||
|
||||
Each Raspberry Pi will be configured as a standalone IoT device hosting an
|
||||
application that will push data to head office.
|
||||
|
||||
## Architecture
|
||||
|
||||
```text
|
||||
+-------------+
|
||||
| |
|
||||
| Node-01 +-+
|
||||
| | |
|
||||
+--+----------+ +-+
|
||||
| | |
|
||||
+--+---------+ +-+
|
||||
| | |
|
||||
+--+--------+ |
|
||||
| | Node-N
|
||||
+----------+
|
||||
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
Below is our example inventory of 200 nodes (Truncated):
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_workers:
|
||||
hosts:
|
||||
kube-0:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.2
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
kube-1:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.3
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
kube-2:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.4
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
|
||||
# ..... SNIP .....
|
||||
|
||||
kube-199:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.201
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
kube-200:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.202
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
|
||||
```
|
||||
|
||||
In our `group_vars/` (or as `vars:` in our playbook), we will need to set the
|
||||
`k3s_build_cluster` variable to `false`. This will stop the role from
|
||||
attempting to cluster all 200 nodes, instead it will install k3s across each
|
||||
node as 200 standalone servers.
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_build_cluster: false
|
||||
```
|
|
@ -0,0 +1,39 @@
|
|||
# Configure node labels and component arguments
|
||||
|
||||
The following command line arguments can be specified multiple times with
|
||||
`key=value` pairs:
|
||||
|
||||
- `--kube-kubelet-arg`
|
||||
- `--kube-proxy-arg`
|
||||
- `--kube-apiserver-arg`
|
||||
- `--kube-scheduler-arg`
|
||||
- `--kube-controller-manager-arg`
|
||||
- `--kube-cloud-controller-manager-arg`
|
||||
- `--node-label`
|
||||
- `--node-taint`
|
||||
|
||||
In the config file, this is done by defining a list of values for each
|
||||
command line argument, for example:
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_server:
|
||||
# Set the plugins registry directory
|
||||
kubelet-arg:
|
||||
- "volume-plugin-dir=/var/lib/rancher/k3s/agent/kubelet/plugins_registry"
|
||||
# Set the pod eviction timeout and node monitor grace period
|
||||
kube-controller-manager-arg:
|
||||
- "pod-eviction-timeout=2m"
|
||||
- "node-monitor-grace-period=30s"
|
||||
# Set API server feature gate
|
||||
kube-apiserver-arg:
|
||||
- "feature-gates=RemoveSelfLink=false"
|
||||
  # Labels to apply to a node
|
||||
node-label:
|
||||
- "NodeTier=development"
|
||||
- "NodeLocation=eu-west-2a"
|
||||
# Stop k3s control plane having workloads scheduled on them
|
||||
node-taint:
|
||||
- "k3s-controlplane=true:NoExecute"
|
||||
```
|
|
@ -0,0 +1,19 @@
|
|||
# systemd config
|
||||
|
||||
Below are examples to tweak how and when K3S starts up.
|
||||
|
||||
## Wanted service units
|
||||
|
||||
In this example, we're going to start K3S after Wireguard. Our example server
|
||||
has a Wireguard connection `wg0`. We are using "wants" rather than "requires"
|
||||
as it's a weaker requirement that Wireguard must be running. We then want
|
||||
K3S to start after Wireguard has started.
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_service_wants:
|
||||
- wg-quick@wg0.service
|
||||
k3s_service_after:
|
||||
- wg-quick@wg0.service
|
||||
```
|
|
@ -0,0 +1,63 @@
|
|||
# Use an alternate CNI
|
||||
|
||||
K3S ships with Flannel, however sometimes you want a different CNI such as
|
||||
Calico, Canal or Weave Net. To do this you will need to disable Flannel with
|
||||
`flannel-backend: "none"`, specify a `cluster-cidr` and add your CNI manifests
|
||||
to the `k3s_server_manifests_templates`.
|
||||
|
||||
## Calico example
|
||||
|
||||
The below is based on the
|
||||
[Calico quickstart documentation](https://docs.projectcalico.org/getting-started/kubernetes/quickstart).
|
||||
|
||||
Steps:
|
||||
|
||||
1. Download `tigera-operator.yaml` to the manifests directory.
|
||||
1. Download `custom-resources.yaml` to the manifests directory.
|
||||
1. Choose a `cluster-cidr` (we are using 192.168.0.0/16)
|
||||
1. Set `k3s_server` and `k3s_server_manifests_templates` as per the below,
|
||||
ensure the paths to manifests are correct for your project repo.
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
# K3S Server config, don't deploy flannel and set cluster pod CIDR.
|
||||
k3s_server:
|
||||
cluster-cidr: 192.168.0.0/16
|
||||
flannel-backend: "none"
|
||||
|
||||
# Deploy the following k3s server templates.
|
||||
k3s_server_manifests_templates:
|
||||
- "manifests/calico/tigera-operator.yaml"
|
||||
- "manifests/calico/custom-resources.yaml"
|
||||
```
|
||||
|
||||
All nodes should come up as "Ready", below is a 3-node cluster:
|
||||
|
||||
```text
|
||||
$ kubectl get nodes -o wide -w
|
||||
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
|
||||
kube-0 Ready control-plane,etcd,master 114s v1.20.2+k3s1 10.10.9.2 10.10.9.2 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.3-k3s1
|
||||
kube-1 Ready control-plane,etcd,master 80s v1.20.2+k3s1 10.10.9.3 10.10.9.3 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.3-k3s1
|
||||
kube-2 Ready control-plane,etcd,master 73s v1.20.2+k3s1 10.10.9.4 10.10.9.4 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.3-k3s1
|
||||
```
|
||||
|
||||
Pods should be deployed within the CIDR specified in our config
|
||||
file.
|
||||
|
||||
```text
|
||||
$ kubectl get pods -o wide -A
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
|
||||
calico-system calico-kube-controllers-cfb4ff54b-8rp8r 1/1 Running 0 5m4s 192.168.145.65 kube-0 <none> <none>
|
||||
calico-system calico-node-2cm2m 1/1 Running 0 5m4s 10.10.9.2 kube-0 <none> <none>
|
||||
calico-system calico-node-2s6lx 1/1 Running 0 4m42s 10.10.9.4 kube-2 <none> <none>
|
||||
calico-system calico-node-zwqjz 1/1 Running 0 4m49s 10.10.9.3 kube-1 <none> <none>
|
||||
calico-system calico-typha-7b6747d665-78swq 1/1 Running 0 3m5s 10.10.9.4 kube-2 <none> <none>
|
||||
calico-system calico-typha-7b6747d665-8ff66 1/1 Running 0 3m5s 10.10.9.3 kube-1 <none> <none>
|
||||
calico-system calico-typha-7b6747d665-hgplx 1/1 Running 0 5m5s 10.10.9.2 kube-0 <none> <none>
|
||||
kube-system coredns-854c77959c-6qhgt 1/1 Running 0 5m20s 192.168.145.66 kube-0 <none> <none>
|
||||
kube-system helm-install-traefik-4czr9 0/1 Completed 0 5m20s 192.168.145.67 kube-0 <none> <none>
|
||||
kube-system metrics-server-86cbb8457f-qcxf5 1/1 Running 0 5m20s 192.168.145.68 kube-0 <none> <none>
|
||||
kube-system traefik-6f9cbd9bd4-7h4rl 1/1 Running 0 2m50s 192.168.126.65 kube-1 <none> <none>
|
||||
tigera-operator tigera-operator-b6c4bfdd9-29hhr 1/1 Running 0 5m20s 10.10.9.2 kube-0 <none> <none>
|
||||
```
|
|
@ -0,0 +1,69 @@
|
|||
# Extending a cluster
|
||||
|
||||
This document describes the method for extending a cluster with new worker
|
||||
nodes.
|
||||
|
||||
## Assumptions
|
||||
|
||||
It is assumed that you have already deployed a k3s cluster using this role,
|
||||
you have an appropriately configured inventory and playbook to create the
|
||||
cluster.
|
||||
|
||||
Below, our example inventory and playbook are as follows:
|
||||
|
||||
- inventory: `inventory.yml`
|
||||
- playbook: `cluster.yml`
|
||||
|
||||
Currently your `inventory.yml` looks like this, it has two nodes defined,
|
||||
`kube-0` (control node) and `kube-1` (worker node).
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_cluster:
|
||||
hosts:
|
||||
kube-0:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.2
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
kube-1:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.3
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
```
|
||||
|
||||
## Method
|
||||
|
||||
We have our two nodes, one control, one worker. The goal is to extend this to
|
||||
add capacity by adding a new worker node, `kube-2`. To do this we will add the
|
||||
new node to our inventory.
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_cluster:
|
||||
hosts:
|
||||
kube-0:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.2
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
kube-1:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.3
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
kube-2:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.4
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
```
|
||||
|
||||
Once the new node has been added, you can re-run the automation to join it to
|
||||
the cluster. You should expect the majority of changes to apply to the worker node being
|
||||
introduced to the cluster.
|
||||
|
||||
```text
|
||||
PLAY RECAP *******************************************************************************************************
|
||||
kube-0 : ok=53 changed=1 unreachable=0 failed=0 skipped=30 rescued=0 ignored=0
|
||||
kube-1 : ok=40 changed=1 unreachable=0 failed=0 skipped=35 rescued=0 ignored=0
|
||||
kube-2 : ok=42 changed=10 unreachable=0 failed=0 skipped=35 rescued=0 ignored=0
|
||||
```
|
|
@ -0,0 +1,74 @@
|
|||
# Shrinking a cluster
|
||||
|
||||
This document describes the method for shrinking a cluster, by removing a
|
||||
worker node.
|
||||
|
||||
## Assumptions
|
||||
|
||||
It is assumed that you have already deployed a k3s cluster using this role,
|
||||
you have an appropriately configured inventory and playbook to create the
|
||||
cluster.
|
||||
|
||||
Below, our example inventory and playbook are as follows:
|
||||
|
||||
- inventory: `inventory.yml`
|
||||
- playbook: `cluster.yml`
|
||||
|
||||
Currently your `inventory.yml` looks like this, it has three nodes defined,
|
||||
`kube-0` (control node) and `kube-1`, `kube-2` (worker nodes).
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_cluster:
|
||||
hosts:
|
||||
kube-0:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.2
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
kube-1:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.3
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
kube-2:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.4
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
```
|
||||
|
||||
## Method
|
||||
|
||||
We have our three nodes, one control, two workers. The goal is to shrink this to
|
||||
remove excess capacity by offboarding the worker node `kube-2`. To do this we
|
||||
will set `kube-2` node to `k3s_state: uninstalled` in our inventory.
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_cluster:
|
||||
hosts:
|
||||
kube-0:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.2
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
kube-1:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.3
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
kube-2:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.4
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
k3s_state: uninstalled
|
||||
```
|
||||
|
||||
What you will typically see is changes to your control plane (`kube-0`) and the
|
||||
node being removed (`kube-2`). The role will register the removal of the node
|
||||
with the cluster by draining the node and removing it from the cluster.
|
||||
|
||||
```text
|
||||
PLAY RECAP *******************************************************************************************************
|
||||
kube-0 : ok=55 changed=2 unreachable=0 failed=0 skipped=28 rescued=0 ignored=0
|
||||
kube-1 : ok=40 changed=0 unreachable=0 failed=0 skipped=35 rescued=0 ignored=0
|
||||
kube-2 : ok=23 changed=2 unreachable=0 failed=0 skipped=17 rescued=0 ignored=1
|
||||
```
|
|
@ -0,0 +1,93 @@
|
|||
# Stopping and Starting a cluster
|
||||
|
||||
This document describes the Ansible method for restarting a k3s cluster
|
||||
deployed by this role.
|
||||
|
||||
## Assumptions
|
||||
|
||||
It is assumed that you have already deployed a k3s cluster using this role,
|
||||
you have an appropriately configured inventory and playbook to create the
|
||||
cluster.
|
||||
|
||||
Below, our example inventory and playbook are as follows:
|
||||
|
||||
- inventory: `inventory.yml`
|
||||
- playbook: `cluster.yml`
|
||||
|
||||
## Method
|
||||
|
||||
### Start cluster
|
||||
|
||||
You can start the cluster using either of the following commands:
|
||||
|
||||
- Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=started'`
|
||||
- Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=started' --become all`
|
||||
|
||||
Below is example output, remember that Ansible is idempotent so re-running a
|
||||
command may not necessarily change the state.
|
||||
|
||||
**Playbook method output**:
|
||||
|
||||
```text
|
||||
PLAY RECAP *******************************************************************************************************
|
||||
kube-0 : ok=6 changed=0 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0
|
||||
kube-1 : ok=6 changed=0 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0
|
||||
kube-2 : ok=6 changed=0 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0
|
||||
```
|
||||
|
||||
### Stop cluster
|
||||
|
||||
You can stop the cluster using either of the following commands:
|
||||
|
||||
- Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=stopped'`
|
||||
- Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=stopped' --become all`
|
||||
|
||||
Below is example output, remember that Ansible is idempotent so re-running a
|
||||
command may not necessarily change the state.
|
||||
|
||||
**Playbook method output**:
|
||||
|
||||
```text
|
||||
PLAY RECAP *******************************************************************************************************
|
||||
kube-0 : ok=6 changed=1 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0
|
||||
kube-1 : ok=6 changed=1 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0
|
||||
kube-2 : ok=6 changed=1 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0
|
||||
```
|
||||
|
||||
### Restart cluster
|
||||
|
||||
Just like the `service` module, you can also specify `restarted` as a state.
|
||||
This will do `stop` followed by `start`.
|
||||
|
||||
- Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=restarted'`
|
||||
- Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=restarted' --become all`
|
||||
|
||||
```text
|
||||
PLAY RECAP *******************************************************************************************************
|
||||
kube-0 : ok=7 changed=1 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
|
||||
kube-1 : ok=7 changed=1 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
|
||||
kube-2 : ok=7 changed=1 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
|
||||
```
|
||||
|
||||
## Tips
|
||||
|
||||
You can limit the targets by adding the `-l` flag to your `ansible-playbook`
|
||||
command, or simply target your ad-hoc commands. For example, in a 3 node
|
||||
cluster (called `kube-0`, `kube-1` and `kube-2`) we can limit the restart to
|
||||
`kube-1` and `kube-2` with the following:
|
||||
|
||||
- Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=restarted' -l "kube-1,kube-2"`
|
||||
- Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=restarted' --become "kube-1,kube-2"`
|
||||
|
||||
```text
|
||||
PLAY RECAP ********************************************************************************************************
|
||||
kube-1 : ok=7 changed=2 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
|
||||
kube-2 : ok=7 changed=2 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
|
||||
```
|
||||
|
||||
## FAQ
|
||||
|
||||
1. _Why might I use the `ansible-playbook` command over an ad-hoc command?_
|
||||
- The stop/start tasks will be aware of configuration. As the role
|
||||
develops, there might be some pre-tasks added to change how a cluster
|
||||
is stopped or started.
|
|
@ -0,0 +1,52 @@
|
|||
# Updating k3s
|
||||
|
||||
## Before you start!
|
||||
|
||||
Ensure you back up your k3s cluster. This is particularly important if you use
|
||||
an external datastore or embedded Etcd. Please refer to the below guide to
|
||||
backing up your k3s datastore:
|
||||
|
||||
https://rancher.com/docs/k3s/latest/en/backup-restore/
|
||||
|
||||
Also, check that your volume backups are working!
|
||||
|
||||
## Procedure
|
||||
|
||||
### Updates using Ansible
|
||||
|
||||
To update via Ansible, set `k3s_release_version` to the target version you wish
|
||||
to go to. For example, from your `v1.19.3+k3s1` playbook:
|
||||
|
||||
```yaml
|
||||
---
|
||||
# BEFORE
|
||||
|
||||
- name: Provision k3s cluster
|
||||
hosts: k3s_cluster
|
||||
vars:
|
||||
k3s_release_version: v1.19.3+k3s1
|
||||
roles:
|
||||
- name: xanmanning.k3s
|
||||
```
|
||||
|
||||
Updating to `v1.20.2+k3s1`:
|
||||
|
||||
```yaml
|
||||
---
|
||||
# AFTER
|
||||
|
||||
- name: Provision k3s cluster
|
||||
hosts: k3s_cluster
|
||||
vars:
|
||||
k3s_release_version: v1.20.2+k3s1
|
||||
roles:
|
||||
- name: xanmanning.k3s
|
||||
```
|
||||
|
||||
### Automatic updates
|
||||
|
||||
For automatic updates, consider installing Rancher's
|
||||
[system-upgrade-controller](https://rancher.com/docs/k3s/latest/en/upgrades/automated/)
|
||||
|
||||
**Please note**, to be able to update using the system-upgrade-controller you
|
||||
will need to set `k3s_install_hard_links` to `true`.
|
|
@ -0,0 +1,147 @@
|
|||
# Quickstart: K3s cluster with a single control node
|
||||
|
||||
This is the quickstart guide to creating your own k3s cluster with one control
|
||||
plane node. This control plane node will also be a worker.
|
||||
|
||||
:hand: This example requires your Ansible user to be able to connect to the
|
||||
servers over SSH using key-based authentication. The user also has an entry
|
||||
in a sudoers file that allows privilege escalation without requiring a
|
||||
password.
|
||||
|
||||
To test this is the case, run the following check replacing `<ansible_user>`
|
||||
and `<server_name>`. The expected output is `Works`
|
||||
|
||||
`ssh <ansible_user>@<server_name> 'sudo cat /etc/shadow >/dev/null && echo "Works"'`
|
||||
|
||||
For example:
|
||||
|
||||
```text
|
||||
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"'
|
||||
Works
|
||||
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $
|
||||
```
|
||||
|
||||
## Directory structure
|
||||
|
||||
Our working directory will have the following files:
|
||||
|
||||
```text
|
||||
kubernetes-playground/
|
||||
|_ inventory.yml
|
||||
|_ cluster.yml
|
||||
```
|
||||
|
||||
## Inventory
|
||||
|
||||
Here's a YAML based example inventory for our servers called `inventory.yml`:
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_cluster:
|
||||
hosts:
|
||||
kube-0:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.2
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
kube-1:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.3
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
kube-2:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.4
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
|
||||
```
|
||||
|
||||
We can test this works with `ansible -i inventory.yml -m ping all`, expected
|
||||
result:
|
||||
|
||||
```text
|
||||
kube-0 | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
kube-1 | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
kube-2 | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## Playbook
|
||||
|
||||
Here is our playbook for the k3s cluster (`cluster.yml`):
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
- name: Build a cluster with a single control node
|
||||
hosts: k3s_cluster
|
||||
vars:
|
||||
k3s_become: true
|
||||
roles:
|
||||
- role: xanmanning.k3s
|
||||
```
|
||||
|
||||
## Execution
|
||||
|
||||
To execute the playbook against our inventory file, we will run the following
|
||||
command:
|
||||
|
||||
`ansible-playbook -i inventory.yml cluster.yml`
|
||||
|
||||
The output we can expect is similar to the below, with no failed or unreachable
|
||||
nodes. The default behavior of this role is to delegate the first play host as
|
||||
the control node, so kube-0 will have more changed tasks than others:
|
||||
|
||||
```text
|
||||
PLAY RECAP *******************************************************************************************************
|
||||
kube-0 : ok=56 changed=11 unreachable=0 failed=0 skipped=28 rescued=0 ignored=0
|
||||
kube-1 : ok=43 changed=10 unreachable=0 failed=0 skipped=32 rescued=0 ignored=0
|
||||
kube-2 : ok=43 changed=10 unreachable=0 failed=0 skipped=32 rescued=0 ignored=0
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
After logging into kube-0, we can test that k3s is running across the cluster,
|
||||
that all nodes are ready and that everything is ready to execute our Kubernetes
|
||||
workloads by running the following:
|
||||
|
||||
- `sudo kubectl get nodes -o wide`
|
||||
- `sudo kubectl get pods -o wide --all-namespaces`
|
||||
|
||||
:hand: Note we are using `sudo` because we need to be root to access the
|
||||
kube config for this node. This behavior can be changed by specifying
|
||||
`write-kubeconfig-mode: '0644'` in `k3s_server`.
|
||||
|
||||
**Get Nodes**:
|
||||
|
||||
```text
|
||||
ansible@kube-0:~$ sudo kubectl get nodes -o wide
|
||||
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
|
||||
kube-0 Ready master 34s v1.19.4+k3s1 10.0.2.15 <none> Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1
|
||||
kube-2 Ready <none> 14s v1.19.4+k3s1 10.0.2.17 <none> Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1
|
||||
kube-1 Ready <none> 14s v1.19.4+k3s1 10.0.2.16 <none> Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1
|
||||
ansible@kube-0:~$
|
||||
```
|
||||
|
||||
**Get Pods**:
|
||||
|
||||
```text
|
||||
ansible@kube-0:~$ sudo kubectl get pods -o wide --all-namespaces
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
|
||||
kube-system local-path-provisioner-7ff9579c6-72j8x 1/1 Running 0 55s 10.42.2.2 kube-1 <none> <none>
|
||||
kube-system metrics-server-7b4f8b595-lkspj 1/1 Running 0 55s 10.42.1.2 kube-2 <none> <none>
|
||||
kube-system helm-install-traefik-b6vnt 0/1 Completed 0 55s 10.42.0.3 kube-0 <none> <none>
|
||||
kube-system coredns-66c464876b-llsh7 1/1 Running 0 55s 10.42.0.2 kube-0 <none> <none>
|
||||
kube-system svclb-traefik-jrqg7 2/2 Running 0 27s 10.42.1.3 kube-2 <none> <none>
|
||||
kube-system svclb-traefik-gh65q 2/2 Running 0 27s 10.42.0.4 kube-0 <none> <none>
|
||||
kube-system svclb-traefik-5z7zp 2/2 Running 0 27s 10.42.2.3 kube-1 <none> <none>
|
||||
kube-system traefik-5dd496474-l2k74 1/1 Running 0 27s 10.42.1.4 kube-2 <none> <none>
|
||||
```
|
|
@ -0,0 +1,154 @@
|
|||
# Quickstart: K3s cluster with a HA control plane using embedded etcd
|
||||
|
||||
This is the quickstart guide to creating your own 3 node k3s cluster with a
|
||||
highly available control plane using the embedded etcd datastore.
|
||||
The control plane will all be workers as well.
|
||||
|
||||
:hand: This example requires your Ansible user to be able to connect to the
|
||||
servers over SSH using key-based authentication. The user is also has an entry
|
||||
in a sudoers file that allows privilege escalation without requiring a
|
||||
password.
|
||||
|
||||
To test this is the case, run the following check replacing `<ansible_user>`
|
||||
and `<server_name>`. The expected output is `Works`
|
||||
|
||||
`ssh <ansible_user>@<server_name> 'sudo cat /etc/shadow >/dev/null && echo "Works"'`
|
||||
|
||||
For example:
|
||||
|
||||
```text
|
||||
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"'
|
||||
Works
|
||||
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $
|
||||
```
|
||||
|
||||
## Directory structure
|
||||
|
||||
Our working directory will have the following files:
|
||||
|
||||
```text
|
||||
kubernetes-playground/
|
||||
|_ inventory.yml
|
||||
|_ ha_cluster.yml
|
||||
```
|
||||
|
||||
## Inventory
|
||||
|
||||
Here's a YAML based example inventory for our servers called `inventory.yml`:
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
# We're adding k3s_control_node to each host, this can be done in host_vars/
|
||||
# or group_vars/ as well - but for simplicity we are setting it here.
|
||||
k3s_cluster:
|
||||
hosts:
|
||||
kube-0:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.2
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
k3s_control_node: true
|
||||
kube-1:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.3
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
k3s_control_node: true
|
||||
kube-2:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.4
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
k3s_control_node: true
|
||||
|
||||
```
|
||||
|
||||
We can test this works with `ansible -i inventory.yml -m ping all`, expected
|
||||
result:
|
||||
|
||||
```text
|
||||
kube-0 | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
kube-1 | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
kube-2 | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## Playbook
|
||||
|
||||
Here is our playbook for the k3s cluster (`ha_cluster.yml`):
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
- name: Build a cluster with HA control plane
|
||||
hosts: k3s_cluster
|
||||
vars:
|
||||
k3s_become: true
|
||||
k3s_etcd_datastore: true
|
||||
k3s_use_experimental: true # Note this is required for k3s < v1.19.5+k3s1
|
||||
roles:
|
||||
- role: xanmanning.k3s
|
||||
```
|
||||
|
||||
## Execution
|
||||
|
||||
To execute the playbook against our inventory file, we will run the following
|
||||
command:
|
||||
|
||||
`ansible-playbook -i inventory.yml ha_cluster.yml`
|
||||
|
||||
The output we can expect is similar to the below, with no failed or unreachable
|
||||
nodes. The default behavior of this role is to delegate the first play host as
|
||||
the primary control node, so kube-0 will have more changed tasks than others:
|
||||
|
||||
```text
|
||||
PLAY RECAP *******************************************************************************************************
|
||||
kube-0 : ok=53 changed=8 unreachable=0 failed=0 skipped=30 rescued=0 ignored=0
|
||||
kube-1 : ok=47 changed=10 unreachable=0 failed=0 skipped=28 rescued=0 ignored=0
|
||||
kube-2 : ok=47 changed=9 unreachable=0 failed=0 skipped=28 rescued=0 ignored=0
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
After logging into any of the servers (it doesn't matter), we can test that k3s
|
||||
is running across the cluster, that all nodes are ready and that everything is
|
||||
ready to execute our Kubernetes workloads by running the following:
|
||||
|
||||
- `sudo kubectl get nodes -o wide`
|
||||
- `sudo kubectl get pods -o wide --all-namespaces`
|
||||
|
||||
:hand: Note we are using `sudo` because we need to be root to access the
|
||||
kube config for this node. This behavior can be changed with specifying
|
||||
`write-kubeconfig-mode: '0644'` in `k3s_server`.
|
||||
|
||||
**Get Nodes**:
|
||||
|
||||
```text
|
||||
ansible@kube-0:~$ sudo kubectl get nodes -o wide
|
||||
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
|
||||
kube-0 Ready etcd,master 2m58s v1.19.4+k3s1 10.10.9.2 10.10.9.2 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1
|
||||
kube-1 Ready etcd,master 2m22s v1.19.4+k3s1 10.10.9.3 10.10.9.3 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1
|
||||
kube-2 Ready etcd,master 2m10s v1.19.4+k3s1 10.10.9.4 10.10.9.4 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1
|
||||
```
|
||||
|
||||
**Get Pods**:
|
||||
|
||||
```text
|
||||
ansible@kube-0:~$ sudo kubectl get pods -o wide --all-namespaces
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
|
||||
kube-system coredns-66c464876b-rhgn6 1/1 Running 0 3m38s 10.42.0.2 kube-0 <none> <none>
|
||||
kube-system helm-install-traefik-vwglv 0/1 Completed 0 3m39s 10.42.0.3 kube-0 <none> <none>
|
||||
kube-system local-path-provisioner-7ff9579c6-d5xpb 1/1 Running 0 3m38s 10.42.0.5 kube-0 <none> <none>
|
||||
kube-system metrics-server-7b4f8b595-nhbt8 1/1 Running 0 3m38s 10.42.0.4 kube-0 <none> <none>
|
||||
kube-system svclb-traefik-9lzcq 2/2 Running 0 2m56s 10.42.1.2 kube-1 <none> <none>
|
||||
kube-system svclb-traefik-vq487 2/2 Running 0 2m45s 10.42.2.2 kube-2 <none> <none>
|
||||
kube-system svclb-traefik-wkwkk 2/2 Running 0 3m1s 10.42.0.7 kube-0 <none> <none>
|
||||
kube-system traefik-5dd496474-lw6x8 1/1 Running 0 3m1s 10.42.0.6 kube-0 <none> <none>
|
||||
```
|
|
@ -0,0 +1,121 @@
|
|||
# Quickstart: K3s single node
|
||||
|
||||
This is the quickstart guide to creating your own single-node k3s "cluster".
|
||||
|
||||
:hand: This example requires your Ansible user to be able to connect to the
|
||||
server over SSH using key-based authentication. The user is also has an entry
|
||||
in a sudoers file that allows privilege escalation without requiring a
|
||||
password.
|
||||
|
||||
To test this is the case, run the following check replacing `<ansible_user>`
|
||||
and `<server_name>`. The expected output is `Works`
|
||||
|
||||
`ssh <ansible_user>@<server_name> 'sudo cat /etc/shadow >/dev/null && echo "Works"'`
|
||||
|
||||
For example:
|
||||
|
||||
```text
|
||||
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"'
|
||||
Works
|
||||
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $
|
||||
```
|
||||
|
||||
## Directory structure
|
||||
|
||||
Our working directory will have the following files:
|
||||
|
||||
```text
|
||||
kubernetes-playground/
|
||||
|_ inventory.yml
|
||||
|_ single_node.yml
|
||||
```
|
||||
|
||||
## Inventory
|
||||
|
||||
Here's a YAML based example inventory for our server called `inventory.yml`:
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
k3s_cluster:
|
||||
hosts:
|
||||
kube-0:
|
||||
ansible_user: ansible
|
||||
ansible_host: 10.10.9.2
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
|
||||
```
|
||||
|
||||
We can test this works with `ansible -i inventory.yml -m ping all`, expected
|
||||
result:
|
||||
|
||||
```text
|
||||
kube-0 | SUCCESS => {
|
||||
"changed": false,
|
||||
"ping": "pong"
|
||||
}
|
||||
```
|
||||
|
||||
## Playbook
|
||||
|
||||
Here is our playbook for a single node k3s cluster (`single_node.yml`):
|
||||
|
||||
```yaml
|
||||
---
|
||||
|
||||
- name: Build a single node k3s cluster
|
||||
hosts: kube-0
|
||||
vars:
|
||||
k3s_become: true
|
||||
roles:
|
||||
- role: xanmanning.k3s
|
||||
```
|
||||
|
||||
## Execution
|
||||
|
||||
To execute the playbook against our inventory file, we will run the following
|
||||
command:
|
||||
|
||||
`ansible-playbook -i inventory.yml single_node.yml`
|
||||
|
||||
The output we can expect is similar to the below, with no failed or unreachable
|
||||
nodes:
|
||||
|
||||
```text
|
||||
PLAY RECAP *******************************************************************************************************
|
||||
kube-0 : ok=39 changed=8 unreachable=0 failed=0 skipped=39 rescued=0 ignored=0
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
After logging into the server, we can test that k3s is running and that it is
|
||||
ready to execute our Kubernetes workloads by running the following:
|
||||
|
||||
- `sudo kubectl get nodes`
|
||||
- `sudo kubectl get pods -o wide --all-namespaces`
|
||||
|
||||
:hand: Note we are using `sudo` because we need to be root to access the
|
||||
kube config for this node. This behavior can be changed with specifying
|
||||
`write-kubeconfig-mode: '0644'` in `k3s_server`.
|
||||
|
||||
**Get Nodes**:
|
||||
|
||||
```text
|
||||
ansible@kube-0:~$ sudo kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
kube-0 Ready master 5m27s v1.19.4+k3s
|
||||
ansible@kube-0:~$
|
||||
```
|
||||
|
||||
**Get Pods**:
|
||||
|
||||
```text
|
||||
ansible@kube-0:~$ sudo kubectl get pods --all-namespaces -o wide
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
|
||||
kube-system metrics-server-7b4f8b595-k692h 1/1 Running 0 9m38s 10.42.0.2 kube-0 <none> <none>
|
||||
kube-system local-path-provisioner-7ff9579c6-5lgzb 1/1 Running 0 9m38s 10.42.0.3 kube-0 <none> <none>
|
||||
kube-system coredns-66c464876b-xg42q 1/1 Running 0 9m38s 10.42.0.5 kube-0 <none> <none>
|
||||
kube-system helm-install-traefik-tdpcs 0/1 Completed 0 9m38s 10.42.0.4 kube-0 <none> <none>
|
||||
kube-system svclb-traefik-hk248 2/2 Running 0 9m4s 10.42.0.7 kube-0 <none> <none>
|
||||
kube-system traefik-5dd496474-bf4kv 1/1 Running 0 9m4s 10.42.0.6 kube-0 <none> <none>
|
||||
```
|
|
@ -1,18 +1,39 @@
|
|||
---
|
||||
|
||||
- name: reload systemctl
|
||||
command: systemctl daemon-reload
|
||||
args:
|
||||
warn: false
|
||||
- name: Reload systemd
|
||||
ansible.builtin.systemd:
|
||||
daemon_reload: true
|
||||
scope: "{{ k3s_systemd_context }}"
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: restart k3s
|
||||
service:
|
||||
- name: Reload service
|
||||
ansible.builtin.set_fact:
|
||||
k3s_service_reloaded: true
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Restart k3s systemd
|
||||
ansible.builtin.systemd:
|
||||
name: k3s
|
||||
state: restarted
|
||||
enabled: true
|
||||
scope: "{{ k3s_systemd_context }}"
|
||||
enabled: "{{ k3s_start_on_boot }}"
|
||||
retries: 3
|
||||
delay: 3
|
||||
register: k3s_systemd_restart_k3s
|
||||
failed_when:
|
||||
- k3s_systemd_restart_k3s is not success
|
||||
- not ansible_check_mode
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: restart docker
|
||||
service:
|
||||
name: docker
|
||||
- name: Restart k3s service
|
||||
ansible.builtin.service:
|
||||
name: k3s
|
||||
state: restarted
|
||||
enabled: true
|
||||
enabled: "{{ k3s_start_on_boot }}"
|
||||
retries: 3
|
||||
delay: 3
|
||||
register: k3s_service_restart_k3s
|
||||
failed_when:
|
||||
- k3s_service_restart_k3s is not success
|
||||
- not ansible_check_mode
|
||||
become: "{{ k3s_become }}"
|
||||
|
|
|
@ -1,7 +1,12 @@
|
|||
---
|
||||
|
||||
galaxy_info:
|
||||
role_name: k3s
|
||||
namespace: xanmanning
|
||||
author: Xan Manning
|
||||
description: Ansible role for installing k3s as either a standalone server or cluster
|
||||
description: Ansible role for installing k3s as either a standalone server or HA cluster
|
||||
company: Pyrat Ltd.
|
||||
github_branch: main
|
||||
|
||||
# If the issue tracker for your role is not on github, uncomment the
|
||||
# next line and provide a value
|
||||
|
@ -16,7 +21,7 @@ galaxy_info:
|
|||
# - CC-BY
|
||||
license: BSD
|
||||
|
||||
min_ansible_version: 2.6
|
||||
min_ansible_version: '2.9'
|
||||
|
||||
# If this a Container Enabled role, provide the minimum Ansible Container version.
|
||||
# min_ansible_container_version:
|
||||
|
@ -26,20 +31,37 @@ galaxy_info:
|
|||
# Galaxy will use this branch. During import Galaxy will access files on
|
||||
# this branch. If Travis integration is configured, only notifications for this
|
||||
# branch will be accepted. Otherwise, in all cases, the repo's default branch
|
||||
# (usually master) will be used.
|
||||
#github_branch:
|
||||
# (usually main) will be used.
|
||||
# github_branch:
|
||||
|
||||
#
|
||||
# platforms is a list of platforms, and each platform has a name and a list of versions.
|
||||
#
|
||||
platforms:
|
||||
- name: Alpine
|
||||
versions:
|
||||
- all
|
||||
- name: Archlinux
|
||||
versions:
|
||||
- all
|
||||
- name: EL
|
||||
versions:
|
||||
- 7
|
||||
- 8
|
||||
- name: Amazon
|
||||
- name: Fedora
|
||||
versions:
|
||||
- 29
|
||||
- 30
|
||||
- 31
|
||||
- name: Debian
|
||||
versions:
|
||||
- buster
|
||||
- jessie
|
||||
- stretch
|
||||
- name: SLES
|
||||
versions:
|
||||
- 15
|
||||
- name: Ubuntu
|
||||
versions:
|
||||
- xenial
|
||||
|
@ -47,10 +69,11 @@ galaxy_info:
|
|||
|
||||
galaxy_tags:
|
||||
- k3s
|
||||
- k8s
|
||||
- kubernetes
|
||||
- docker
|
||||
- containerd
|
||||
- cluster
|
||||
- lightweight
|
||||
# List tags for your role here, one per line. A tag is a keyword that describes
|
||||
# and categorizes the role. Users find roles by searching for tags. Be sure to
|
||||
# remove the '[]' above, if you add tags to this list.
|
||||
|
@ -59,5 +82,5 @@ galaxy_info:
|
|||
# Maximum 20 tags per role.
|
||||
|
||||
dependencies: []
|
||||
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
|
||||
# if you add dependencies to this list.
|
||||
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
|
||||
# if you add dependencies to this list.
|
||||
|
|
|
@ -0,0 +1,28 @@
|
|||
---
|
||||
- name: Converge
|
||||
hosts: node*
|
||||
become: true
|
||||
vars:
|
||||
molecule_is_test: true
|
||||
k3s_release_version: v1.22
|
||||
k3s_build_cluster: false
|
||||
k3s_control_token: 55ba04e5-e17d-4535-9170-3e4245453f4d
|
||||
k3s_install_dir: /opt/k3s/bin
|
||||
k3s_config_file: /opt/k3s/etc/k3s_config.yaml
|
||||
k3s_server:
|
||||
data-dir: /var/lib/k3s-io
|
||||
default-local-storage-path: /var/lib/k3s-io/local-storage
|
||||
disable:
|
||||
- metrics-server
|
||||
- traefik
|
||||
# k3s_agent:
|
||||
# snapshotter: native
|
||||
k3s_server_manifests_templates:
|
||||
- "molecule/autodeploy/templates/00-ns-monitoring.yml.j2"
|
||||
k3s_server_manifests_urls:
|
||||
- url: https://raw.githubusercontent.com/metallb/metallb/v0.9.6/manifests/namespace.yaml
|
||||
filename: 05-metallb-namespace.yml
|
||||
k3s_service_env_vars:
|
||||
K3S_TEST_VAR: "Hello world!"
|
||||
roles:
|
||||
- role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
|
|
@ -0,0 +1,55 @@
|
|||
---
|
||||
|
||||
dependency:
|
||||
name: galaxy
|
||||
driver:
|
||||
name: docker
|
||||
scenario:
|
||||
test_sequence:
|
||||
- dependency
|
||||
- cleanup
|
||||
- destroy
|
||||
- syntax
|
||||
- create
|
||||
- prepare
|
||||
- check
|
||||
- converge
|
||||
- idempotence
|
||||
- side_effect
|
||||
- verify
|
||||
- cleanup
|
||||
- destroy
|
||||
platforms:
|
||||
- name: node1
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: node2
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: node3
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
provisioner:
|
||||
name: ansible
|
||||
options:
|
||||
verbose: true
|
||||
verifier:
|
||||
name: ansible
|
|
@ -0,0 +1,26 @@
|
|||
---
|
||||
- name: Prepare
|
||||
hosts: node*
|
||||
become: true
|
||||
tasks:
|
||||
- name: Ensure apt cache is updated and iptables is installed
|
||||
ansible.builtin.apt:
|
||||
name: iptables
|
||||
state: present
|
||||
update_cache: true
|
||||
when: ansible_pkg_mgr == 'apt'
|
||||
|
||||
- name: Ensure install directory and configuration directory exists
|
||||
ansible.builtin.file:
|
||||
path: "/opt/k3s/{{ item }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
loop:
|
||||
- bin
|
||||
- etc
|
||||
|
||||
- name: Ensure data directory exists
|
||||
ansible.builtin.file:
|
||||
path: "/var/lib/k3s-io"
|
||||
state: directory
|
||||
mode: 0755
|
|
@ -0,0 +1,4 @@
|
|||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: monitoring
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
# This is an example playbook to execute Ansible tests.
|
||||
|
||||
- name: Verify
|
||||
hosts: all
|
||||
tasks:
|
||||
- name: Example assertion
|
||||
ansible.builtin.assert:
|
||||
that: true
|
|
@ -0,0 +1,14 @@
|
|||
---
|
||||
- name: Converge
|
||||
hosts: all
|
||||
become: true
|
||||
vars:
|
||||
pyratlabs_issue_controller_dump: true
|
||||
# k3s_agent:
|
||||
# snapshotter: native
|
||||
pre_tasks:
|
||||
- name: Ensure k3s_debug is set
|
||||
ansible.builtin.set_fact:
|
||||
k3s_debug: true
|
||||
roles:
|
||||
- xanmanning.k3s
|
|
@ -0,0 +1,55 @@
|
|||
---
|
||||
|
||||
dependency:
|
||||
name: galaxy
|
||||
driver:
|
||||
name: docker
|
||||
scenario:
|
||||
test_sequence:
|
||||
- dependency
|
||||
- cleanup
|
||||
- destroy
|
||||
- syntax
|
||||
- create
|
||||
- prepare
|
||||
- check
|
||||
- converge
|
||||
- idempotence
|
||||
- side_effect
|
||||
- verify
|
||||
- cleanup
|
||||
- destroy
|
||||
platforms:
|
||||
- name: node1
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: node2
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: node3
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
provisioner:
|
||||
name: ansible
|
||||
options:
|
||||
verbose: true
|
||||
verifier:
|
||||
name: ansible
|
|
@ -0,0 +1,10 @@
|
|||
---
|
||||
- name: Prepare
|
||||
hosts: all
|
||||
tasks:
|
||||
- name: Ensure apt cache is updated and iptables is installed
|
||||
ansible.builtin.apt:
|
||||
name: iptables
|
||||
state: present
|
||||
update_cache: true
|
||||
when: ansible_pkg_mgr == 'apt'
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
# This is an example playbook to execute Ansible tests.
|
||||
|
||||
- name: Verify
|
||||
hosts: all
|
||||
tasks:
|
||||
- name: Example assertion
|
||||
ansible.builtin.assert:
|
||||
that: true
|
|
@ -0,0 +1,26 @@
|
|||
# Molecule managed
|
||||
|
||||
{% if item.registry is defined %}
|
||||
FROM {{ item.registry.url }}/{{ item.image }}
|
||||
{% else %}
|
||||
FROM {{ item.image }}
|
||||
{% endif %}
|
||||
|
||||
RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python systemd sudo bash ca-certificates && apt-get clean; \
|
||||
elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python systemd sudo python-devel python*-dnf bash && dnf clean all; \
|
||||
elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python systemd sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \
|
||||
elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python systemd sudo bash python-xml && zypper clean -a; \
|
||||
elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo systemd bash ca-certificates; \
|
||||
elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python systemd sudo bash ca-certificates && xbps-remove -O; fi
|
||||
|
||||
RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
|
||||
rm -f /lib/systemd/system/multi-user.target.wants/*; \
|
||||
rm -f /etc/systemd/system/*.wants/*; \
|
||||
rm -f /lib/systemd/system/local-fs.target.wants/*; \
|
||||
rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
|
||||
rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
|
||||
rm -f /lib/systemd/system/basic.target.wants/*; \
|
||||
rm -f /lib/systemd/system/anaconda.target.wants/*;
|
||||
|
||||
VOLUME [“/sys/fs/cgroup”]
|
||||
CMD [“/usr/sbin/init”]
|
|
@ -0,0 +1,22 @@
|
|||
*******
|
||||
Docker driver installation guide
|
||||
*******
|
||||
|
||||
Requirements
|
||||
============
|
||||
|
||||
* Docker Engine
|
||||
|
||||
Install
|
||||
=======
|
||||
|
||||
Please refer to the `Virtual environment`_ documentation for installation best
|
||||
practices. If not using a virtual environment, please consider passing the
|
||||
widely recommended `'--user' flag`_ when invoking ``pip``.
|
||||
|
||||
.. _Virtual environment: https://virtualenv.pypa.io/en/latest/
|
||||
.. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ pip install 'molecule[docker]'
|
|
@ -0,0 +1,12 @@
|
|||
---
|
||||
- name: Converge
|
||||
hosts: all
|
||||
become: true
|
||||
roles:
|
||||
- role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
|
||||
vars:
|
||||
molecule_is_test: true
|
||||
k3s_install_hard_links: true
|
||||
k3s_release_version: stable
|
||||
# k3s_agent:
|
||||
# snapshotter: native
|
|
@ -0,0 +1,55 @@
|
|||
---
|
||||
|
||||
dependency:
|
||||
name: galaxy
|
||||
driver:
|
||||
name: docker
|
||||
scenario:
|
||||
test_sequence:
|
||||
- dependency
|
||||
- cleanup
|
||||
- destroy
|
||||
- syntax
|
||||
- create
|
||||
- prepare
|
||||
- check
|
||||
- converge
|
||||
- idempotence
|
||||
- side_effect
|
||||
- verify
|
||||
- cleanup
|
||||
- destroy
|
||||
platforms:
|
||||
- name: node1
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: node2
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: node3
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
provisioner:
|
||||
name: ansible
|
||||
options:
|
||||
verbose: true
|
||||
verifier:
|
||||
name: ansible
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
- name: Converge
|
||||
hosts: all
|
||||
become: true
|
||||
vars:
|
||||
molecule_is_test: true
|
||||
k3s_state: downloaded
|
||||
roles:
|
||||
- role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
- name: Converge
|
||||
hosts: all
|
||||
become: true
|
||||
vars:
|
||||
molecule_is_test: true
|
||||
k3s_state: restarted
|
||||
roles:
|
||||
- role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
|
|
@ -0,0 +1,15 @@
|
|||
---
|
||||
- name: Converge
|
||||
hosts: node1
|
||||
become: true
|
||||
become_user: k3suser
|
||||
vars:
|
||||
molecule_is_test: true
|
||||
k3s_use_experimental: true
|
||||
k3s_server:
|
||||
rootless: true
|
||||
k3s_agent:
|
||||
rootless: true
|
||||
k3s_install_dir: "/home/{{ ansible_user_id }}/bin"
|
||||
roles:
|
||||
- role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
- name: Converge
|
||||
hosts: all
|
||||
become: true
|
||||
vars:
|
||||
molecule_is_test: true
|
||||
k3s_build_cluster: false
|
||||
roles:
|
||||
- role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
- name: Converge
|
||||
hosts: all
|
||||
become: true
|
||||
vars:
|
||||
molecule_is_test: true
|
||||
k3s_state: started
|
||||
roles:
|
||||
- role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
- name: Converge
|
||||
hosts: all
|
||||
become: true
|
||||
vars:
|
||||
molecule_is_test: true
|
||||
k3s_state: stopped
|
||||
roles:
|
||||
- role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
- name: Converge
|
||||
hosts: all
|
||||
become: true
|
||||
vars:
|
||||
molecule_is_test: true
|
||||
k3s_state: uninstalled
|
||||
roles:
|
||||
- role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
|
|
@ -0,0 +1,23 @@
|
|||
---
|
||||
- name: Prepare
|
||||
hosts: node1
|
||||
become: true
|
||||
tasks:
|
||||
- name: Ensure a user group exists
|
||||
ansible.builtin.group:
|
||||
name: user
|
||||
state: present
|
||||
|
||||
- name: Ensure a normal user exists
|
||||
ansible.builtin.user:
|
||||
name: k3suser
|
||||
group: user
|
||||
state: present
|
||||
|
||||
- name: Ensure a normal user has bin directory
|
||||
ansible.builtin.file:
|
||||
path: /home/k3suser/bin
|
||||
state: directory
|
||||
owner: k3suser
|
||||
group: user
|
||||
mode: 0700
|
|
@ -0,0 +1,10 @@
|
|||
---
|
||||
- name: Prepare
|
||||
hosts: all
|
||||
tasks:
|
||||
- name: Ensure apt cache is updated and iptables is installed
|
||||
ansible.builtin.apt:
|
||||
name: iptables
|
||||
state: present
|
||||
update_cache: true
|
||||
when: ansible_pkg_mgr == 'apt'
|
|
@ -0,0 +1,14 @@
|
|||
import os
|
||||
|
||||
import testinfra.utils.ansible_runner
|
||||
|
||||
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
|
||||
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
|
||||
|
||||
|
||||
def test_hosts_file(host):
|
||||
f = host.file('/etc/hosts')
|
||||
|
||||
assert f.exists
|
||||
assert f.user == 'root'
|
||||
assert f.group == 'root'
|
Binary file not shown.
|
@ -0,0 +1,7 @@
|
|||
# Molecule managed
|
||||
|
||||
{% if item.registry is defined %}
|
||||
FROM {{ item.registry.url }}/{{ item.image }}
|
||||
{% else %}
|
||||
FROM {{ item.image }}
|
||||
{% endif %}
|
|
@ -0,0 +1,22 @@
|
|||
*******
|
||||
Docker driver installation guide
|
||||
*******
|
||||
|
||||
Requirements
|
||||
============
|
||||
|
||||
* Docker Engine
|
||||
|
||||
Install
|
||||
=======
|
||||
|
||||
Please refer to the `Virtual environment`_ documentation for installation best
|
||||
practices. If not using a virtual environment, please consider passing the
|
||||
widely recommended `'--user' flag`_ when invoking ``pip``.
|
||||
|
||||
.. _Virtual environment: https://virtualenv.pypa.io/en/latest/
|
||||
.. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ pip install 'molecule[docker]'
|
|
@ -0,0 +1,21 @@
|
|||
---
|
||||
|
||||
- name: Converge
|
||||
hosts: node*
|
||||
become: true
|
||||
vars:
|
||||
molecule_is_test: true
|
||||
k3s_registration_address: loadbalancer
|
||||
k3s_control_token: 55ba04e5-e17d-4535-9170-3e4245453f4d
|
||||
k3s_server:
|
||||
datastore-endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable"
|
||||
# k3s_agent:
|
||||
# snapshotter: native
|
||||
k3s_service_env_file: /tmp/k3s.env
|
||||
pre_tasks:
|
||||
- name: Set each node to be a control node
|
||||
ansible.builtin.set_fact:
|
||||
k3s_control_node: true
|
||||
when: inventory_hostname in ['node2', 'node3']
|
||||
roles:
|
||||
- role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
|
|
@ -0,0 +1,13 @@
|
|||
frontend loadbalancer
|
||||
bind *:6443
|
||||
mode tcp
|
||||
default_backend control_nodes
|
||||
timeout client 1m
|
||||
|
||||
backend control_nodes
|
||||
mode tcp
|
||||
balance roundrobin
|
||||
server node2 node2:6443
|
||||
server node3 node3:6443
|
||||
timeout connect 30s
|
||||
timeout server 30m
|
|
@ -0,0 +1,68 @@
|
|||
---
|
||||
|
||||
dependency:
|
||||
name: galaxy
|
||||
driver:
|
||||
name: docker
|
||||
scenario:
|
||||
test_sequence:
|
||||
- dependency
|
||||
- cleanup
|
||||
- destroy
|
||||
- syntax
|
||||
- create
|
||||
- prepare
|
||||
- check
|
||||
- converge
|
||||
- idempotence
|
||||
- side_effect
|
||||
- verify
|
||||
- cleanup
|
||||
- destroy
|
||||
platforms:
|
||||
- name: node1
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: node2
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: node3
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: database
|
||||
image: postgres:11-alpine
|
||||
pre_build_image: true
|
||||
command: "postgres"
|
||||
env:
|
||||
POSTGRES_PASSWORD: "verybadpass"
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: loadbalancer
|
||||
image: geerlingguy/docker-rockylinux8-ansible:latest
|
||||
pre_build_image: true
|
||||
ports:
|
||||
- "6443:6443"
|
||||
networks:
|
||||
- name: k3snet
|
||||
provisioner:
|
||||
name: ansible
|
||||
options:
|
||||
verbose: true
|
|
@ -0,0 +1,48 @@
|
|||
---
|
||||
- name: Prepare Load Balancer
|
||||
hosts: loadbalancer
|
||||
tasks:
|
||||
- name: Ensure apt cache is updated
|
||||
ansible.builtin.apt:
|
||||
update_cache: true
|
||||
when: ansible_pkg_mgr == 'apt'
|
||||
|
||||
- name: Ensure HAProxy is installed
|
||||
ansible.builtin.package:
|
||||
name: haproxy
|
||||
state: present
|
||||
|
||||
- name: Ensure HAProxy config directory exists
|
||||
ansible.builtin.file:
|
||||
path: /usr/local/etc/haproxy
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: Ensure HAProxy is configured
|
||||
ansible.builtin.template:
|
||||
src: haproxy-loadbalancer.conf.j2
|
||||
dest: /usr/local/etc/haproxy/haproxy.cfg
|
||||
mode: 0644
|
||||
|
||||
- name: Ensure HAProxy service is started
|
||||
ansible.builtin.command:
|
||||
cmd: haproxy -D -f /usr/local/etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid
|
||||
args:
|
||||
creates: /var/run/haproxy.pid
|
||||
|
||||
- name: Prepare nodes
|
||||
hosts: node*
|
||||
tasks:
|
||||
- name: Ensure apt cache is updated and iptables is installed
|
||||
ansible.builtin.apt:
|
||||
name: iptables
|
||||
state: present
|
||||
update_cache: true
|
||||
when: ansible_pkg_mgr == 'apt'
|
||||
|
||||
- name: Ensure environment file exists for k3s_service_env_file
|
||||
ansible.builtin.lineinfile:
|
||||
path: /tmp/k3s.env
|
||||
line: "THISHOST={{ ansible_hostname }}"
|
||||
mode: 0644
|
||||
create: true
|
|
@ -0,0 +1,14 @@
|
|||
import os
|
||||
|
||||
import testinfra.utils.ansible_runner
|
||||
|
||||
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
|
||||
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
|
||||
|
||||
|
||||
def test_hosts_file(host):
|
||||
f = host.file('/etc/hosts')
|
||||
|
||||
assert f.exists
|
||||
assert f.user == 'root'
|
||||
assert f.group == 'root'
|
Binary file not shown.
|
@ -0,0 +1,24 @@
|
|||
---
|
||||
|
||||
- name: Converge
|
||||
hosts: node*
|
||||
become: true
|
||||
vars:
|
||||
molecule_is_test: true
|
||||
k3s_release_version: "v1.21"
|
||||
k3s_use_experimental: true
|
||||
k3s_etcd_datastore: true
|
||||
k3s_server:
|
||||
secrets-encryption: true
|
||||
k3s_agent:
|
||||
node-ip: "{{ ansible_default_ipv4.address }}"
|
||||
snapshotter: native
|
||||
selinux: "{{ ansible_os_family | lower == 'redhat' }}"
|
||||
k3s_skip_validation: "{{ k3s_service_handler[ansible_service_mgr] == 'service' }}"
|
||||
# k3s_skip_post_checks: "{{ ansible_os_family | lower == 'redhat' }}"
|
||||
pre_tasks:
|
||||
- name: Set each node to be a control node
|
||||
ansible.builtin.set_fact:
|
||||
k3s_control_node: true
|
||||
roles:
|
||||
- role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
|
|
@ -0,0 +1,13 @@
|
|||
frontend loadbalancer
|
||||
bind *:6443
|
||||
mode tcp
|
||||
default_backend control_nodes
|
||||
timeout client 1m
|
||||
|
||||
backend control_nodes
|
||||
mode tcp
|
||||
balance roundrobin
|
||||
server node2 node2:6443
|
||||
server node3 node3:6443
|
||||
timeout connect 30s
|
||||
timeout server 30m
|
|
@ -0,0 +1,60 @@
|
|||
---
|
||||
|
||||
dependency:
|
||||
name: galaxy
|
||||
driver:
|
||||
name: docker
|
||||
scenario:
|
||||
test_sequence:
|
||||
- dependency
|
||||
- cleanup
|
||||
- destroy
|
||||
- syntax
|
||||
- create
|
||||
- prepare
|
||||
- check
|
||||
- converge
|
||||
- idempotence
|
||||
- side_effect
|
||||
- verify
|
||||
- cleanup
|
||||
- destroy
|
||||
platforms:
|
||||
- name: node1
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: node2
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: node3
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: loadbalancer
|
||||
image: geerlingguy/docker-rockylinux8-ansible:latest
|
||||
pre_build_image: true
|
||||
ports:
|
||||
- "6443:6443"
|
||||
networks:
|
||||
- name: k3snet
|
||||
provisioner:
|
||||
name: ansible
|
||||
options:
|
||||
verbose: true
|
|
@ -0,0 +1,59 @@
|
|||
---
|
||||
|
||||
- name: Prepare all nodes
|
||||
hosts: all
|
||||
tasks:
|
||||
- name: Ensure apt cache is updated
|
||||
ansible.builtin.apt:
|
||||
update_cache: true
|
||||
when: ansible_pkg_mgr == 'apt'
|
||||
|
||||
- name: Ensure sudo is installed
|
||||
community.general.apk:
|
||||
name: sudo
|
||||
state: present
|
||||
update_cache: true
|
||||
when: ansible_pkg_mgr == 'apk'
|
||||
|
||||
- name: Prepare Load Balancer
|
||||
hosts: loadbalancer
|
||||
tasks:
|
||||
- name: Ensure HAProxy is installed
|
||||
ansible.builtin.package:
|
||||
name: haproxy
|
||||
state: present
|
||||
|
||||
- name: Ensure HAProxy config directory exists
|
||||
ansible.builtin.file:
|
||||
path: /usr/local/etc/haproxy
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: Ensure HAProxy is configured
|
||||
ansible.builtin.template:
|
||||
src: haproxy-loadbalancer.conf.j2
|
||||
dest: /usr/local/etc/haproxy/haproxy.cfg
|
||||
mode: 0644
|
||||
|
||||
- name: Ensure HAProxy service is started
|
||||
ansible.builtin.command:
|
||||
cmd: haproxy -D -f /usr/local/etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid
|
||||
args:
|
||||
creates: /var/run/haproxy.pid
|
||||
|
||||
- name: Prepare nodes
|
||||
hosts: node*
|
||||
tasks:
|
||||
- name: Ensure apt cache is updated and iptables is installed
|
||||
ansible.builtin.apt:
|
||||
name: iptables
|
||||
state: present
|
||||
update_cache: true
|
||||
when: ansible_pkg_mgr == 'apt'
|
||||
|
||||
- name: Ensure iproute is installed
|
||||
ansible.builtin.dnf:
|
||||
name: iproute
|
||||
state: present
|
||||
update_cache: true
|
||||
when: ansible_pkg_mgr == 'dnf'
|
|
@ -0,0 +1,4 @@
|
|||
-r ../requirements.txt
|
||||
|
||||
yamllint>=1.25.0
|
||||
ansible-lint>=4.3.5
|
|
@ -0,0 +1 @@
|
|||
files/*
|
|
@ -0,0 +1,12 @@
|
|||
---
|
||||
- name: Converge
|
||||
hosts: all
|
||||
become: true
|
||||
vars:
|
||||
molecule_is_test: true
|
||||
k3s_server: "{{ lookup('file', 'k3s_server.yml') | from_yaml }}"
|
||||
k3s_agent: "{{ lookup('file', 'k3s_agent.yml') | from_yaml }}"
|
||||
k3s_airgap: true
|
||||
k3s_release_version: latest
|
||||
roles:
|
||||
- role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}"
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
|
||||
node-label:
|
||||
- "foo=bar"
|
||||
- "hello=world"
|
||||
kubelet-arg:
|
||||
- "cloud-provider=external"
|
||||
- "provider-id=azure"
|
||||
# snapshotter: native
|
|
@ -0,0 +1,14 @@
|
|||
---
|
||||
|
||||
flannel-backend: 'none'
|
||||
disable-scheduler: true
|
||||
disable-cloud-controller: true
|
||||
disable-network-policy: true
|
||||
disable:
|
||||
- coredns
|
||||
- traefik
|
||||
- servicelb
|
||||
- local-storage
|
||||
- metrics-server
|
||||
node-taint:
|
||||
- "k3s-controlplane=true:NoExecute"
|
|
@ -0,0 +1,55 @@
|
|||
---
|
||||
|
||||
dependency:
|
||||
name: galaxy
|
||||
driver:
|
||||
name: docker
|
||||
scenario:
|
||||
test_sequence:
|
||||
- dependency
|
||||
- cleanup
|
||||
- destroy
|
||||
- syntax
|
||||
- create
|
||||
- prepare
|
||||
- check
|
||||
- converge
|
||||
- idempotence
|
||||
- side_effect
|
||||
- verify
|
||||
- cleanup
|
||||
- destroy
|
||||
platforms:
|
||||
- name: node1
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: node2
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
- name: node3
|
||||
image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"}
|
||||
command: ${MOLECULE_DOCKER_COMMAND:-""}
|
||||
volumes:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
privileged: true
|
||||
pre_build_image: ${MOLECULE_PREBUILT:-true}
|
||||
networks:
|
||||
- name: k3snet
|
||||
provisioner:
|
||||
name: ansible
|
||||
options:
|
||||
verbose: true
|
||||
verifier:
|
||||
name: ansible
|
|
@ -0,0 +1,27 @@
|
|||
---
|
||||
- name: Prepare
|
||||
hosts: all
|
||||
tasks:
|
||||
- name: Ensure apt cache is updated and iptables is installed
|
||||
ansible.builtin.apt:
|
||||
name: iptables
|
||||
state: present
|
||||
update_cache: true
|
||||
when: ansible_pkg_mgr == 'apt'
|
||||
|
||||
- name: Prepare air-gapped installation
|
||||
delegate_to: localhost
|
||||
run_once: true
|
||||
block:
|
||||
|
||||
- name: Ensure files directory exists
|
||||
ansible.builtin.file:
|
||||
path: ./files
|
||||
state: directory
|
||||
mode: 0750
|
||||
|
||||
- name: Ensure k3s is downloaded for air-gap installation
|
||||
ansible.builtin.get_url:
|
||||
url: https://github.com/k3s-io/k3s/releases/download/v1.22.5%2Bk3s1/k3s
|
||||
dest: ./files/k3s
|
||||
mode: 0755
|
|
@ -0,0 +1,9 @@
|
|||
---
|
||||
# This is an example playbook to execute Ansible tests.
|
||||
|
||||
- name: Verify
|
||||
hosts: all
|
||||
tasks:
|
||||
- name: Example assertion
|
||||
ansible.builtin.assert:
|
||||
that: true
|
|
@ -0,0 +1,4 @@
|
|||
-r ../requirements.txt
|
||||
|
||||
molecule-plugins[docker]
|
||||
docker>=4.3.1
|
|
@ -0,0 +1 @@
|
|||
ansible>=2.10.7
|
|
@ -1,46 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Ensure ansible_host is mapped to inventory_hostname
|
||||
lineinfile:
|
||||
path: /tmp/inventory.txt
|
||||
line: "{{ item }}@@@{{ hostvars[item].ansible_host }}@@@{{ hostvars[item].k3s_control_node }}"
|
||||
create: true
|
||||
loop: "{{ play_hosts }}"
|
||||
|
||||
- name: Lookup control node from file
|
||||
command: "grep 'True' /tmp/inventory.txt"
|
||||
changed_when: false
|
||||
register: k3s_control_delegate_raw
|
||||
|
||||
- name: Ensure control node is delegated to for obtaining a token
|
||||
set_fact:
|
||||
k3s_control_delegate: "{{ k3s_control_delegate_raw.stdout.split('@@@')[0] }}"
|
||||
|
||||
- name: Ensure the control node address is registered in Ansible
|
||||
set_fact:
|
||||
k3s_control_node_address: "{{ hostvars[k3s_control_delegate].ansible_host }}"
|
||||
|
||||
- name: Ensure NODE_TOKEN is captured from control node
|
||||
slurp:
|
||||
path: "/var/lib/rancher/k3s/server/node-token"
|
||||
register: k3s_control_token
|
||||
delegate_to: "{{ k3s_control_delegate }}"
|
||||
|
||||
- name: Ensure k3s service unit file is present
|
||||
template:
|
||||
src: k3s.service.j2
|
||||
dest: /etc/systemd/system/k3s.service
|
||||
notify:
|
||||
- reload systemctl
|
||||
- restart k3s
|
||||
|
||||
- meta: flush_handlers
|
||||
|
||||
- name: Wait for all nodes to be ready
|
||||
command: "{{ k3s_install_dir }}/kubectl get nodes"
|
||||
changed_when: false
|
||||
register: kubectl_get_nodes_result
|
||||
until: kubectl_get_nodes_result.stdout.find("NotReady") == -1
|
||||
retries: 30
|
||||
delay: 20
|
||||
when: k3s_control_node
|
|
@ -0,0 +1,10 @@
|
|||
---
|
||||
|
||||
- name: Ensure systemd context is correct if we are running k3s rootless
|
||||
ansible.builtin.set_fact:
|
||||
k3s_systemd_context: user
|
||||
k3s_systemd_unit_dir: "{{ ansible_user_dir }}/.config/systemd/user"
|
||||
when:
|
||||
- k3s_runtime_config is defined
|
||||
- k3s_runtime_config.rootless is defined
|
||||
- k3s_runtime_config.rootless
|
|
@ -1,29 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Ensure target host architecture information is set as a fact
|
||||
set_fact:
|
||||
k3s_arch: "{{ k3s_arch_lookup[ansible_architecture].arch }}"
|
||||
k3s_arch_suffix: "{{ k3s_arch_lookup[ansible_architecture].suffix }}"
|
||||
|
||||
- name: Ensure URLs are set as facts for downloading binaries
|
||||
set_fact:
|
||||
k3s_binary_url: "{{ k3s_github_download_url }}/{{ k3s_release_version }}/k3s{{ k3s_arch_suffix }}"
|
||||
k3s_hash_url: "{{ k3s_github_download_url }}/{{ k3s_release_version }}/sha256sum-{{ k3s_arch }}.txt"
|
||||
|
||||
- name: Ensure the k3s hashsum is downloaded
|
||||
uri:
|
||||
url: "{{ k3s_hash_url }}"
|
||||
return_content: true
|
||||
register: k3s_hash_sum_raw
|
||||
|
||||
- name: Ensure sha256sum is set from hashsum variable
|
||||
shell: "echo \"{{ k3s_hash_sum_raw.content }}\" | grep 'k3s' | awk '{ print $1 }'"
|
||||
changed_when: false
|
||||
register: k3s_hash_sum
|
||||
|
||||
- name: Ensure k3s binary is downloaded
|
||||
get_url:
|
||||
url: "{{ k3s_binary_url }}"
|
||||
dest: "{{ k3s_install_dir }}/k3s-{{ k3s_release_version }}"
|
||||
checksum: "sha256:{{ k3s_hash_sum.stdout }}"
|
||||
mode: 0755
|
|
@ -0,0 +1,108 @@
|
|||
---
|
||||
|
||||
- name: "Ensure cluster token is captured from {{ k3s_control_delegate }}"
|
||||
ansible.builtin.slurp:
|
||||
path: "{{ k3s_runtime_config['data-dir'] | default(k3s_data_dir) }}/server/token"
|
||||
register: k3s_slurped_cluster_token
|
||||
delegate_to: "{{ k3s_control_delegate }}"
|
||||
when:
|
||||
- k3s_control_token is not defined
|
||||
- not ansible_check_mode
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Ensure cluster token is formatted correctly for use in templates
|
||||
ansible.builtin.set_fact:
|
||||
k3s_control_token_content: "{{ k3s_control_token | default(k3s_slurped_cluster_token.content | b64decode) }}"
|
||||
when:
|
||||
- k3s_control_token is not defined
|
||||
- not ansible_check_mode
|
||||
|
||||
- name: Ensure dummy cluster token is defined for ansible_check_mode
|
||||
ansible.builtin.set_fact:
|
||||
k3s_control_token_content: "{{ k3s_control_delegate | to_uuid }}"
|
||||
check_mode: false
|
||||
when:
|
||||
- ansible_check_mode
|
||||
|
||||
- name: Ensure the cluster token file location exists
|
||||
ansible.builtin.file:
|
||||
path: "{{ k3s_token_location | dirname }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Ensure k3s cluster token file is present
|
||||
ansible.builtin.template:
|
||||
src: cluster-token.j2
|
||||
dest: "{{ k3s_token_location }}"
|
||||
mode: 0600
|
||||
become: "{{ k3s_become }}"
|
||||
notify:
|
||||
- "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
|
||||
- name: Ensure k3s service unit file is present
|
||||
ansible.builtin.template:
|
||||
src: k3s.service.j2
|
||||
dest: "{{ k3s_systemd_unit_dir }}/k3s.service"
|
||||
mode: 0644
|
||||
become: "{{ k3s_become }}"
|
||||
when:
|
||||
- k3s_service_handler[ansible_service_mgr] == 'systemd'
|
||||
notify:
|
||||
- "Reload {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
- "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
|
||||
- name: Ensure k3s service file is present
|
||||
ansible.builtin.template:
|
||||
src: k3s.openrc.j2
|
||||
dest: "{{ k3s_openrc_service_dir }}/k3s"
|
||||
mode: 0744
|
||||
when:
|
||||
- k3s_service_handler[ansible_service_mgr] == 'service'
|
||||
notify:
|
||||
- "Reload {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
- "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Ensure k3s logrotate file is present
|
||||
ansible.builtin.template:
|
||||
src: k3s.logrotate.j2
|
||||
dest: "{{ k3s_logrotate_dir }}/k3s"
|
||||
mode: 0640
|
||||
when:
|
||||
- k3s_service_handler[ansible_service_mgr] == 'service'
|
||||
notify:
|
||||
- "Reload {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
- "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Ensure k3s config file exists
|
||||
ansible.builtin.template:
|
||||
src: config.yaml.j2
|
||||
dest: "{{ k3s_config_file }}"
|
||||
mode: 0644
|
||||
notify:
|
||||
- "Reload {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
- "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Ensure secondary controllers are started
|
||||
ansible.builtin.include_tasks: ensure_control_plane_started_{{ ansible_service_mgr }}.yml
|
||||
when:
|
||||
- k3s_control_node
|
||||
- not k3s_primary_control_node
|
||||
|
||||
- name: Run control plane post checks
|
||||
ansible.builtin.import_tasks: post_checks_control_plane.yml
|
||||
when:
|
||||
- not k3s_skip_validation
|
||||
- not k3s_skip_post_checks
|
||||
|
||||
- name: Flush Handlers
|
||||
ansible.builtin.meta: flush_handlers
|
||||
|
||||
- name: Run node post checks
|
||||
ansible.builtin.import_tasks: post_checks_nodes.yml
|
||||
when:
|
||||
- not k3s_skip_validation
|
||||
- not k3s_skip_post_checks
|
|
@ -0,0 +1,11 @@
|
|||
---
|
||||
|
||||
- name: Ensure containerd registries file exists
|
||||
ansible.builtin.template:
|
||||
src: registries.yaml.j2
|
||||
dest: "{{ k3s_config_dir }}/registries.yaml"
|
||||
mode: 0600
|
||||
notify:
|
||||
- "Reload {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
- "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
become: "{{ k3s_become }}"
|
|
@ -0,0 +1,15 @@
|
|||
---
|
||||
|
||||
- name: Ensure k3s control plane server is started
|
||||
ansible.builtin.service:
|
||||
name: k3s
|
||||
state: started
|
||||
enabled: "{{ k3s_start_on_boot }}"
|
||||
register: k3s_service_start_k3s
|
||||
until: k3s_service_start_k3s is succeeded
|
||||
retries: 3
|
||||
delay: 3
|
||||
failed_when:
|
||||
- k3s_service_start_k3s is not succeeded
|
||||
- not ansible_check_mode
|
||||
become: "{{ k3s_become }}"
|
|
@ -0,0 +1,16 @@
|
|||
---
|
||||
|
||||
- name: Ensure k3s control plane server is started
|
||||
ansible.builtin.systemd:
|
||||
name: k3s
|
||||
state: started
|
||||
enabled: "{{ k3s_start_on_boot }}"
|
||||
scope: "{{ k3s_systemd_context }}"
|
||||
register: k3s_systemd_start_k3s
|
||||
until: k3s_systemd_start_k3s is succeeded
|
||||
retries: 3
|
||||
delay: 3
|
||||
failed_when:
|
||||
- k3s_systemd_start_k3s is not succeeded
|
||||
- not ansible_check_mode
|
||||
become: "{{ k3s_become }}"
|
|
@ -0,0 +1,12 @@
|
|||
---
|
||||
|
||||
- name: Ensure {{ directory.name }} exists
|
||||
ansible.builtin.file:
|
||||
path: "{{ directory.path }}"
|
||||
state: directory
|
||||
mode: "{{ directory.mode | default(755) }}"
|
||||
become: "{{ k3s_become }}"
|
||||
when:
|
||||
- directory.path is defined
|
||||
- directory.path | length > 0
|
||||
- directory.path != omit
|
|
@ -0,0 +1,51 @@
|
|||
---
|
||||
|
||||
- name: Ensure target host architecture information is set as a fact
|
||||
ansible.builtin.set_fact:
|
||||
k3s_arch: "{{ k3s_arch_lookup[ansible_architecture].arch }}"
|
||||
k3s_arch_suffix: "{{ k3s_arch_lookup[ansible_architecture].suffix }}"
|
||||
check_mode: false
|
||||
|
||||
- name: Ensure URLs are set as facts for downloading binaries
|
||||
ansible.builtin.set_fact:
|
||||
k3s_binary_url: "{{ k3s_github_download_url }}/{{ k3s_release_version }}/k3s{{ k3s_arch_suffix }}"
|
||||
k3s_hash_url: "{{ k3s_github_download_url }}/{{ k3s_release_version }}/sha256sum-{{ k3s_arch }}.txt"
|
||||
check_mode: false
|
||||
|
||||
- name: Override k3s_binary_url and k3s_hash_url facts for testing specific commit
|
||||
ansible.builtin.set_fact:
|
||||
k3s_binary_url: "https://storage.googleapis.com/k3s-ci-builds/k3s{{ k3s_arch_suffix }}-{{ k3s_release_version }}"
|
||||
k3s_hash_url: "https://storage.googleapis.com/k3s-ci-builds/k3s{{ k3s_arch_suffix }}-{{ k3s_release_version }}.sha256sum"
|
||||
when:
|
||||
- k3s_release_version | regex_search("^[a-z0-9]{40}$")
|
||||
check_mode: false
|
||||
|
||||
- name: Ensure the k3s hashsum is downloaded
|
||||
ansible.builtin.uri:
|
||||
url: "{{ k3s_hash_url }}"
|
||||
return_content: true
|
||||
register: k3s_hash_sum_raw
|
||||
check_mode: false
|
||||
|
||||
- name: Ensure sha256sum is set from hashsum variable
|
||||
ansible.builtin.set_fact:
|
||||
k3s_hash_sum: "{{ (k3s_hash_sum_raw.content.split('\n') |
|
||||
select('search', 'k3s' + k3s_arch_suffix) |
|
||||
reject('search', 'images') |
|
||||
first).split() | first }}"
|
||||
changed_when: false
|
||||
check_mode: false
|
||||
|
||||
- name: Ensure installation directory exists
|
||||
ansible.builtin.file:
|
||||
path: "{{ k3s_install_dir }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
|
||||
- name: Ensure k3s binary is downloaded
|
||||
ansible.builtin.get_url:
|
||||
url: "{{ k3s_binary_url }}"
|
||||
dest: "{{ k3s_install_dir }}/k3s-{{ k3s_release_version }}"
|
||||
checksum: "sha256:{{ k3s_hash_sum }}"
|
||||
mode: 0755
|
||||
become: "{{ k3s_become }}"
|
|
@ -0,0 +1,54 @@
|
|||
---
|
||||
|
||||
- name: Check if kubectl exists
|
||||
ansible.builtin.stat:
|
||||
path: "{{ k3s_install_dir }}/kubectl"
|
||||
register: k3s_check_kubectl
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Clean up nodes that are in an uninstalled state
|
||||
when:
|
||||
- k3s_check_kubectl.stat.exists is defined
|
||||
- k3s_check_kubectl.stat.exists
|
||||
- k3s_control_delegate is defined
|
||||
- not ansible_check_mode
|
||||
block:
|
||||
- name: Gather a list of nodes
|
||||
ansible.builtin.command:
|
||||
cmd: "{{ k3s_install_dir }}/kubectl get nodes"
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
delegate_to: "{{ k3s_control_delegate }}"
|
||||
run_once: true
|
||||
register: kubectl_get_nodes_result
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Ensure uninstalled nodes are drained # noqa no-changed-when
|
||||
ansible.builtin.command:
|
||||
cmd: >-
|
||||
{{ k3s_install_dir }}/kubectl drain {{ hostvars[item].ansible_hostname }}
|
||||
--ignore-daemonsets
|
||||
--{{ k3s_drain_command[ansible_version.string is version_compare('1.22', '>=')] }}
|
||||
--force
|
||||
delegate_to: "{{ k3s_control_delegate }}"
|
||||
run_once: true
|
||||
when:
|
||||
- kubectl_get_nodes_result.stdout is defined
|
||||
- hostvars[item].ansible_hostname in kubectl_get_nodes_result.stdout
|
||||
- hostvars[item].k3s_state is defined
|
||||
- hostvars[item].k3s_state == 'uninstalled'
|
||||
loop: "{{ ansible_play_hosts }}"
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Ensure uninstalled nodes are removed # noqa no-changed-when
|
||||
ansible.builtin.command:
|
||||
cmd: "{{ k3s_install_dir }}/kubectl delete node {{ hostvars[item].ansible_hostname }}"
|
||||
delegate_to: "{{ k3s_control_delegate }}"
|
||||
run_once: true
|
||||
when:
|
||||
- kubectl_get_nodes_result.stdout is defined
|
||||
- hostvars[item].ansible_hostname in kubectl_get_nodes_result.stdout
|
||||
- hostvars[item].k3s_state is defined
|
||||
- hostvars[item].k3s_state == 'uninstalled'
|
||||
loop: "{{ ansible_play_hosts }}"
|
||||
become: "{{ k3s_become }}"
|
|
@ -0,0 +1,32 @@
|
|||
---
|
||||
|
||||
- name: Ensure directories exist
|
||||
ansible.builtin.include_tasks: ensure_directories.yml
|
||||
loop: "{{ k3s_ensure_directories_exist }}"
|
||||
loop_control:
|
||||
loop_var: directory
|
||||
|
||||
- name: Ensure installed node
|
||||
ansible.builtin.include_tasks: ensure_installed_node.yml
|
||||
when:
|
||||
- ((k3s_control_node and k3s_controller_list | length == 1)
|
||||
or (k3s_primary_control_node and k3s_controller_list | length > 1))
|
||||
- not ansible_check_mode
|
||||
|
||||
- name: Flush Handlers
|
||||
ansible.builtin.meta: flush_handlers
|
||||
|
||||
- name: Ensure installed node | k3s_build_cluster
|
||||
ansible.builtin.include_tasks: ensure_installed_node.yml
|
||||
when: k3s_build_cluster
|
||||
|
||||
- name: Determine if the systems are already clustered
|
||||
ansible.builtin.stat:
|
||||
path: "{{ k3s_token_location }}"
|
||||
register: k3s_token_cluster_check
|
||||
|
||||
- name: Ensure control plane started with {{ ansible_service_mgr }}
|
||||
ansible.builtin.include_tasks: ensure_control_plane_started_{{ ansible_service_mgr }}.yml
|
||||
when: (k3s_control_node and k3s_controller_list | length == 1)
|
||||
or (k3s_primary_control_node and k3s_controller_list | length > 1)
|
||||
or k3s_token_cluster_check.stat.exists
|
|
@ -0,0 +1,103 @@
|
|||
---
|
||||
|
||||
- name: Ensure k3s is linked into the installation destination
|
||||
ansible.builtin.file:
|
||||
src: "{{ k3s_install_dir }}/k3s-{{ k3s_release_version }}"
|
||||
dest: "{{ k3s_install_dir }}/{{ item }}"
|
||||
state: "{{ 'hard' if k3s_install_hard_links else 'link' }}"
|
||||
force: "{{ k3s_install_hard_links }}"
|
||||
mode: 0755
|
||||
loop:
|
||||
- k3s
|
||||
- kubectl
|
||||
- crictl
|
||||
- ctr
|
||||
when: not ansible_check_mode
|
||||
notify:
|
||||
- "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Ensure k3s config file exists
|
||||
ansible.builtin.template:
|
||||
src: config.yaml.j2
|
||||
dest: "{{ k3s_config_file }}"
|
||||
mode: 0644
|
||||
notify:
|
||||
- "Reload {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
- "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Ensure cluster token is present when pre-defined
|
||||
when: k3s_control_token is defined
|
||||
block:
|
||||
- name: Ensure the cluster token file location exists
|
||||
ansible.builtin.file:
|
||||
path: "{{ k3s_token_location | dirname }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Ensure k3s cluster token file is present
|
||||
ansible.builtin.template:
|
||||
src: cluster-token.j2
|
||||
dest: "{{ k3s_token_location }}"
|
||||
mode: 0600
|
||||
become: "{{ k3s_become }}"
|
||||
notify:
|
||||
- "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
|
||||
- name: Ensure k3s service unit file is present
|
||||
ansible.builtin.template:
|
||||
src: k3s.service.j2
|
||||
dest: "{{ k3s_systemd_unit_dir }}/k3s.service"
|
||||
mode: 0644
|
||||
when:
|
||||
- k3s_service_handler[ansible_service_mgr] == 'systemd'
|
||||
notify:
|
||||
- "Reload {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
- "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Ensure k3s service file is present
|
||||
ansible.builtin.template:
|
||||
src: k3s.openrc.j2
|
||||
dest: "{{ k3s_openrc_service_dir }}/k3s"
|
||||
mode: 0744
|
||||
when:
|
||||
- k3s_service_handler[ansible_service_mgr] == 'service'
|
||||
notify:
|
||||
- "Reload {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
- "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Ensure k3s logrotate file is present
|
||||
ansible.builtin.template:
|
||||
src: k3s.logrotate.j2
|
||||
dest: "{{ k3s_logrotate_dir }}/k3s"
|
||||
mode: 0640
|
||||
when:
|
||||
- k3s_service_handler[ansible_service_mgr] == 'service'
|
||||
notify:
|
||||
- "Reload {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
- "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}"
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Ensure k3s killall script is present
|
||||
ansible.builtin.template:
|
||||
src: k3s-killall.sh.j2
|
||||
dest: "/usr/local/bin/k3s-killall.sh"
|
||||
mode: 0700
|
||||
become: "{{ k3s_become }}"
|
||||
when:
|
||||
- k3s_runtime_config is defined
|
||||
- ("rootless" not in k3s_runtime_config or not k3s_runtime_config.rootless)
|
||||
|
||||
- name: Ensure k3s uninstall script is present
|
||||
ansible.builtin.template:
|
||||
src: k3s-uninstall.sh.j2
|
||||
dest: "/usr/local/bin/k3s-uninstall.sh"
|
||||
mode: 0700
|
||||
become: "{{ k3s_become }}"
|
||||
when:
|
||||
- k3s_runtime_config is defined
|
||||
- ("rootless" not in k3s_runtime_config or not k3s_runtime_config.rootless)
|
|
@ -0,0 +1,70 @@
|
|||
---
|
||||
|
||||
- name: Ensure that the manifests directory exists
|
||||
ansible.builtin.file:
|
||||
state: directory
|
||||
path: "{{ k3s_server_manifests_dir }}"
|
||||
mode: 0755
|
||||
when: >-
|
||||
k3s_primary_control_node and
|
||||
(k3s_server_manifests_templates | length > 0
|
||||
or k3s_server_manifests_urls | length > 0)
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
- name: Ensure that the pod-manifests directory exists
|
||||
ansible.builtin.file:
|
||||
state: directory
|
||||
path: "{{ k3s_server_pod_manifests_dir }}"
|
||||
mode: 0755
|
||||
when: >-
|
||||
k3s_control_node and
|
||||
(k3s_server_pod_manifests_templates | length > 0
|
||||
or k3s_server_pod_manifests_urls | length > 0)
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
# https://rancher.com/docs/k3s/latest/en/advanced/#auto-deploying-manifests
|
||||
- name: Ensure auto-deploying manifests are copied to the primary controller
|
||||
ansible.builtin.template:
|
||||
src: "{{ item }}"
|
||||
dest: "{{ k3s_server_manifests_dir }}/{{ item | basename | replace('.j2', '') }}"
|
||||
mode: 0644
|
||||
loop: "{{ k3s_server_manifests_templates }}"
|
||||
become: "{{ k3s_become }}"
|
||||
when:
|
||||
- k3s_primary_control_node
|
||||
- k3s_server_manifests_templates | length > 0
|
||||
|
||||
- name: Ensure auto-deploying manifests are downloaded to the primary controller
|
||||
ansible.builtin.get_url:
|
||||
url: "{{ item.url }}"
|
||||
dest: "{{ k3s_server_manifests_dir }}/{{ item.filename | default(item.url | basename) }}"
|
||||
mode: 0644
|
||||
loop: "{{ k3s_server_manifests_urls }}"
|
||||
become: "{{ k3s_become }}"
|
||||
when:
|
||||
- k3s_primary_control_node
|
||||
- not ansible_check_mode
|
||||
- k3s_server_manifests_urls | length > 0
|
||||
|
||||
# https://github.com/k3s-io/k3s/pull/1691
|
||||
- name: Ensure static pod manifests are copied to controllers
|
||||
ansible.builtin.template:
|
||||
src: "{{ item }}"
|
||||
dest: "{{ k3s_server_pod_manifests_dir }}/{{ item | basename | replace('.j2', '') }}"
|
||||
mode: 0644
|
||||
loop: "{{ k3s_server_pod_manifests_templates }}"
|
||||
become: "{{ k3s_become }}"
|
||||
when:
|
||||
- k3s_control_node
|
||||
|
||||
# https://rancher.com/docs/k3s/latest/en/advanced/#auto-deploying-manifests
|
||||
- name: Ensure auto-deploying manifests are downloaded to the primary controller
|
||||
ansible.builtin.get_url:
|
||||
url: "{{ item.url }}"
|
||||
dest: "{{ k3s_server_pod_manifests_dir }}/{{ item.filename | default(item.url | basename) }}"
|
||||
mode: 0644
|
||||
loop: "{{ k3s_server_pod_manifests_urls }}"
|
||||
become: "{{ k3s_become }}"
|
||||
when:
|
||||
- k3s_control_node
|
||||
- not ansible_check_mode
|
|
@ -0,0 +1,31 @@
|
|||
---
|
||||
|
||||
- name: Ensure that the config.yaml.d directory exists
|
||||
ansible.builtin.file:
|
||||
state: directory
|
||||
path: "{{ k3s_config_yaml_d_dir }}"
|
||||
mode: 0755
|
||||
when: >-
|
||||
k3s_server_config_yaml_d_files | length > 0
|
||||
or k3s_agent_config_yaml_d_files | length > 0
|
||||
become: "{{ k3s_become }}"
|
||||
|
||||
# https://github.com/k3s-io/k3s/pull/3162
|
||||
- name: Ensure configuration files are copied to controllers
|
||||
ansible.builtin.template:
|
||||
src: "{{ item }}"
|
||||
dest: "{{ k3s_config_yaml_d_dir }}/{{ item | basename | replace('.j2', '') }}"
|
||||
mode: 0644
|
||||
loop: "{{ k3s_server_config_yaml_d_files }}"
|
||||
become: "{{ k3s_become }}"
|
||||
when: k3s_control_node
|
||||
|
||||
# https://github.com/k3s-io/k3s/pull/3162
|
||||
- name: Ensure configuration files are copied to agents
|
||||
ansible.builtin.template:
|
||||
src: "{{ item }}"
|
||||
dest: "{{ k3s_config_yaml_d_dir }}/{{ item | basename | replace('.j2', '') }}"
|
||||
mode: 0644
|
||||
loop: "{{ k3s_agent_config_yaml_d_files }}"
|
||||
become: "{{ k3s_become }}"
|
||||
when: not k3s_control_node
|
|
@ -0,0 +1,163 @@
|
|||
---

# Pre-flight cluster topology facts: decide which hosts are controllers,
# elect a primary controller, and derive the registration address that
# agents and secondary controllers will join against.

- name: Ensure k3s_build_cluster is false if running against a single node.
  ansible.builtin.set_fact:
    k3s_build_cluster: false
  when:
    - ansible_play_hosts | length < 2
    - k3s_registration_address is not defined

- name: Ensure k3s control node fact is set
  ansible.builtin.set_fact:
    # A standalone (non-cluster) node is implicitly its own control node.
    k3s_control_node: "{{ not k3s_build_cluster }}"
  when: k3s_control_node is not defined

- name: Ensure k3s primary control node fact is set
  ansible.builtin.set_fact:
    k3s_primary_control_node: "{{ not k3s_build_cluster }}"
  when: k3s_primary_control_node is not defined

- name: Ensure k3s control plane port is captured
  ansible.builtin.set_fact:
    k3s_control_plane_port: "{{ k3s_runtime_config['https-listen-port'] | default(6443) }}"
  # NOTE(review): delegate_to expects an inventory hostname; the literal
  # string "k3s_primary_control_node" is not templated here, and set_fact
  # is unaffected by delegation in any case — confirm this is intentional.
  delegate_to: k3s_primary_control_node

- name: Ensure k3s node IP is configured when node-ip is defined
  ansible.builtin.set_fact:
    k3s_node_ip: "{{ k3s_runtime_config['node-ip'] }}"
  when:
    - k3s_runtime_config['node-ip'] is defined

- name: Ensure a count of control nodes is generated from ansible_play_hosts
  ansible.builtin.set_fact:
    k3s_controller_list: "{{ k3s_controller_list + [item] }}"
  when:
    - hostvars[item].k3s_control_node is defined
    - hostvars[item].k3s_control_node
  loop: "{{ ansible_play_hosts }}"

- name: Ensure a k3s control node is defined if none are found in ansible_play_hosts
  when:
    - k3s_controller_list | length < 1
    - k3s_build_cluster is defined
    - k3s_build_cluster
  block:
    - name: Set the control host
      ansible.builtin.set_fact:
        k3s_control_node: true
      # Fall back to the first host of the play as the designated controller.
      when: inventory_hostname == ansible_play_hosts[0]

    - name: Ensure a count of control nodes is generated
      ansible.builtin.set_fact:
        k3s_controller_list: "{{ k3s_controller_list + [item] }}"
      when:
        - hostvars[item].k3s_control_node is defined
        - hostvars[item].k3s_control_node
      loop: "{{ ansible_play_hosts }}"

- name: Ensure an existing primary k3s control node is defined if multiple are found and at least one is running
  when:
    - k3s_controller_list | length >= 1
    - k3s_build_cluster is defined
    - k3s_build_cluster
    - k3s_control_delegate is not defined
  block:
    - name: Test if control plane is running
      ansible.builtin.wait_for:
        port: "{{ k3s_runtime_config['https-listen-port'] | default('6443') }}"
        host: "{{ k3s_runtime_config['bind-address'] | default('127.0.0.1') }}"
        timeout: 5
      register: k3s_control_node_running
      # A failure only means this controller is not up yet; evaluated below.
      ignore_errors: true
      when: k3s_control_node

    - name: List running control planes
      ansible.builtin.set_fact:
        k3s_running_controller_list: "{{ k3s_running_controller_list + [item] }}"
      when:
        - hostvars[item].k3s_control_node_running is not skipped
        - hostvars[item].k3s_control_node_running is succeeded
      loop: "{{ ansible_play_hosts }}"

    - name: Choose first running node as delegate
      ansible.builtin.set_fact:
        k3s_control_delegate: "{{ k3s_running_controller_list[0] }}"
      when: k3s_running_controller_list | length >= 1

    - name: Ensure k3s_primary_control_node is set on the delegate
      ansible.builtin.set_fact:
        k3s_primary_control_node: true
      when:
        - k3s_control_delegate is defined
        - inventory_hostname == k3s_control_delegate

- name: Ensure a primary k3s control node is defined if multiple are found in ansible_play_hosts
  ansible.builtin.set_fact:
    k3s_primary_control_node: true
  when:
    - k3s_controller_list is defined
    - inventory_hostname == k3s_controller_list[0]
    - k3s_build_cluster is defined
    - k3s_build_cluster
    - k3s_control_delegate is not defined

# Write a host/address/role mapping that later tasks grep to discover the
# delegate controller. Each record is folded onto one line, fields joined
# with " @@@ " separators.
- name: Ensure ansible_host is mapped to inventory_hostname
  ansible.builtin.blockinfile:
    path: /tmp/inventory.txt
    block: |
      {% for host in ansible_play_hosts %}
      {% filter replace('\n', ' ') %}
      {{ host }}
      @@@
      {{ hostvars[host].ansible_host | default(hostvars[host].ansible_fqdn) | string }}
      @@@
      C_{{ hostvars[host].k3s_control_node | string }}
      @@@
      P_{{ hostvars[host].k3s_primary_control_node | default(False) | string }}
      {% endfilter %}
      @@@ END:{{ host }}
      {% endfor %}
    create: true
    # Quoted to avoid YAML octal-integer parsing of the file mode.
    mode: "0600"
  check_mode: false
  when: k3s_control_node is defined

- name: Delegate an initializing control plane node
  when: >-
    k3s_registration_address is not defined
    or k3s_control_delegate is not defined
  block:
    - name: Lookup control node from file
      ansible.builtin.command:
        # With multiple controllers look for the primary (P_True),
        # otherwise any controller (C_True) will do.
        cmd: "grep -i '{{ 'P_True' if (k3s_controller_list | length > 1) else 'C_True' }}' /tmp/inventory.txt"
      changed_when: false
      check_mode: false
      register: k3s_control_delegate_raw

    - name: Ensure control node is delegated for obtaining a cluster token
      ansible.builtin.set_fact:
        # First field of the matched record is the inventory hostname.
        k3s_control_delegate: "{{ k3s_control_delegate_raw.stdout.split(' @@@ ')[0] }}"
      check_mode: false
      when: k3s_control_delegate is not defined

    - name: Ensure the node registration address is defined from k3s_control_node_address
      ansible.builtin.set_fact:
        k3s_registration_address: "{{ k3s_control_node_address }}"
      check_mode: false
      when: k3s_control_node_address is defined

    - name: Ensure the node registration address is defined from node-ip
      ansible.builtin.set_fact:
        k3s_registration_address: "{{ hostvars[k3s_control_delegate].k3s_node_ip }}"
      check_mode: false
      when:
        - k3s_registration_address is not defined
        - k3s_control_node_address is not defined
        - hostvars[k3s_control_delegate].k3s_node_ip is defined

    - name: Ensure the node registration address is defined
      ansible.builtin.set_fact:
        # Last resort: use the delegate's ansible_host (or FQDN).
        k3s_registration_address: "{{ hostvars[k3s_control_delegate].ansible_host | default(hostvars[k3s_control_delegate].ansible_fqdn) }}"
      check_mode: false
      when:
        - k3s_registration_address is not defined
        - k3s_control_node_address is not defined
|
|
@ -0,0 +1,20 @@
|
|||
---

# Start the k3s service. Two variants exist: a system-scope unit for
# standard installs, and a user-scope unit for rootless (k3s_non_root)
# installs.

- name: Ensure k3s service is started
  ansible.builtin.systemd:
    name: k3s
    state: started
    enabled: "{{ k3s_start_on_boot }}"
  when: >-
    k3s_non_root is not defined
    or not k3s_non_root
  become: "{{ k3s_become }}"

- name: Ensure k3s service is started
  ansible.builtin.systemd:
    name: k3s
    state: started
    enabled: "{{ k3s_start_on_boot }}"
    # Rootless installs run k3s as a user unit.
    scope: user
  when:
    - k3s_non_root is defined
    - k3s_non_root
  become: "{{ k3s_become }}"
|
|
@ -0,0 +1,20 @@
|
|||
---

# Stop the k3s service, mirroring the start tasks: system scope for
# standard installs, user scope for rootless (k3s_non_root) installs.

- name: Ensure k3s service is stopped
  ansible.builtin.systemd:
    name: k3s
    state: stopped
    enabled: "{{ k3s_start_on_boot }}"
  when: >-
    k3s_non_root is not defined
    or not k3s_non_root
  become: "{{ k3s_become }}"

- name: Ensure k3s service is stopped
  ansible.builtin.systemd:
    name: k3s
    state: stopped
    enabled: "{{ k3s_start_on_boot }}"
    # Rootless installs run k3s as a user unit.
    scope: user
  when:
    - k3s_non_root is defined
    - k3s_non_root
  become: "{{ k3s_become }}"
|
|
@ -0,0 +1,42 @@
|
|||
---

# Tear down a k3s installation using the upstream helper scripts, then
# remove the hard-linked CLI tools.

- name: Check to see if k3s-killall.sh exists
  ansible.builtin.stat:
    path: /usr/local/bin/k3s-killall.sh
  register: check_k3s_killall_script

- name: Check to see if k3s-uninstall.sh exists
  ansible.builtin.stat:
    path: /usr/local/bin/k3s-uninstall.sh
  register: check_k3s_uninstall_script

- name: Run k3s-killall.sh
  ansible.builtin.command:
    cmd: /usr/local/bin/k3s-killall.sh
  register: k3s_killall
  changed_when: k3s_killall.rc == 0
  when: check_k3s_killall_script.stat.exists
  become: "{{ k3s_become }}"

- name: Run k3s-uninstall.sh
  ansible.builtin.command:
    cmd: /usr/local/bin/k3s-uninstall.sh
  args:
    # The script deletes itself, so `removes` keeps this idempotent.
    removes: /usr/local/bin/k3s-uninstall.sh
  register: k3s_uninstall
  changed_when: k3s_uninstall.rc == 0
  when: check_k3s_uninstall_script.stat.exists
  become: "{{ k3s_become }}"

- name: Ensure hard links are removed
  ansible.builtin.file:
    path: "{{ k3s_install_dir }}/{{ item }}"
    state: absent
  loop:
    - kubectl
    - crictl
    - ctr
  when:
    - k3s_install_hard_links
    - not ansible_check_mode
  become: "{{ k3s_become }}"
|
|
@ -0,0 +1,15 @@
|
|||
---

- name: Ensure installation directory exists
  ansible.builtin.file:
    path: "{{ k3s_install_dir }}"
    state: directory
    # Quoted: a bare 0755 is parsed as octal integer 493 by YAML 1.1 parsers.
    mode: "0755"
  # NOTE(review): unlike the copy task below, this task has no `become` —
  # confirm the install directory is always creatable by the connecting user.

- name: Ensure k3s binary is copied from controller to target host
  ansible.builtin.copy:
    src: k3s
    # TODO: allow airgap to bypass version post-fix
    dest: "{{ k3s_install_dir }}/k3s-{{ k3s_release_version }}"
    mode: "0755"
  become: "{{ k3s_become }}"
|
|
@ -1,10 +0,0 @@
|
|||
---

- name: Get the latest release version from GitHub
  # /releases/latest redirects to the tagged release; the final URL is
  # captured in the registered result.
  ansible.builtin.uri:
    url: https://github.com/rancher/k3s/releases/latest
  register: k3s_latest_release

- name: Ensure the release version is set as a fact
  ansible.builtin.set_fact:
    # The last path component of the redirected URL is the version tag.
    k3s_release_version: "{{ k3s_latest_release.url.split('/')[-1] }}"
|
|
@ -1,27 +0,0 @@
|
|||
---

- name: Ensure Docker prerequisites are installed
  ansible.builtin.apt:
    # Installing the list in a single transaction is faster and more atomic
    # than looping the module once per package.
    name:
      - apt-transport-https
      - ca-certificates
      - curl
      - "{{ 'gnupg2' if ansible_distribution == 'Debian' else 'gnupg-agent' }}"
      - software-properties-common
    state: present
  register: ensure_docker_prerequisites_installed
  until: ensure_docker_prerequisites_installed is succeeded
  retries: 3
  delay: 10

- name: Ensure Docker APT key is present
  ansible.builtin.apt_key:
    url: https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg
    state: present

- name: Ensure Docker repository is installed and configured
  ansible.builtin.apt_repository:
    filename: docker-ce
    repo: "deb https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable"
    update_cache: true
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue