Testing auto-deploy on multi-master

Xan Manning 2020-01-13 21:32:31 +00:00
parent c8fb27ecd1
commit e3ce213bc0
8 changed files with 129 additions and 59 deletions

View File

@@ -8,6 +8,8 @@ services: docker
env:
  global:
    - ROLE_NAME: k3s
    - ROLE_OWNER: xanmanning
    - REPO_OWNER: PyratLabs
    - MOLECULE_SCENARIO: default
  matrix:
    - MOLECULE_DISTRO: geerlingguy/docker-centos8-ansible:latest
@@ -38,13 +40,21 @@ env:
    # Test auto deploying manifests
    - MOLECULE_DISTRO: geerlingguy/docker-ubuntu1804-ansible:latest
      MOLECULE_PLAYBOOK: playbook-auto-deploying-manifests.yml
    # Test multiple masters in control plane with PostgreSQL
    - MOLECULE_DISTRO: geerlingguy/docker-centos8-ansible:latest
      MOLECULE_SCENARIO_DIRECTORY: /home/travis/build/${REPO_OWNER}/${ROLE_OWNER}.${ROLE_NAME}/molecule/highavailability
      MOLECULE_SCENARIO: highavailability
    # Test multiple masters with auto deploying manifests
    - MOLECULE_DISTRO: geerlingguy/docker-centos8-ansible:latest
      MOLECULE_SCENARIO_DIRECTORY: /home/travis/build/${REPO_OWNER}/${ROLE_OWNER}.${ROLE_NAME}/molecule/highavailability
      MOLECULE_SCENARIO: highavailability
      MOLECULE_PLAYBOOK: playbook-auto-deploying-manifests.yml
    # Test multiple masters in control plane with DQLite
    - MOLECULE_DISTRO: geerlingguy/docker-centos8-ansible:latest
      MOLECULE_SCENARIO_DIRECTORY: /home/travis/build/${REPO_OWNER}/${ROLE_OWNER}.${ROLE_NAME}/molecule/highavailability
      MOLECULE_SCENARIO: highavailability
      MOLECULE_PLAYBOOK: playbook-dqlite.yml
@@ -55,8 +65,8 @@ install:
before_script:
  # Use actual Ansible Galaxy role name for the project directory.
  - cd ../
  - mv ansible-role-${ROLE_NAME} xanmanning.${ROLE_NAME}
  - cd xanmanning.${ROLE_NAME}
  - mv ansible-role-${ROLE_NAME} ${ROLE_OWNER}.${ROLE_NAME}
  - cd ${ROLE_OWNER}.${ROLE_NAME}
script:
  # Run tests.

README.md
View File

@@ -43,40 +43,42 @@ my spare time so I cannot promise a speedy fix delivery.
Below are variables that are set against all of the play hosts for environment
consistency.
| Variable | Description | Default Value |
|--------------------------------|--------------------------------------------------------------------------|--------------------------------|
| `k3s_cluster_state` | State of cluster: installed, started, stopped, restarted, downloaded. | installed |
| `k3s_release_version` | Use a specific version of k3s, eg. `v0.2.0`. Specify `false` for latest. | `false` |
| `k3s_github_url` | Set the GitHub URL to install k3s from. | https://github.com/rancher/k3s |
| `k3s_install_dir` | Installation directory for k3s. | `/usr/local/bin` |
| `k3s_use_experimental` | Allow the use of experimental features in k3s. | `false` |
| `k3s_non_root` | Install k3s as non-root user. See notes below. | `false` |
| `k3s_control_workers` | Are control hosts also workers? | `true` |
| `k3s_cluster_cidr` | Network CIDR to use for pod IPs | 10.42.0.0/16 |
| `k3s_service_cidr` | Network CIDR to use for service IPs | 10.43.0.0/16 |
| `k3s_control_node_address` | Use a specific control node address. IP or FQDN. | _NULL_ |
| `k3s_control_token`            | Use a specific control token; please read the notes below.              | _NULL_                         |
| `k3s_https_port`               | HTTPS listening port.                                                    | 6443                           |
| `k3s_use_docker` | Use Docker rather than Containerd? | `false` |
| `k3s_no_flannel` | Do not use Flannel | `false` |
| `k3s_flannel_backend` | Flannel backend ('none', 'vxlan', 'ipsec', or 'wireguard') | vxlan |
| `k3s_no_coredns` | Do not use CoreDNS | `false` |
| `k3s_cluster_dns` | Cluster IP for CoreDNS service. Should be in your service-cidr range. | _NULL_ |
| `k3s_cluster_domain` | Cluster Domain. | cluster.local |
| `k3s_resolv_conf` | Kubelet resolv.conf file | _NULL_ |
| `k3s_no_traefik` | Do not use Traefik | `false` |
| `k3s_no_servicelb` | Do not use ServiceLB, necessary for using something like MetalLB. | `false` |
| `k3s_no_local_storage` | Do not use Local Storage | `false` |
| `k3s_no_metrics_server` | Do not deploy metrics server | `false` |
| `k3s_disable_scheduler` | Disable Kubernetes default scheduler | `false` |
| `k3s_disable_cloud_controller` | Disable k3s default cloud controller manager. | `false` |
| `k3s_disable_network_policy` | Disable k3s default network policy controller. | `false` |
| `k3s_write_kubeconfig_mode`    | Define the file mode for the generated KubeConfig, eg. `644`             | _NULL_                         |
| `k3s_datastore_endpoint` | Define the database or etcd cluster endpoint for HA. | _NULL_ |
| `k3s_datastore_cafile` | Define the database TLS CA file. | _NULL_ |
| `k3s_datastore_certfile` | Define the database TLS Cert file. | _NULL_ |
| `k3s_datastore_keyfile` | Define the database TLS Key file. | _NULL_ |
| `k3s_dqlite_datastore` | Use DQLite as the database backend for HA. (EXPERIMENTAL) | `false` |
| Variable | Description | Default Value |
|----------------------------------|--------------------------------------------------------------------------|-----------------------------------------|
| `k3s_cluster_state` | State of cluster: installed, started, stopped, restarted, downloaded. | installed |
| `k3s_release_version` | Use a specific version of k3s, eg. `v0.2.0`. Specify `false` for latest. | `false` |
| `k3s_github_url` | Set the GitHub URL to install k3s from. | https://github.com/rancher/k3s |
| `k3s_install_dir` | Installation directory for k3s. | `/usr/local/bin` |
| `k3s_server_manifests_dir`       | Path in which to place the `k3s_server_manifests_templates`.            | `/var/lib/rancher/k3s/server/manifests` |
| `k3s_server_manifests_templates` | A list of auto-deploying manifest templates.                             | []                                       |
| `k3s_use_experimental` | Allow the use of experimental features in k3s. | `false` |
| `k3s_non_root` | Install k3s as non-root user. See notes below. | `false` |
| `k3s_control_workers` | Are control hosts also workers? | `true` |
| `k3s_cluster_cidr` | Network CIDR to use for pod IPs | 10.42.0.0/16 |
| `k3s_service_cidr` | Network CIDR to use for service IPs | 10.43.0.0/16 |
| `k3s_control_node_address` | Use a specific control node address. IP or FQDN. | _NULL_ |
| `k3s_control_token`              | Use a specific control token; please read the notes below.              | _NULL_                                   |
| `k3s_https_port`                 | HTTPS listening port.                                                    | 6443                                     |
| `k3s_use_docker` | Use Docker rather than Containerd? | `false` |
| `k3s_no_flannel` | Do not use Flannel | `false` |
| `k3s_flannel_backend` | Flannel backend ('none', 'vxlan', 'ipsec', or 'wireguard') | vxlan |
| `k3s_no_coredns` | Do not use CoreDNS | `false` |
| `k3s_cluster_dns` | Cluster IP for CoreDNS service. Should be in your service-cidr range. | _NULL_ |
| `k3s_cluster_domain` | Cluster Domain. | cluster.local |
| `k3s_resolv_conf` | Kubelet resolv.conf file | _NULL_ |
| `k3s_no_traefik` | Do not use Traefik | `false` |
| `k3s_no_servicelb` | Do not use ServiceLB, necessary for using something like MetalLB. | `false` |
| `k3s_no_local_storage` | Do not use Local Storage | `false` |
| `k3s_no_metrics_server` | Do not deploy metrics server | `false` |
| `k3s_disable_scheduler` | Disable Kubernetes default scheduler | `false` |
| `k3s_disable_cloud_controller` | Disable k3s default cloud controller manager. | `false` |
| `k3s_disable_network_policy` | Disable k3s default network policy controller. | `false` |
| `k3s_write_kubeconfig_mode`      | Define the file mode for the generated KubeConfig, eg. `644`             | _NULL_                                   |
| `k3s_datastore_endpoint` | Define the database or etcd cluster endpoint for HA. | _NULL_ |
| `k3s_datastore_cafile` | Define the database TLS CA file. | _NULL_ |
| `k3s_datastore_certfile` | Define the database TLS Cert file. | _NULL_ |
| `k3s_datastore_keyfile` | Define the database TLS Key file. | _NULL_ |
| `k3s_dqlite_datastore` | Use DQLite as the database backend for HA. (EXPERIMENTAL) | `false` |
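To make the table concrete, below is a minimal group_vars sketch; the file name and values are purely illustrative, and only variables documented above are used.

```yaml
# group_vars/k3s_nodes.yml -- illustrative values only
k3s_release_version: v1.0.0        # pin a release rather than tracking latest
k3s_use_docker: false              # keep the bundled containerd runtime
k3s_flannel_backend: vxlan         # the documented default backend
k3s_write_kubeconfig_mode: "644"   # file mode for the generated KubeConfig
```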
#### Important note about `k3s_release_version`
@@ -112,26 +114,23 @@ Please note that this may potentially break setting up agents.
Below are variables that are set against specific hosts in your inventory.
| Variable | Description | Default Value |
|----------------------------------|--------------------------------------------------------------------------|-----------------------------------------|
| `k3s_control_node`               | Define the host as a control plane node (True/False).                   | `false`                                  |
| `k3s_node_name` | Define the name of this node. | `$(hostname)` |
| `k3s_node_id` | Define the ID of this node. | _NULL_ |
| `k3s_flannel_interface` | Define the flannel proxy interface for this node. | _NULL_ |
| `k3s_bind_address` | Define the bind address for this node. | localhost |
| `k3s_node_ip_address` | IP Address to advertise for this node. | _NULL_ |
| `k3s_node_external_address` | External IP Address to advertise for this node. | _NULL_ |
| `k3s_node_labels` | List of node labels. | _NULL_ |
| `k3s_node_taints` | List of node taints. | _NULL_ |
| `k3s_node_data_dir` | Folder to hold state. | `/var/lib/rancher/k3s` |
| `k3s_tls_san`                    | Add an additional hostname or IP as a Subject Alternative Name in the TLS cert. | _NULL_                           |
| `k3s_server_manifests_dir`       | Path in which to place the `k3s_server_manifests_templates`.            | `/var/lib/rancher/k3s/server/manifests` |
| `k3s_server_manifests_templates` | A list of Auto-Deploying Manifests Templates. | [] |
| Variable | Description | Default Value |
|----------------------------------|--------------------------------------------------------------------------|------------------------|
| `k3s_control_node`               | Define the host as a control plane node (True/False).                   | `false`                 |
| `k3s_node_name` | Define the name of this node. | `$(hostname)` |
| `k3s_node_id` | Define the ID of this node. | _NULL_ |
| `k3s_flannel_interface` | Define the flannel proxy interface for this node. | _NULL_ |
| `k3s_bind_address` | Define the bind address for this node. | localhost |
| `k3s_node_ip_address` | IP Address to advertise for this node. | _NULL_ |
| `k3s_node_external_address` | External IP Address to advertise for this node. | _NULL_ |
| `k3s_node_labels` | List of node labels. | _NULL_ |
| `k3s_node_taints` | List of node taints. | _NULL_ |
| `k3s_node_data_dir` | Folder to hold state. | `/var/lib/rancher/k3s` |
| `k3s_tls_san`                    | Add an additional hostname or IP as a Subject Alternative Name in the TLS cert. | _NULL_          |
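As with the global variables, a minimal host_vars sketch (hypothetical host name and values, using only the variables documented above) might be:

```yaml
# host_vars/node1.yml -- hypothetical per-host settings
k3s_node_name: node1
k3s_flannel_interface: eth1
k3s_node_ip_address: 10.10.9.2
k3s_tls_san: k3s.example.com
```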
#### Important note about `k3s_control_node` and High Availability (HA)
By default only one host will be defined as a control node by Ansible. If you
do not set a host as a control node, the role will automatically delegate
the first play host as a control node (master). This is not suitable for use in
a Production workload.
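A minimal inventory sketch (hypothetical host names) marking more than one host as a control node might look like this:

```yaml
# inventory.yml -- hypothetical hosts; every host with k3s_control_node set
# joins the control plane, the remaining hosts join as workers.
k3s_nodes:
  hosts:
    node1:
      k3s_control_node: true
    node2:
      k3s_control_node: true
    node3: {}
```

With more than one control node you will also need a shared datastore, set via `k3s_datastore_endpoint` (or the experimental DQLite backend), as shown in the HA example playbook below.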
@@ -183,16 +182,35 @@ k3s_node_taints:
No dependencies on other roles.
## Example Playbook
## Example Playbooks
Example playbook:
Example playbook, single master node running v0.10.2:
```yaml
- hosts: k3s_nodes
  become: true
  roles:
    - { role: xanmanning.k3s, k3s_release_version: v0.10.2 }
```
Example playbook, highly available control plane running the latest release:
```yaml
- hosts: k3s_nodes
  become: true
  vars:
    molecule_is_test: true
    k3s_control_node_address: loadbalancer
    k3s_datastore_endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable"
  pre_tasks:
    - name: Set each node to be a control node
      set_fact:
        k3s_control_node: true
      when: inventory_hostname in ['node2', 'node3']
  roles:
    - role: xanmanning.k3s
```
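If the role is consumed from Ansible Galaxy under the `xanmanning.k3s` name used above, a minimal requirements file sketch (illustrative only) is:

```yaml
# requirements.yml -- install with: ansible-galaxy install -r requirements.yml
- src: xanmanning.k3s
```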
## License
BSD

View File

@@ -0,0 +1,17 @@
---
- name: Converge
  hosts: node*
  become: true
  vars:
    molecule_is_test: true
    k3s_control_node_address: loadbalancer
    k3s_datastore_endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable"
    k3s_server_manifests_templates:
      - "molecule/default/templates/00-ns-monitoring.yml.j2"
  pre_tasks:
    - name: Set each node to be a control node
      set_fact:
        k3s_control_node: true
      when: inventory_hostname in ['node1', 'node2']
  roles:
    - role: xanmanning.k3s

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: monitoring

View File

@@ -48,7 +48,12 @@
    - reload systemd
    - restart k3s
- meta: flush_handlers
- name: Ensure secondary masters are started
  service:
    name: k3s
    state: started
    enabled: true
  when: k3s_control_node and not k3s_primary_control_node
- name: Wait for control plane to be ready to accept connections
  wait_for:
@@ -58,6 +63,8 @@
    timeout: 300
  when: k3s_control_node
- meta: flush_handlers
- name: Wait for all nodes to be ready
  command: "{{ k3s_install_dir }}/kubectl get nodes"
  changed_when: false

View File

@@ -1,13 +1,13 @@
---
- name: Ensure that the manifests directory exists, if required
- name: Ensure that the manifests directory exists
  file:
    state: directory
    path: "{{ k3s_server_manifests_dir }}"
  when: k3s_server_manifests_templates | length > 0
# https://rancher.com/docs/k3s/latest/en/advanced/#auto-deploying-manifests
- name: Copy Auto-Deploying Manifests to Cluster
- name: Ensure Auto-Deploying Manifests are copied to controllers
  template:
    src: "{{ item }}"
    dest: "{{ k3s_server_manifests_dir }}/{{ item | basename | replace('.j2','') }}"
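
For clarity, the destination filename above is derived entirely from the template path. A hypothetical debug task (not part of the role) illustrates the `basename`/`replace` transformation, assuming the default `k3s_server_manifests_dir`:

```yaml
# Hypothetical illustration only: basename strips the directory from the
# template path and replace drops the .j2 suffix, giving the manifest name.
- name: Show where a manifest template would be rendered
  debug:
    msg: "{{ k3s_server_manifests_dir }}/{{ item | basename | replace('.j2','') }}"
  loop:
    - "molecule/default/templates/00-ns-monitoring.yml.j2"
# Prints: /var/lib/rancher/k3s/server/manifests/00-ns-monitoring.yml
```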

View File

@@ -29,6 +29,8 @@
- import_tasks: build/download-k3s.yml
- import_tasks: build/preconfigure-k3s-auto-deploying-manifests.yml
  when: k3s_control_node
        and k3s_server_manifests_templates | length > 0
- import_tasks: build/install-k3s.yml

View File

@@ -90,6 +90,18 @@
    fail_msg: "--bind-address is not supported in {{ k3s_release_version }}"
  when: k3s_bind_address is defined and k3s_bind_address
- name: Check k3s_server_manifests_templates against k3s version
  assert:
    that:
      - (k3s_release_version | replace('v', '')) is version_compare('0.5.0', '>=')
    success_msg: "Auto-deploying manifests are supported in {{ k3s_release_version }}"
    fail_msg: |
      Auto-deploying manifests support is limited in {{ k3s_release_version }}.
      To disable this message ensure k3s_use_experimental is set to true.
  when: k3s_server_manifests_templates is defined
        and k3s_server_manifests_templates | length > 0
        and (k3s_use_experimental is not defined or not k3s_use_experimental)
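
The check above means that with a k3s release older than v0.5.0, auto-deploying manifests require an explicit opt-in to experimental features. A hypothetical vars sketch for that case:

```yaml
# Hypothetical opt-in: with k3s_use_experimental set, the version assertion
# above is skipped (support on older releases may still be limited).
k3s_release_version: v0.4.0
k3s_use_experimental: true
k3s_server_manifests_templates:
  - "molecule/default/templates/00-ns-monitoring.yml.j2"
```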
- name: Check k3s_node_labels against k3s version
  assert:
    that: