Added new options for Flannel interfaces, tested on openSUSE Leap 15.1

This commit is contained in:
Xan Manning 2019-09-29 18:03:50 +01:00
parent f077120580
commit 2327d0433d
6 changed files with 60 additions and 9 deletions

View File

@ -9,6 +9,8 @@ This role has been tested on Ansible 2.6.0+ against the following Linux Distribu
- CentOS 7
- Debian 9
- Debian 10
- openSUSE Leap 15
- Ubuntu 18.04 LTS
## Disclaimer
@ -33,6 +35,7 @@ consistency.
| `k3s_install_dir` | Installation directory for k3s. | `/usr/local/bin` |
| `k3s_control_workers` | Are control hosts also workers? | `true` |
| `k3s_ensure_docker_installed` | Use Docker rather than Containerd? | `false` |
| `k3s_no_flannel` | Do not use Flannel. | `false` |
#### Important note about `k3s_release_version`
@ -48,9 +51,10 @@ k3s_release_version: v0.2.0
Below are variables that are set against specific hosts in your inventory.
| Variable | Description | Default Value |
|--------------------|--------------------------------------------------------|---------------|
| `k3s_control_node` | Define the host as a control plane node (True/False). | `false` |
| Variable | Description | Default Value |
|-------------------------|--------------------------------------------------------|---------------|
| `k3s_control_node`      | Define the host as a control plane node (True/False). | `false` |
| `k3s_flannel_interface` | Define the Flannel proxy interface for this node.      |         |
#### Important note about `k3s_control_node`
@ -60,6 +64,12 @@ set to true the play will fail.
If you do not set a host as a control node, the role will automatically delegate
the first play host as a control node.
#### Important note about `k3s_flannel_interface`
If you are running k3s on hosts with multiple network interfaces, set
`k3s_flannel_interface` to a network interface that is routable to the
master node(s); otherwise Flannel may bind to an interface the masters
cannot reach.
## Dependencies
No dependencies on other roles.

View File

@ -15,3 +15,6 @@ k3s_control_workers: true
# Ensure Docker is installed on nodes
k3s_ensure_docker_installed: false
# Disable flannel
k3s_no_flannel: false

View File

@ -43,6 +43,9 @@ galaxy_info:
- buster
- jessie
- stretch
- name: SLES
versions:
- 15
- name: Ubuntu
versions:
- xenial
@ -56,6 +59,7 @@ galaxy_info:
- containerd
- cluster
- rancher
- lightweight
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.

View File

@ -8,9 +8,9 @@ Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
{% if k3s_control_node %}
ExecStart={{ k3s_install_dir }}/k3s server{{ ' --disable-agent' if not k3s_control_workers else '' }}{{ ' --docker' if k3s_ensure_docker_installed else '' }}
ExecStart={{ k3s_install_dir }}/k3s server{{ ' --disable-agent' if not k3s_control_workers else '' }}{{ ' --flannel-iface ' + k3s_flannel_interface if k3s_flannel_interface is defined and not k3s_no_flannel else '' }}{{ ' --no-flannel' if k3s_no_flannel else '' }}{{ ' --docker' if k3s_ensure_docker_installed else '' }}
{% else %}
ExecStart={{ k3s_install_dir }}/k3s agent{{ ' --docker' if k3s_ensure_docker_installed else '' }} --server https://{{ k3s_control_node_address }}:6443 --token {{ k3s_control_token.content | b64decode }}
ExecStart={{ k3s_install_dir }}/k3s agent{{ ' --docker' if k3s_ensure_docker_installed else '' }}{{ ' --flannel-iface ' + k3s_flannel_interface if k3s_flannel_interface is defined and not k3s_no_flannel else '' }}{{ ' --no-flannel' if k3s_no_flannel else '' }} --server https://{{ k3s_control_node_address }}:6443 --token {{ k3s_control_token.content | b64decode }}
{% endif %}
KillMode=process
Delegate=yes

33
tests/Vagrantfile vendored
View File

@ -26,6 +26,18 @@ else
fi
SCRIPT
$opensuse_provision = <<SCRIPT
if [ ! -f .vagrant_provision ] ; then
echo "Installing dependencies ..."
sudo zypper refresh > /dev/null 2>&1 && echo "[OK] Update Zypper cache."
sudo zypper install -y python > /dev/null 2>&1 && \
echo "[OK] Installing Python."
touch .vagrant_provision
else
echo "Already Provisioned."
fi
SCRIPT
VAGRANTFILE_API_VERSION = "2"
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
@ -52,20 +64,33 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
end
config.vm.define "node3" do |node3|
node3.vm.box = "ubuntu/bionic64"
node3.vm.box = "bento/opensuse-leap-15.1"
node3.vm.hostname = "k3s-node03"
node3.vm.network "private_network", ip: "172.16.3.32"
node3.vm.synced_folder ".", "/vagrant", disabled: true
node3.vm.provider "virtualbox" do |vb|
vb.name = "k3s - node3"
vb.memory = 512
end
node3.vm.provision "shell", inline: $debian_provision
node3.vm.provision "ansible" do |a|
node3.vm.provision "shell", inline: $opensuse_provision
end
config.vm.define "node4" do |node4|
node4.vm.box = "ubuntu/bionic64"
node4.vm.hostname = "k3s-node04"
node4.vm.network "private_network", ip: "172.16.3.33"
node4.vm.provider "virtualbox" do |vb|
vb.name = "k3s - node4"
vb.memory = 512
end
node4.vm.provision "shell", inline: $debian_provision
node4.vm.provision "ansible" do |a|
a.limit = "all"
a.config_file = "ansible.cfg"
a.inventory_path = "inventory.yml"
a.playbook = "test.yml"
a.verbose = "v"
a.verbose = "vv"
end
end
end

View File

@ -10,13 +10,22 @@ k3s_nodes:
ansible_user: vagrant
ansible_port: 22
ansible_ssh_private_key_file: '.vagrant/machines/node1/virtualbox/private_key'
k3s_flannel_interface: eth1
node2:
ansible_host: 172.16.3.31
ansible_user: vagrant
ansible_port: 22
ansible_ssh_private_key_file: '.vagrant/machines/node2/virtualbox/private_key'
k3s_flannel_interface: eth1
node3:
ansible_host: 172.16.3.32
ansible_user: vagrant
ansible_port: 22
ansible_ssh_private_key_file: '.vagrant/machines/node3/virtualbox/private_key'
k3s_flannel_interface: eth1
node4:
ansible_host: 172.16.3.33
ansible_user: vagrant
ansible_port: 22
ansible_ssh_private_key_file: '.vagrant/machines/node4/virtualbox/private_key'
k3s_flannel_interface: enp0s8