Test playbook using molecule (#67)
* Test cluster using molecule
* Fix detection of first control node
* Include --flannel-iface and --node-ip as k3s arguments
* Store logs of k3s-init.service as GitHub job artifacts
parent 3c36dc8bfd
commit a6b2a95b7e
@@ -1,3 +1,16 @@
---
exclude_paths:
  # default paths
  - '.cache/'
  - '.github/'
  - 'test/fixtures/formatting-before/'
  - 'test/fixtures/formatting-prettier/'

  # The "converge" and "reset" playbooks use import_playbook in
  # conjunction with the "env" lookup plugin, which causes the
  # ansible-lint syntax check to fail.
  - '**/converge.yml'
  - '**/reset.yml'

skip_list:
  - 'fqcn-builtins'
63 .github/workflows/test.yml vendored
@@ -7,24 +7,21 @@ on:
      - master

jobs:
  vagrant:
    name: Vagrant
  molecule:
    name: Molecule
    runs-on: macos-12

    strategy:
      matrix:
        scenario:
          - default
    env:
      HOMEBREW_NO_INSTALL_CLEANUP: 1
      VAGRANT_CWD: ${{ github.workspace }}/vagrant
      PYTHON_VERSION: "3.10"

    steps:
      - name: Check out the codebase
        uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # 3.0.2

      - name: Install Ansible
        run: brew install ansible

      - name: Install role dependencies
        run: ansible-galaxy install -r collections/requirements.yml

      - name: Configure VirtualBox
        run: >-
          sudo mkdir -p /etc/vbox &&
@@ -39,31 +36,29 @@ jobs:
          restore-keys: |
            vagrant-boxes

      - name: Create virtual machines
        run: vagrant up
        timeout-minutes: 10
      - name: Set up Python ${{ env.PYTHON_VERSION }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Provision cluster using Ansible
        # Since Ansible sets up _all_ machines, it is sufficient to run it only
        # once (i.e, for a single node - we are choosing control1 here)
        run: vagrant provision control1 --provision-with ansible
        timeout-minutes: 25
      - name: Install dependencies
        run: >-
          python3 -m pip install --upgrade pip &&
          python3 -m pip install -r requirements.txt

      - name: Set up kubectl on the host
        run: brew install kubectl &&
          mkdir -p ~/.kube &&
          vagrant ssh control1 --command "cat ~/.kube/config" > ~/.kube/config
      - name: Test with molecule
        run: molecule test --scenario-name ${{ matrix.scenario }}
        env:
          ANSIBLE_K3S_LOG_DIR: ${{ runner.temp }}/logs/k3s-ansible/${{ matrix.scenario }}

      - name: Show cluster nodes
        run: kubectl describe -A nodes

      - name: Show cluster pods
        run: kubectl describe -A pods

      - name: Test cluster
        run: $VAGRANT_CWD/test_cluster.py --verbose --locals
        timeout-minutes: 5

      - name: Destroy virtual machines
      - name: Upload log files
        if: always() # do this even if a step before has failed
        run: vagrant destroy --force
        uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # 3.1.0
        with:
          name: logs
          path: |
            ${{ runner.temp }}/logs

      - name: Delete old box versions
        if: always() # do this even if a step before has failed
        run: vagrant box prune --force
2 .gitignore vendored
@@ -1 +1 @@
.vagrant
.env/
10 README.md
@@ -100,12 +100,12 @@ See the commands [here](https://docs.technotim.live/posts/k3s-etcd-ansible/#test

Be sure to see [this post](https://github.com/techno-tim/k3s-ansible/discussions/20) on how to troubleshoot common problems

### 🔷 Vagrant
### Testing the playbook using molecule

You may want to kickstart your k3s cluster by using Vagrant to quickly build you all needed VMs with one command.
Head to the `vagrant` subfolder and type `vagrant up` to get your environment setup.
After the VMs have got build, deploy k3s using the Ansible playbook `site.yml` by the
`vagrant provision --provision-with ansible` command.
This playbook includes a [molecule](https://molecule.rtfd.io/)-based test setup.
It is run automatically in CI, but you can also run the tests locally.
This might be helpful for quick feedback in a few cases.
You can find more information about it [here](molecule/README.md).

## Thanks 🤝
@@ -2,3 +2,4 @@
collections:
  - name: community.general
  - name: ansible.posix
  - name: kubernetes.core
@@ -17,9 +17,23 @@ apiserver_endpoint: "192.168.30.222"
# this token should be alpha numeric only
k3s_token: "some-SUPER-DEDEUPER-secret-password"

# The IP on which the node is reachable in the cluster.
# A sensible default is provided here, but you can still override
# it for each of your hosts.
k3s_node_ip: '{{ ansible_facts[flannel_iface]["ipv4"]["address"] }}'

# these arguments are recommended for servers as well as agents:
extra_args: >-
  --flannel-iface={{ flannel_iface }}
  --node-ip={{ k3s_node_ip }}

# change these to your liking; the only required one is --disable servicelb
extra_server_args: "--disable servicelb --disable traefik"
extra_agent_args: ""
extra_server_args: >-
  {{ extra_args }}
  --disable servicelb
  --disable traefik
extra_agent_args: >-
  {{ extra_args }}

# image tag for kube-vip
kube_vip_tag_version: "v0.5.0"
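For orientation, here is a rough sketch of what these folded scalars evaluate to on one host, assuming `flannel_iface` is `eth1` and that interface carries the address `192.168.30.38` (the address used for `control1` in the molecule scenario further down). The values are illustrative only, not output from a real run:

```bash
# Hypothetical rendered values, assuming flannel_iface=eth1 resolves
# to 192.168.30.38 on this host:
extra_args="--flannel-iface=eth1 --node-ip=192.168.30.38"
extra_server_args="--flannel-iface=eth1 --node-ip=192.168.30.38 --disable servicelb --disable traefik"
extra_agent_args="--flannel-iface=eth1 --node-ip=192.168.30.38"
```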
67 molecule/README.md Normal file
@@ -0,0 +1,67 @@
# Test suites for `k3s-ansible`

This folder contains the [molecule](https://molecule.rtfd.io/)-based test setup for this playbook.

## Scenarios

We have these scenarios:

- **default**:
  A 3 control + 2 worker node cluster based very closely on the [sample inventory](../inventory/sample/).

## How to execute

To test on your local machine, follow these steps:

### System requirements

Make sure that the following software packages are available on your system:

- [Python 3](https://www.python.org/downloads)
- [Vagrant](https://www.vagrantup.com/downloads)
- [VirtualBox](https://www.virtualbox.org/wiki/Downloads)

### Set up VirtualBox networking on Linux and macOS

_You can safely skip this if you are working on Windows._

The test cluster uses the `192.168.30.0/24` subnet, which is [not set up by VirtualBox automatically](https://www.virtualbox.org/manual/ch06.html#network_hostonly).
To set the subnet up for use with VirtualBox, please make sure that `/etc/vbox/networks.conf` exists and that it contains this line:

```
* 192.168.30.0/24
```
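If the file does not exist yet, it can be created along these lines; the exact commands are a suggestion rather than repository content (the CI workflow does the equivalent, starting with `sudo mkdir -p /etc/vbox`):

```bash
# Suggested way to create the VirtualBox host-only network allow-list.
# Adjust as needed if you already manage /etc/vbox/networks.conf.
sudo mkdir -p /etc/vbox
echo "* 192.168.30.0/24" | sudo tee -a /etc/vbox/networks.conf
```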
### Install Python dependencies

You will get [Molecule, Ansible and a few extra dependencies](../requirements.txt) via [pip](https://pip.pypa.io/).
Usually, it is advisable to work in a [virtual environment](https://docs.python.org/3/tutorial/venv.html) for this:

```bash
cd /path/to/k3s-ansible

# Create a virtualenv at ".env". You only need to do this once.
python3 -m venv .env

# Activate the virtualenv for your current shell session.
# If you start a new session, you will have to repeat this.
source .env/bin/activate

# Install the required packages into the virtualenv.
# These remain installed across shell sessions.
python3 -m pip install -r requirements.txt
```

### Run molecule

With the virtual environment from the previous step active in your shell session, you can now use molecule to test the playbook.
Interesting commands are:

- `molecule create`: Create virtual machines for the test cluster nodes.
- `molecule destroy`: Delete the virtual machines for the test cluster nodes.
- `molecule converge`: Run the `site` playbook on the nodes of the test cluster.
- `molecule side_effect`: Run the `reset` playbook on the nodes of the test cluster.
- `molecule verify`: Verify that the cluster works correctly.
- `molecule test`: The "all-in-one" sequence of steps that is executed in CI.
  This includes the `create`, `converge`, `verify`, `side_effect` and `destroy` steps.
  See [`molecule.yml`](default/molecule.yml) for more details.
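As an illustration, a full local run of the scenario used by CI could look like this; setting `ANSIBLE_K3S_LOG_DIR` is optional and simply mirrors the log-collection hook from the GitHub workflow above:

```bash
# Run the same all-in-one sequence as CI for the "default" scenario and,
# optionally, collect the k3s-init.service logs into ./logs.
ANSIBLE_K3S_LOG_DIR="$PWD/logs" molecule test --scenario-name default
```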
75 molecule/default/molecule.yml Normal file
@@ -0,0 +1,75 @@
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - &control
    name: control1
    box: generic/ubuntu2204
    memory: 2048
    cpus: 2
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: "vagrant"
      ssh.password: "vagrant"
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.38
  - <<: *control
    name: control2
    interfaces:
      - network_name: private_network
        ip: 192.168.30.39
  - <<: *control
    name: control3
    interfaces:
      - network_name: private_network
        ip: 192.168.30.40
  - &node
    <<: *control
    name: node1
    groups:
      - k3s_cluster
      - node
    interfaces:
      - network_name: private_network
        ip: 192.168.30.41
  - <<: *node
    name: node2
    interfaces:
      - network_name: private_network
        ip: 192.168.30.42
provisioner:
  name: ansible
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
  env:
    OVERRIDES_FILE: ../default/overrides.yml
scenario:
  test_sequence:
    - dependency
    - lint
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
11 molecule/default/overrides.yml Normal file
@@ -0,0 +1,11 @@
---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
        flannel_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45
7 molecule/resources/converge.yml Normal file
@@ -0,0 +1,7 @@
---
- name: Apply overrides
  ansible.builtin.import_playbook: >-
    {{ lookup("ansible.builtin.env", "OVERRIDES_FILE") }}

- name: Converge
  ansible.builtin.import_playbook: ../../site.yml
7 molecule/resources/reset.yml Normal file
@@ -0,0 +1,7 @@
---
- name: Apply overrides
  ansible.builtin.import_playbook: >-
    {{ lookup("ansible.builtin.env", "OVERRIDES_FILE") }}

- name: Reset
  ansible.builtin.import_playbook: ../../reset.yml
5 molecule/resources/verify.yml Normal file
@@ -0,0 +1,5 @@
---
- name: Verify
  hosts: all
  roles:
    - verify/from_outside
9 molecule/resources/verify/from_outside/defaults/main.yml Normal file
@@ -0,0 +1,9 @@
---
# A host outside of the cluster from which the checks shall be performed
outside_host: localhost

# This kubernetes namespace will be used for testing
testing_namespace: molecule-verify-from-outside

# The directory in which the example manifests reside
example_manifests_path: ../../../../example
@@ -0,0 +1,5 @@
---
- name: Clean up kubecfg
  ansible.builtin.file:
    path: "{{ kubecfg.path }}"
    state: absent
@@ -0,0 +1,19 @@
---
- name: Create temporary directory for kubecfg
  ansible.builtin.tempfile:
    state: directory
    suffix: kubecfg
  register: kubecfg
- name: Gathering facts
  delegate_to: "{{ groups['master'][0] }}"
  ansible.builtin.gather_facts:
- name: Download kubecfg
  ansible.builtin.fetch:
    src: "{{ ansible_env.HOME }}/.kube/config"
    dest: "{{ kubecfg.path }}/"
    flat: true
  delegate_to: "{{ groups['master'][0] }}"
  delegate_facts: true
- name: Store path to kubecfg
  ansible.builtin.set_fact:
    kubecfg_path: "{{ kubecfg.path }}/config"
12 molecule/resources/verify/from_outside/tasks/main.yml Normal file
@@ -0,0 +1,12 @@
---
- name: Verify
  run_once: true
  delegate_to: "{{ outside_host }}"
  block:
    - ansible.builtin.import_tasks: kubecfg-fetch.yml
    - name: "TEST CASE: Get nodes"
      ansible.builtin.include_tasks: test/get-nodes.yml
    - name: "TEST CASE: Deploy example"
      ansible.builtin.include_tasks: test/deploy-example.yml
  always:
    - ansible.builtin.import_tasks: kubecfg-cleanup.yml
@@ -0,0 +1,54 @@
---
- name: Deploy example
  block:
    - name: "Create namespace: {{ testing_namespace }}"
      kubernetes.core.k8s:
        api_version: v1
        kind: Namespace
        name: "{{ testing_namespace }}"
        state: present
        wait: true
        kubeconfig: "{{ kubecfg_path }}"

    - name: Apply example manifests
      kubernetes.core.k8s:
        src: "{{ example_manifests_path }}/{{ item }}"
        namespace: "{{ testing_namespace }}"
        state: present
        wait: true
        kubeconfig: "{{ kubecfg_path }}"
      with_items:
        - deployment.yml
        - service.yml

    - name: Get info about nginx service
      kubernetes.core.k8s_info:
        kind: service
        name: nginx
        namespace: "{{ testing_namespace }}"
        kubeconfig: "{{ kubecfg_path }}"
      vars: &load_balancer_metadata
        metallb_ip: status.loadBalancer.ingress[0].ip
        metallb_port: spec.ports[0].port
      register: nginx_services

    - name: Assert that the nginx welcome page is available
      ansible.builtin.uri:
        url: http://{{ ip }}:{{ port }}/
        return_content: yes
      register: result
      failed_when: "'Welcome to nginx!' not in result.content"
      vars:
        ip: >-
          {{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
        port: >-
          {{ nginx_services.resources[0].spec.ports[0].port }}

  always:
    - name: "Remove namespace: {{ testing_namespace }}"
      kubernetes.core.k8s:
        api_version: v1
        kind: Namespace
        name: "{{ testing_namespace }}"
        state: absent
        kubeconfig: "{{ kubecfg_path }}"
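For a manual spot check of the same behaviour outside molecule, a sketch along the following lines should work; it assumes `kubectl` already points at the cluster and uses the default namespace instead of the throwaway testing namespace:

```bash
# Hypothetical manual equivalent of the "Deploy example" test case.
kubectl apply -f example/deployment.yml -f example/service.yml
IP=$(kubectl get service nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
PORT=$(kubectl get service nginx -o jsonpath='{.spec.ports[0].port}')
curl --silent --fail "http://${IP}:${PORT}/" | grep "Welcome to nginx!"
kubectl delete -f example/service.yml -f example/deployment.yml
```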
@@ -0,0 +1,17 @@
---
- name: Get all nodes in cluster
  kubernetes.core.k8s_info:
    kind: node
    kubeconfig: "{{ kubecfg_path }}"
  register: cluster_nodes

- name: Assert that the cluster contains exactly the expected nodes
  ansible.builtin.assert:
    that: found_nodes == expected_nodes
    success_msg: "Found nodes as expected: {{ found_nodes }}"
    fail_msg: "Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }}"
  vars:
    found_nodes: >-
      {{ cluster_nodes | json_query('resources[*].metadata.name') | unique }}
    expected_nodes: >-
      {{ (groups['master'] + groups['node']) | unique }}
7 requirements.txt Normal file
@@ -0,0 +1,7 @@
ansible-core>=2.13.2
jmespath
jsonpatch
kubernetes>=12.0.0
molecule-vagrant>=1.0.0
molecule>=4.0.1
pyyaml>=3.11
@@ -2,10 +2,10 @@
ansible_user: root
server_init_args: >-
  {% if groups['master'] | length > 1 %}
  {% if ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) %}
  {% if ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname'] %}
  --cluster-init
  {% else %}
  --server https://{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}:6443
  --server https://{{ hostvars[groups['master'][0]].k3s_node_ip }}:6443
  {% endif %}
  --token {{ k3s_token }}
  {% endif %}
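To make the effect of this change concrete, here is an illustrative sketch of what `server_init_args` renders to with the new hostname-based check, taking the molecule scenario's `control1` (`k3s_node_ip` 192.168.30.38) as the first master and `supersecret` as the token; these strings are assumptions, not taken from a real run:

```bash
# Hypothetical rendered server_init_args in a multi-master cluster:
#
# on control1 (its ansible_hostname matches the first master's):
server_init_args="--cluster-init --token supersecret"
# on control2 and control3:
server_init_args="--server https://192.168.30.38:6443 --token supersecret"
```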
28 roles/k3s/master/tasks/fetch_k3s_init_logs.yml Normal file
@@ -0,0 +1,28 @@
---
# Download logs of k3s-init.service from the nodes to localhost.
# Note that log_destination must be set.

- name: Fetch k3s-init.service logs
  ansible.builtin.command:
    cmd: journalctl --all --unit=k3s-init.service
  changed_when: false
  register: k3s_init_log

- name: Create {{ log_destination }}
  delegate_to: localhost
  run_once: true
  become: false
  ansible.builtin.file:
    path: "{{ log_destination }}"
    state: directory
    mode: "0755"

- name: Store logs to {{ log_destination }}
  delegate_to: localhost
  become: false
  ansible.builtin.template:
    src: content.j2
    dest: "{{ log_destination }}/k3s-init@{{ ansible_hostname }}.log"
    mode: 0644
  vars:
    content: "{{ k3s_init_log.stdout }}"
@@ -20,7 +20,7 @@
    owner: root
    group: root
    mode: 0644
  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: Copy vip rbac manifest to first master
  template:
@@ -29,7 +29,7 @@
    owner: root
    group: root
    mode: 0644
  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: Copy vip manifest to first master
  template:
@@ -38,7 +38,7 @@
    owner: root
    group: root
    mode: 0644
  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

# these will be copied and installed now, then tested later and apply config
- name: Copy metallb namespace to first master
@@ -48,7 +48,7 @@
    owner: root
    group: root
    mode: 0644
  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: Copy metallb namespace to first master
  template:
@@ -57,7 +57,7 @@
    owner: root
    group: root
    mode: 0644
  when: ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0])
  when: ansible_hostname == hostvars[groups['master'][0]]['ansible_hostname']

- name: Init cluster inside the transient k3s-init service
  command:
@@ -80,6 +80,12 @@
      delay: 10
      changed_when: false
  always:
    - name: Save logs of k3s-init.service
      include_tasks: fetch_k3s_init_logs.yml
      when: log_destination
      vars:
        log_destination: >-
          {{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }}
    - name: Kill the temporary service used for initialization
      systemd:
        name: k3s-init
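Outside of CI, the same log collection can be triggered by exporting the variable before a playbook run; the inventory path below is only a placeholder:

```bash
# Hypothetical local run that also archives the k3s-init.service logs.
export ANSIBLE_K3S_LOG_DIR="$PWD/k3s-init-logs"
ansible-playbook site.yml -i inventory/my-cluster/hosts.ini
# Expect one k3s-init-logs/k3s-init@<hostname>.log per control node afterwards.
```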
5 roles/k3s/master/templates/content.j2 Normal file
@@ -0,0 +1,5 @@
{#
    This is a really simple template that just outputs the
    value of the "content" variable.
#}
{{ content }}
79 vagrant/Vagrantfile vendored
@@ -1,79 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.configure("2") do |config|
  # General configuration
  config.vm.box = "generic/ubuntu2204"
  config.vm.synced_folder ".", "/vagrant", disabled: true
  config.ssh.insert_key = false

  config.vm.provider :virtualbox do |v|
    v.memory = 2048
    v.cpus = 2
    v.linked_clone = true
  end

  # Control Node 1
  config.vm.define "control1" do |control1|
    control1.vm.hostname = "control1"
    control1.vm.network "private_network", ip: "192.168.30.38"
  end

  # Control Node 2
  config.vm.define "control2" do |control2|
    control2.vm.hostname = "control2"
    control2.vm.network "private_network", ip: "192.168.30.39"
  end

  # Control Node 3
  config.vm.define "control3" do |control3|
    control3.vm.hostname = "control3"
    control3.vm.network "private_network", ip: "192.168.30.40"
  end

  # Worker Node 1
  config.vm.define "node1" do |node1|
    node1.vm.hostname = "node1"
    node1.vm.network "private_network", ip: "192.168.30.41"
  end

  # Worker Node 2
  config.vm.define "node2" do |node2|
    node2.vm.hostname = "node2"
    node2.vm.network "private_network", ip: "192.168.30.42"
  end

  config.vm.provision "ansible", type: "ansible", run: "never" do |ansible|
    ansible.playbook = "../site.yml"
    ansible.limit = "all"
    ansible.groups = {
      "master" => ["control1", "control2", "control3"],
      "node" => ["node1", "node2"],
      "k3s_cluster:children" => ["master", "node"],
      "k3s_cluster:vars" => {"k3s_version" => "v1.24.4+k3s1",
                             "ansible_user" => "vagrant",
                             "systemd_dir" => "/etc/systemd/system",
                             "flannel_iface" => "eth1",
                             "apiserver_endpoint" => "192.168.30.222",
                             "k3s_token" => "supersecret",
                             "extra_server_args" => "--node-ip={{ ansible_eth1.ipv4.address }} --flannel-iface={{ flannel_iface }} --no-deploy servicelb --no-deploy traefik",
                             "extra_agent_args" => "--flannel-iface={{ flannel_iface }}",
                             "kube_vip_tag_version" => "v0.5.0",
                             "metal_lb_speaker_tag_version" => "v0.13.4",
                             "metal_lb_controller_tag_version" => "v0.13.4",
                             "metal_lb_ip_range" => "192.168.30.80-192.168.30.90",
                             "retry_count" => "60"}
    }
    ansible.host_vars = {
      "control1" => {
        "server_init_args" => "--cluster-init --token {{ k3s_token }} {{ extra_server_args | default('') }}"
      },
      "control2" => {
        "server_init_args" => "--server https://192.168.30.38:6443 --token {{ k3s_token }} {{ extra_server_args | default('') }}"
      },
      "control3" => {
        "server_init_args" => "--server https://192.168.30.38:6443 --token {{ k3s_token }} {{ extra_server_args | default('') }}"
      }
    }
  end
end
@@ -1,114 +0,0 @@
#!/usr/bin/env python3

# Perform a few tests on a cluster created with this playbook.
# To simplify test execution, the scripts does not depend on any third-party
# packages, only the Python standard library.

import json
import subprocess
import unittest
from pathlib import Path
from time import sleep
from warnings import warn


VAGRANT_DIR = Path(__file__).parent.absolute()
PLAYBOOK_DIR = VAGRANT_DIR.parent.absolute()


class TestK3sCluster(unittest.TestCase):
    def _kubectl(self, args: str, json_out: bool = True) -> dict | None:
        cmd = "kubectl"
        if json_out:
            cmd += " -o json"
        cmd += f" {args}"

        result = subprocess.run(cmd, capture_output=True, shell=True, check=True)

        if json_out:
            return json.loads(result.stdout)
        else:
            return None

    def _curl(self, url: str) -> str:
        options = [
            "--silent",  # no progress info
            "--show-error",  # ... but errors should still be shown
            "--fail",  # set exit code on error
            "--location",  # follow redirects
        ]
        cmd = f'curl {" ".join(options)} "{url}"'

        result = subprocess.run(cmd, capture_output=True, shell=True, check=True)
        output = result.stdout.decode("utf-8")
        return output

    def _apply_manifest(self, manifest_file: Path) -> dict:
        apply_result = self._kubectl(
            f'apply --filename="{manifest_file}" --cascade="background"'
        )
        self.addCleanup(
            lambda: self._kubectl(
                f'delete --filename="{manifest_file}"',
                json_out=False,
            )
        )
        return apply_result

    @staticmethod
    def _retry(function, retries: int = 5, seconds_between_retries=1):
        for retry in range(1, retries + 1):
            try:
                return function()
            except Exception as exc:
                if retry < retries:
                    sleep(seconds_between_retries)
                    continue
                else:
                    raise exc

    def _get_load_balancer_ip(
        self,
        service: str,
        namespace: str = "default",
    ) -> str | None:
        svc_description = self._kubectl(
            f'get --namespace="{namespace}" service "{service}"'
        )
        ip = svc_description["status"]["loadBalancer"]["ingress"][0]["ip"]
        return ip

    def test_nodes_exist(self):
        out = self._kubectl("get nodes")
        node_names = {item["metadata"]["name"] for item in out["items"]}
        self.assertEqual(
            node_names,
            {"control1", "control2", "control3", "node1", "node2"},
        )

    def test_ip_address_pool_exists(self):
        out = self._kubectl("get --all-namespaces IpAddressPool")
        pools = out["items"]
        self.assertGreater(len(pools), 0)

    def test_nginx_example_page(self):
        # Deploy the manifests to the cluster
        deployment = self._apply_manifest(PLAYBOOK_DIR / "example" / "deployment.yml")
        service = self._apply_manifest(PLAYBOOK_DIR / "example" / "service.yml")

        # Assert that the dummy page is available
        metallb_ip = self._retry(
            lambda: self._get_load_balancer_ip(service["metadata"]["name"])
        )
        # Now that an IP address was assigned, let's reload the service description:
        service = self._kubectl(f'get service "{service["metadata"]["name"]}"')
        metallb_port = service["spec"]["ports"][0]["port"]

        response_body = self._retry(
            lambda: self._curl(f"http://{metallb_ip}:{metallb_port}/")
        )
        self.assertIn("Welcome to nginx!", response_body)


if __name__ == "__main__":
    unittest.main()