* Update pre-commit actions

This was done by running "pre-commit autoupdate --freeze".

* Remove pre-commit only dependencies from requirements.in

Including them in the file would create the illusion that those were the
versions actually used in CI, but they are not. The exact versions are
determined by the pre-commit hooks which are pinned in
.pre-commit-config.yaml.

* Ansible Lint: Fix role-name[path]

* Ansible Lint: Fix name[play]

* Ansible Lint: Fix key-order[task]

* Ansible Lint: Fix jinja[spacing]

* Ansible Lint: Fix no-free-form

* Ansible Lint: Fix var-naming[no-reserved]

* Ansible Lint: Fix yaml[comments]

* Ansible Lint: Fix yaml[line-length]

* Ansible Lint: Fix name[casing]

* Ansible Lint: Fix no-changed-when

* Ansible Lint: Fix fqcn[action]

* Ansible Lint: Fix args[module]

* Improve task naming
This commit is contained in:
Simon Leiner 2023-07-20 17:50:02 +02:00 committed by GitHub
parent edd4838407
commit 33ae0d4970
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
39 changed files with 82 additions and 122 deletions

View File

@@ -1,7 +1,7 @@
---
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: 3298ddab3c13dd77d6ce1fc0baf97691430d84b0 # v4.3.0
rev: f71fa2c1f9cf5cb705f73dffe4b21f7c61470ba9 # frozen: v4.4.0
hooks:
- id: requirements-txt-fixer
- id: sort-simple-yaml
@@ -12,24 +12,24 @@ repos:
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- repo: https://github.com/adrienverge/yamllint.git
rev: 9cce2940414e9560ae4c8518ddaee2ac1863a4d2 # v1.28.0
rev: b05e028c5881819161d11cb543fd96a30c06cceb # frozen: v1.32.0
hooks:
- id: yamllint
args: [-c=.yamllint]
- repo: https://github.com/ansible-community/ansible-lint.git
rev: a058554b9bcf88f12ad09ab9fb93b267a214368f # v6.8.6
rev: 3293b64b939c0de16ef8cb81dd49255e475bf89a # frozen: v6.17.2
hooks:
- id: ansible-lint
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: 4c7c3dd7161ef39e984cb295e93a968236dc8e8a # v0.8.0.4
rev: 375289a39f5708101b1f916eb729e8d6da96993f # frozen: v0.9.0.5
hooks:
- id: shellcheck
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: 04618e68aa2380828a36a23ff5f65a06ae8f59b9 # v1.3.1
rev: 12885e376b93dc4536ad68d156065601e4433665 # frozen: v1.5.1
hooks:
- id: remove-crlf
- id: remove-tabs
- repo: https://github.com/sirosen/texthooks
rev: 30d9af95631de0d7cff4e282bde9160d38bb0359 # 0.4.0
rev: c4ffd3e31669dd4fa4d31a23436cc13839730084 # frozen: 0.5.0
hooks:
- id: fix-smartquotes

View File

@@ -4,7 +4,8 @@
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:

View File

@@ -4,7 +4,8 @@
tasks:
- name: Override host variables (1/2)
ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
# In this scenario, we have multiple interfaces that the VIP could be

View File

@@ -2,4 +2,4 @@
- name: Verify
hosts: all
roles:
- verify/from_outside
- verify_from_outside

View File

@@ -6,4 +6,4 @@ outside_host: localhost
testing_namespace: molecule-verify-from-outside
# The directory in which the example manifests reside
example_manifests_path: ../../../../example
example_manifests_path: ../../../example

View File

@@ -34,14 +34,14 @@
- name: Assert that the nginx welcome page is available
ansible.builtin.uri:
url: http://{{ ip | ansible.utils.ipwrap }}:{{ port }}/
url: http://{{ ip | ansible.utils.ipwrap }}:{{ port_ }}/
return_content: yes
register: result
failed_when: "'Welcome to nginx!' not in result.content"
vars:
ip: >-
{{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
port: >-
port_: >-
{{ nginx_services.resources[0].spec.ports[0].port }}
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap

View File

@@ -4,7 +4,8 @@
tasks:
- name: Override host variables
ansible.builtin.set_fact:
# See: https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant # noqa yaml[line-length]
# See:
# https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
flannel_iface: eth1
# The test VMs might be a bit slow, so we give them more time to join the cluster:

View File

@@ -1,5 +1,4 @@
ansible-core>=2.13.5
ansible-lint>=6.8.6
jmespath>=1.0.1
jsonpatch>=1.32
kubernetes>=25.3.0
@@ -9,4 +8,3 @@ netaddr>=0.8.0
pre-commit>=2.20.0
pre-commit-hooks>=1.3.1
pyyaml>=6.0
yamllint>=1.28.0

View File

@@ -1,6 +1,6 @@
#
# This file is autogenerated by pip-compile with python 3.8
# To update, run:
# This file is autogenerated by pip-compile with Python 3.11
# by the following command:
#
# pip-compile requirements.in
#
@@ -10,19 +10,12 @@ ansible-core==2.14.5
# via
# -r requirements.in
# ansible-compat
# ansible-lint
ansible-lint==6.15.0
# via -r requirements.in
arrow==1.2.3
# via jinja2-time
attrs==22.1.0
# via jsonschema
binaryornot==0.4.4
# via cookiecutter
black==22.10.0
# via ansible-lint
bracex==2.3.post1
# via wcmatch
cachetools==5.2.0
# via google-auth
certifi==2022.9.24
@@ -39,7 +32,6 @@ charset-normalizer==2.1.1
# via requests
click==8.1.3
# via
# black
# click-help-colors
# cookiecutter
# molecule
@@ -58,9 +50,7 @@ distro==1.8.0
enrich==1.2.7
# via molecule
filelock==3.8.0
# via
# ansible-lint
# virtualenv
# via virtualenv
google-auth==2.14.0
# via kubernetes
identify==2.5.8
@@ -85,7 +75,6 @@ jsonpointer==2.3
jsonschema==4.17.0
# via
# ansible-compat
# ansible-lint
# molecule
kubernetes==25.3.0
# via -r requirements.in
@@ -97,8 +86,6 @@ molecule==4.0.4
# molecule-vagrant
molecule-vagrant==1.0.0
# via -r requirements.in
mypy-extensions==0.4.3
# via black
netaddr==0.8.0
# via -r requirements.in
nodeenv==1.7.0
@@ -109,16 +96,9 @@ packaging==21.3
# via
# ansible-compat
# ansible-core
# ansible-lint
# molecule
pathspec==0.10.1
# via
# black
# yamllint
platformdirs==2.5.2
# via
# black
# virtualenv
# via virtualenv
pluggy==1.0.0
# via molecule
pre-commit==2.21.0
@@ -152,13 +132,11 @@ pyyaml==6.0
# -r requirements.in
# ansible-compat
# ansible-core
# ansible-lint
# cookiecutter
# kubernetes
# molecule
# molecule-vagrant
# pre-commit
# yamllint
requests==2.28.1
# via
# cookiecutter
@@ -170,15 +148,12 @@ resolvelib==0.8.1
# via ansible-core
rich==12.6.0
# via
# ansible-lint
# enrich
# molecule
rsa==4.9
# via google-auth
ruamel-yaml==0.17.21
# via
# ansible-lint
# pre-commit-hooks
# via pre-commit-hooks
selinux==0.2.1
# via molecule-vagrant
six==1.16.0
@@ -187,9 +162,7 @@ six==1.16.0
# kubernetes
# python-dateutil
subprocess-tee==0.4.1
# via
# ansible-compat
# ansible-lint
# via ansible-compat
text-unidecode==1.3
# via python-slugify
urllib3==1.26.12
@@ -198,14 +171,8 @@ urllib3==1.26.12
# requests
virtualenv==20.16.6
# via pre-commit
wcmatch==8.4.1
# via ansible-lint
websocket-client==1.4.2
# via kubernetes
yamllint==1.31.0
# via
# -r requirements.in
# ansible-lint
# The following packages are considered to be unsafe in a requirements file:
# setuptools

View File

@@ -1,6 +1,6 @@
---
- hosts: k3s_cluster
- name: Reset k3s cluster
hosts: k3s_cluster
gather_facts: yes
roles:
- role: reset
@@ -14,7 +14,8 @@
reboot:
reboot_timeout: 3600
- hosts: proxmox
- name: Revert changes to Proxmox cluster
hosts: proxmox
gather_facts: true
become: yes
remote_user: "{{ proxmox_lxc_ssh_user }}"

View File

@@ -1,17 +1,16 @@
---
- name: Clean previous runs of k3s-init
- name: Stop k3s-init
systemd:
name: k3s-init
state: stopped
failed_when: false
- name: Clean previous runs of k3s-init
- name: Clean previous runs of k3s-init # noqa command-instead-of-module
# The systemd module does not support "reset-failed", so we need to resort to command.
command: systemctl reset-failed k3s-init
failed_when: false
changed_when: false
args:
warn: false # The ansible systemd module does not support reset-failed
- name: Deploy vip manifest
include_tasks: vip.yml
@@ -28,6 +27,7 @@
creates: "{{ systemd_dir }}/k3s.service"
- name: Verification
when: not ansible_check_mode
block:
- name: Verify that all nodes actually joined (check k3s-init.service if this fails)
command:
@@ -49,7 +49,6 @@
name: k3s-init
state: stopped
failed_when: false
when: not ansible_check_mode
- name: Copy K3s service file
register: k3s_service

View File

@@ -10,7 +10,7 @@
- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{metal_lb_type}}.yaml" # noqa yaml[line-length]
url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml" # noqa yaml[line-length]
dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
owner: root
group: root

View File

@@ -1,4 +1,4 @@
---
- name: reboot server
- name: Reboot server
become: true
reboot:

View File

@@ -1,30 +1,30 @@
---
- name: Set same timezone on every Server
timezone:
community.general.timezone:
name: "{{ system_timezone }}"
when: (system_timezone is defined) and (system_timezone != "Your/Timezone")
- name: Set SELinux to disabled state
selinux:
ansible.posix.selinux:
state: disabled
when: ansible_os_family == "RedHat"
- name: Enable IPv4 forwarding
sysctl:
ansible.posix.sysctl:
name: net.ipv4.ip_forward
value: "1"
state: present
reload: yes
- name: Enable IPv6 forwarding
sysctl:
ansible.posix.sysctl:
name: net.ipv6.conf.all.forwarding
value: "1"
state: present
reload: yes
- name: Enable IPv6 router advertisements
sysctl:
ansible.posix.sysctl:
name: net.ipv6.conf.all.accept_ra
value: "2"
state: present
@@ -38,13 +38,13 @@
when: ansible_os_family == "RedHat"
- name: Load br_netfilter
modprobe:
community.general.modprobe:
name: br_netfilter
state: present
when: ansible_os_family == "RedHat"
- name: Set bridge-nf-call-iptables (just to be sure)
sysctl:
ansible.posix.sysctl:
name: "{{ item }}"
value: "1"
state: present

View File

@@ -1,5 +1,11 @@
---
- name: reboot containers
command:
"pct reboot {{ item }}"
loop: "{{ proxmox_lxc_filtered_ids }}"
- name: Reboot containers
block:
- name: Get container ids from filtered files
set_fact:
proxmox_lxc_filtered_ids: >-
{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}
- name: Reboot container
command: "pct reboot {{ item }}"
loop: "{{ proxmox_lxc_filtered_ids }}"
changed_when: true

View File

@@ -1,21 +1,15 @@
---
- name: check for container files that exist on this host
- name: Check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
- name: filter out files that do not exist
- name: Filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
# used for the reboot handler
- name: get container ids from filtered files
set_fact:
proxmox_lxc_filtered_ids:
'{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
- name: Ensure lxc config has the right apparmor profile
lineinfile:

View File

@@ -1,3 +1,3 @@
---
- name: reboot
- name: Reboot
reboot:

View File

@@ -47,20 +47,16 @@
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match("Debian.*bullseye")
- name: execute OS related tasks on the Raspberry Pi - {{ action }}
- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
include_tasks: "{{ item }}"
with_first_found:
- "{{ action }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
- "{{ action }}/{{ detected_distribution }}.yml"
- "{{ action }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
- "{{ action }}/{{ ansible_distribution }}.yml"
- "{{ action }}/default.yml"
- "{{ action_ }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
- "{{ action_ }}/{{ detected_distribution }}.yml"
- "{{ action_ }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
- "{{ action_ }}/{{ ansible_distribution }}.yml"
- "{{ action_ }}/default.yml"
vars:
action: >-
{% if state == "present" -%}
setup
{%- else -%}
teardown
{%- endif %}
action_: >-
{% if state == "present" %}setup{% else %}teardown{% endif %}
when:
- raspberry_pi|default(false)

View File

@@ -8,20 +8,22 @@
notify: reboot
- name: Install iptables
apt: name=iptables state=present
apt:
name: iptables
state: present
- name: Flush iptables before changing to iptables-legacy
iptables:
flush: true
- name: Changing to iptables-legacy
alternatives:
community.general.alternatives:
path: /usr/sbin/iptables-legacy
name: iptables
register: ip4_legacy
- name: Changing to ip6tables-legacy
alternatives:
community.general.alternatives:
path: /usr/sbin/ip6tables-legacy
name: ip6tables
register: ip6_legacy

View File

@@ -9,7 +9,7 @@
check_mode: false
- name: Umount filesystem
mount:
ansible.posix.mount:
path: "{{ item }}"
state: unmounted
with_items:

View File

@@ -1,5 +0,0 @@
---
- name: reboot containers
command:
"pct reboot {{ item }}"
loop: "{{ proxmox_lxc_filtered_ids }}"

View File

@@ -0,0 +1 @@
../../proxmox_lxc/handlers/main.yml

View File

@@ -1,21 +1,15 @@
---
- name: check for container files that exist on this host
- name: Check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
- name: filter out files that do not exist
- name: Filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
# used for the reboot handler
- name: get container ids from filtered files
set_fact:
proxmox_lxc_filtered_ids:
'{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}'
- name: Remove LXC apparmor profile
lineinfile:
dest: "{{ item }}"

View File

@@ -1,13 +1,14 @@
---
- hosts: proxmox
- name: Prepare Proxmox cluster
hosts: proxmox
gather_facts: true
become: yes
roles:
- role: proxmox_lxc
when: proxmox_lxc_configure
- hosts: k3s_cluster
- name: Prepare k3s nodes
hosts: k3s_cluster
gather_facts: yes
roles:
- role: lxc
@@ -20,17 +21,20 @@
- role: raspberrypi
become: true
- hosts: master
- name: Setup k3s servers
hosts: master
roles:
- role: k3s/master
- role: k3s_server
become: true
- hosts: node
- name: Setup k3s agents
hosts: node
roles:
- role: k3s/node
- role: k3s_agent
become: true
- hosts: master
- name: Configure k3s cluster
hosts: master
roles:
- role: k3s/post
- role: k3s_server_post
become: true