Change to FQCN with ansible-lint fixer (#553)

* Change to FQCN with ansible-lint fixer

Since ansible-base 2.10 (later renamed ansible-core), fully qualified collection names (FQCNs) are the recommended way to reference modules and plugins.
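The diff below applies that pattern throughout. A minimal before/after sketch (hypothetical task; any builtin module follows the same pattern):

    # Before: short module name, resolved implicitly
    - name: Create a directory
      file:
        path: /etc/example
        state: directory

    # After: fully qualified collection name
    - name: Create a directory
      ansible.builtin.file:
        path: /etc/example
        state: directory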

Updated .ansible-lint to use the production profile and removed fqcn-builtins from the skip_list.
Updated .yamllint with the needed rules.

Ran ansible-lint --fix=all, then applied some minor changes manually.
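For reference, a sketch of the commands run (assuming a recent ansible-lint with the autofixer; flag support varies by version):

    ansible-lint --fix=all   # apply every available autofix
    ansible-lint             # re-run to surface anything left for manual cleanup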

* Changed the octal mode value in molecule/ipv6/prepare.yml to a quoted string
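The quoting matters because yamllint's octal-values rules now forbid bare octals: YAML 1.1 parses an unquoted 0644 as the integer 420, which is not the permission value the task intends, so modes are passed as strings instead:

    # Before: bare octal, silently parsed by YAML as the integer 420
    mode: 0644

    # After: quoted, handed to the module as the string "0644"
    mode: "0644"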
Richard Holmboe 2024-08-13 05:59:59 +02:00, committed by GitHub
parent 635f0b21b3
commit b077a49e1f
49 changed files with 317 additions and 317 deletions

View File

@@ -1,21 +1,21 @@
---
profile: production
exclude_paths:
# default paths
- '.cache/'
- '.github/'
- 'test/fixtures/formatting-before/'
- 'test/fixtures/formatting-prettier/'
- .cache/
- .github/
- test/fixtures/formatting-before/
- test/fixtures/formatting-prettier/
# The "converge" and "reset" playbooks use import_playbook in
# conjunction with the "env" lookup plugin, which lets the
# syntax check of ansible-lint fail.
- 'molecule/**/converge.yml'
- 'molecule/**/prepare.yml'
- 'molecule/**/reset.yml'
- molecule/**/converge.yml
- molecule/**/prepare.yml
- molecule/**/reset.yml
# The file was generated by galaxy ansible - don't mess with it.
- 'galaxy.yml'
- galaxy.yml
skip_list:
- 'fqcn-builtins'
- var-naming[no-role-prefix]

View File

@@ -2,10 +2,19 @@
extends: default
rules:
comments:
min-spaces-from-content: 1
comments-indentation: false
braces:
max-spaces-inside: 1
octal-values:
forbid-implicit-octal: true
forbid-explicit-octal: true
line-length:
max: 120
level: warning
truthy:
allowed-values: ['true', 'false']
allowed-values: ["true", "false"]
ignore:
- galaxy.yml

View File

@@ -5,25 +5,25 @@ ansible_user: ansibleuser
systemd_dir: /etc/systemd/system
# Set your timezone
system_timezone: "Your/Timezone"
system_timezone: Your/Timezone
# interface which will be used for flannel
flannel_iface: "eth0"
flannel_iface: eth0
# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
# calico_iface: "eth0"
calico_ebpf: false # use eBPF dataplane instead of iptables
calico_tag: "v3.28.0" # calico version tag
calico_ebpf: false # use eBPF dataplane instead of iptables
calico_tag: v3.28.0 # calico version tag
# uncomment cilium_iface to use cilium cni instead of flannel or calico
# ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
# cilium_iface: "eth0"
cilium_mode: "native" # native when nodes on same subnet or using bgp, else set routed
cilium_tag: "v1.16.0" # cilium version tag
cilium_hubble: true # enable hubble observability relay and ui
cilium_mode: native # native when nodes on same subnet or using bgp, else set routed
cilium_tag: v1.16.0 # cilium version tag
cilium_hubble: true # enable hubble observability relay and ui
# if using calico or cilium, you may specify the cluster pod cidr pool
cluster_cidr: "10.52.0.0/16"
cluster_cidr: 10.52.0.0/16
# enable cilium bgp control plane for lb services and pod cidrs. disables metallb.
cilium_bgp: false
@@ -31,8 +31,8 @@ cilium_bgp: false
# bgp parameters for cilium cni. only active when cilium_iface is defined and cilium_bgp is true.
cilium_bgp_my_asn: "64513"
cilium_bgp_peer_asn: "64512"
cilium_bgp_peer_address: "192.168.30.1"
cilium_bgp_lb_cidr: "192.168.31.0/24" # cidr for cilium loadbalancer ipam
cilium_bgp_peer_address: 192.168.30.1
cilium_bgp_lb_cidr: 192.168.31.0/24 # cidr for cilium loadbalancer ipam
# enable kube-vip ARP broadcasts
kube_vip_arp: true
@@ -47,11 +47,11 @@ kube_vip_bgp_peeraddress: "192.168.30.1" # Defines the address for the BGP peer
kube_vip_bgp_peeras: "64512" # Defines the AS for the BGP peer
# apiserver_endpoint is virtual ip-address which will be configured on each master
apiserver_endpoint: "192.168.30.222"
apiserver_endpoint: 192.168.30.222
# k3s_token is required masters can talk together securely
# this token should be alpha numeric only
k3s_token: "some-SUPER-DEDEUPER-secret-password"
k3s_token: some-SUPER-DEDEUPER-secret-password
# The IP on which the node is reachable in the cluster.
# Here, a sensible default is provided, you can still override
@@ -84,7 +84,7 @@ extra_agent_args: >-
{{ extra_args }}
# image tag for kube-vip
kube_vip_tag_version: "v0.8.2"
kube_vip_tag_version: v0.8.2
# tag for kube-vip-cloud-provider manifest
# kube_vip_cloud_provider_tag_version: "main"
@@ -94,10 +94,10 @@ kube_vip_tag_version: "v0.8.2"
# kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"
# metallb type frr or native
metal_lb_type: "native"
metal_lb_type: native
# metallb mode layer2 or bgp
metal_lb_mode: "layer2"
metal_lb_mode: layer2
# bgp options
# metal_lb_bgp_my_asn: "64513"
@@ -105,11 +105,11 @@ metal_lb_mode: "layer2"
# metal_lb_bgp_peer_address: "192.168.30.1"
# image tag for metal lb
metal_lb_speaker_tag_version: "v0.14.8"
metal_lb_controller_tag_version: "v0.14.8"
metal_lb_speaker_tag_version: v0.14.8
metal_lb_controller_tag_version: v0.14.8
# metallb ip range for load balancer
metal_lb_ip_range: "192.168.30.80-192.168.30.90"
metal_lb_ip_range: 192.168.30.80-192.168.30.90
# Only enable if your nodes are proxmox LXC nodes, make sure to configure your proxmox nodes
# in your hosts.ini file.

View File

@@ -1,2 +1,2 @@
---
ansible_user: '{{ proxmox_lxc_ssh_user }}'
ansible_user: "{{ proxmox_lxc_ssh_user }}"

View File

@@ -11,8 +11,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant
groups:
- k3s_cluster
- master

View File

@@ -12,5 +12,5 @@
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.224"
metal_lb_ip_range: "192.168.30.100-192.168.30.109"
apiserver_endpoint: 192.168.30.224
metal_lb_ip_range: 192.168.30.100-192.168.30.109

View File

@@ -11,8 +11,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant
groups:
- k3s_cluster
- master

View File

@@ -12,5 +12,5 @@
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.225"
metal_lb_ip_range: "192.168.30.110-192.168.30.119"
apiserver_endpoint: 192.168.30.225
metal_lb_ip_range: 192.168.30.110-192.168.30.119

View File

@@ -4,7 +4,6 @@ dependency:
driver:
name: vagrant
platforms:
- name: control1
box: generic/ubuntu2204
memory: 1024
@@ -18,8 +17,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant
- name: control2
box: generic/debian12
@@ -56,8 +55,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant
- name: node2
box: generic/rocky9

View File

@@ -17,8 +17,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant
- name: control2
box: generic/ubuntu2204
@@ -33,8 +33,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant
- name: node1
box: generic/ubuntu2204
@@ -49,8 +49,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant
provisioner:
name: ansible
env:

View File

@@ -38,7 +38,7 @@
dest: /etc/netplan/55-flannel-ipv4.yaml
owner: root
group: root
mode: 0644
mode: "0644"
register: netplan_template
- name: Apply netplan configuration

View File

@@ -11,8 +11,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant
groups:
- k3s_cluster
- master

View File

@@ -12,6 +12,6 @@
retry_count: 45
# Make sure that our IP ranges do not collide with those of the other scenarios
apiserver_endpoint: "192.168.30.225"
apiserver_endpoint: 192.168.30.225
# Use kube-vip instead of MetalLB
kube_vip_lb_ip_range: "192.168.30.110-192.168.30.119"
kube_vip_lb_ip_range: 192.168.30.110-192.168.30.119

View File

@@ -27,7 +27,7 @@
name: nginx
namespace: "{{ testing_namespace }}"
kubeconfig: "{{ kubecfg_path }}"
vars: &load_balancer_metadata
vars:
metallb_ip: status.loadBalancer.ingress[0].ip
metallb_port: spec.ports[0].port
register: nginx_services
@@ -43,10 +43,10 @@
{{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
port_: >-
{{ nginx_services.resources[0].spec.ports[0].port }}
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
# would be undefined. This will not be the case during playbook execution.
# noqa jinja[invalid]
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
# would be undefined. This will not be the case during playbook execution.
# noqa jinja[invalid]
always:
- name: "Remove namespace: {{ testing_namespace }}"

View File

@@ -9,7 +9,7 @@
ansible.builtin.assert:
that: found_nodes == expected_nodes
success_msg: "Found nodes as expected: {{ found_nodes }}"
fail_msg: "Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }}"
fail_msg: Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }}
vars:
found_nodes: >-
{{ cluster_nodes | json_query('resources[*].metadata.name') | unique | sort }}
@@ -22,7 +22,7 @@
| unique
| sort
}}
# Deactivated linter rules:
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
# would be undefined. This will not be the case during playbook execution.
# noqa jinja[invalid]

View File

@@ -11,8 +11,8 @@ platforms:
config_options:
# We currently can not use public-key based authentication on Ubuntu 22.04,
# see: https://github.com/chef/bento/issues/1405
ssh.username: "vagrant"
ssh.password: "vagrant"
ssh.username: vagrant
ssh.password: vagrant
groups:
- k3s_cluster
- master

View File

@@ -12,5 +12,5 @@
retry_count: 45
# Make sure that our IP ranges do not collide with those of the default scenario
apiserver_endpoint: "192.168.30.223"
metal_lb_ip_range: "192.168.30.91-192.168.30.99"
apiserver_endpoint: 192.168.30.223
metal_lb_ip_range: 192.168.30.91-192.168.30.99

View File

@@ -5,6 +5,6 @@
tasks:
- name: Reboot the nodes (and Wait upto 5 mins max)
become: true
reboot:
ansible.builtin.reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
reboot_timeout: 300

View File

@@ -7,11 +7,11 @@
become: true
- role: raspberrypi
become: true
vars: {state: absent}
vars: { state: absent }
post_tasks:
- name: Reboot and wait for node to come back up
become: true
reboot:
ansible.builtin.reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
reboot_timeout: 3600

View File

@@ -1,36 +1,34 @@
---
- name: Download k3s binary x64
get_url:
ansible.builtin.get_url:
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt
dest: /usr/local/bin/k3s
owner: root
group: root
mode: 0755
mode: "0755"
when: ansible_facts.architecture == "x86_64"
- name: Download k3s binary arm64
get_url:
ansible.builtin.get_url:
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-arm64
checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm64.txt
dest: /usr/local/bin/k3s
owner: root
group: root
mode: 0755
mode: "0755"
when:
- ( ansible_facts.architecture is search("arm") and
ansible_facts.userspace_bits == "64" ) or
ansible_facts.architecture is search("aarch64")
- ( ansible_facts.architecture is search("arm") and ansible_facts.userspace_bits == "64" )
or ansible_facts.architecture is search("aarch64")
- name: Download k3s binary armhf
get_url:
ansible.builtin.get_url:
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-armhf
checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm.txt
dest: /usr/local/bin/k3s
owner: root
group: root
mode: 0755
mode: "0755"
when:
- ansible_facts.architecture is search("arm")
- ansible_facts.userspace_bits == "32"

View File

@@ -1,18 +1,18 @@
---
- name: Create k3s-node.service.d directory
file:
path: '{{ systemd_dir }}/k3s-node.service.d'
ansible.builtin.file:
path: "{{ systemd_dir }}/k3s-node.service.d"
state: directory
owner: root
group: root
mode: '0755'
mode: "0755"
when: proxy_env is defined
- name: Copy K3s http_proxy conf file
template:
src: "http_proxy.conf.j2"
ansible.builtin.template:
src: http_proxy.conf.j2
dest: "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
owner: root
group: root
mode: '0755'
mode: "0755"
when: proxy_env is defined

View File

@@ -17,16 +17,16 @@
ansible.builtin.include_tasks: http_proxy.yml
- name: Deploy K3s http_proxy conf
include_tasks: http_proxy.yml
ansible.builtin.include_tasks: http_proxy.yml
when: proxy_env is defined
- name: Configure the k3s service
ansible.builtin.template:
src: "k3s.service.j2"
src: k3s.service.j2
dest: "{{ systemd_dir }}/k3s-node.service"
owner: root
group: root
mode: '0755'
mode: "0755"
- name: Manage k3s service
ansible.builtin.systemd:

View File

@@ -1,17 +1,16 @@
---
- name: Create directory /etc/rancher/k3s
file:
path: "/etc/{{ item }}"
ansible.builtin.file:
path: /etc/{{ item }}
state: directory
mode: '0755'
mode: "0755"
loop:
- rancher
- rancher/k3s
- name: Insert registries into /etc/rancher/k3s/registries.yaml
blockinfile:
ansible.builtin.blockinfile:
path: /etc/rancher/k3s/registries.yaml
block: "{{ custom_registries_yaml }}"
mode: '0600'
mode: "0600"
create: true

View File

@@ -4,14 +4,14 @@ extra_server_args: ""
group_name_master: master
kube_vip_arp: true
kube_vip_iface: ~
kube_vip_iface:
kube_vip_cloud_provider_tag_version: main
kube_vip_tag_version: v0.7.2
kube_vip_bgp: false
kube_vip_bgp_routerid: "127.0.0.1"
kube_vip_bgp_routerid: 127.0.0.1
kube_vip_bgp_as: "64513"
kube_vip_bgp_peeraddress: "192.168.30.1"
kube_vip_bgp_peeraddress: 192.168.30.1
kube_vip_bgp_peeras: "64512"
metal_lb_controller_tag_version: v0.14.3

View File

@@ -23,6 +23,6 @@
ansible.builtin.template:
src: content.j2
dest: "{{ log_destination }}/k3s-init@{{ ansible_hostname }}.log"
mode: 0644
mode: "0644"
vars:
content: "{{ k3s_init_log.stdout }}"

View File

@@ -1,17 +1,16 @@
---
- name: Create k3s.service.d directory
file:
path: '{{ systemd_dir }}/k3s.service.d'
ansible.builtin.file:
path: "{{ systemd_dir }}/k3s.service.d"
state: directory
owner: root
group: root
mode: '0755'
mode: "0755"
- name: Copy K3s http_proxy conf file
template:
src: "http_proxy.conf.j2"
ansible.builtin.template:
src: http_proxy.conf.j2
dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
owner: root
group: root
mode: '0755'
mode: "0755"

View File

@@ -1,27 +1,27 @@
---
- name: Create manifests directory on first master
file:
ansible.builtin.file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
mode: "0644"
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Download vip cloud provider manifest to first master
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml" # noqa yaml[line-length]
dest: "/var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml"
url: https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml # noqa yaml[line-length]
dest: /var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml
owner: root
group: root
mode: 0644
mode: "0644"
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Copy kubevip configMap manifest to first master
template:
src: "kubevip.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/kubevip.yaml"
ansible.builtin.template:
src: kubevip.yaml.j2
dest: /var/lib/rancher/k3s/server/manifests/kubevip.yaml
owner: root
group: root
mode: 0644
mode: "0644"
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

View File

@@ -1,133 +1,129 @@
---
- name: Stop k3s-init
systemd:
ansible.builtin.systemd:
name: k3s-init
state: stopped
failed_when: false
# k3s-init won't work if the port is already in use
- name: Stop k3s
systemd:
ansible.builtin.systemd:
name: k3s
state: stopped
failed_when: false
- name: Clean previous runs of k3s-init # noqa command-instead-of-module
- name: Clean previous runs of k3s-init # noqa command-instead-of-module
# The systemd module does not support "reset-failed", so we need to resort to command.
command: systemctl reset-failed k3s-init
ansible.builtin.command: systemctl reset-failed k3s-init
failed_when: false
changed_when: false
- name: Deploy K3s http_proxy conf
include_tasks: http_proxy.yml
ansible.builtin.include_tasks: http_proxy.yml
when: proxy_env is defined
- name: Deploy vip manifest
include_tasks: vip.yml
ansible.builtin.include_tasks: vip.yml
- name: Deploy metallb manifest
include_tasks: metallb.yml
ansible.builtin.include_tasks: metallb.yml
tags: metallb
when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
- name: Deploy kube-vip manifest
include_tasks: kube-vip.yml
ansible.builtin.include_tasks: kube-vip.yml
tags: kubevip
when: kube_vip_lb_ip_range is defined
- name: Init cluster inside the transient k3s-init service
command:
cmd: "systemd-run -p RestartSec=2 \
-p Restart=on-failure \
--unit=k3s-init \
k3s server {{ server_init_args }}"
ansible.builtin.command:
cmd: systemd-run -p RestartSec=2 -p Restart=on-failure --unit=k3s-init k3s server {{ server_init_args }}
creates: "{{ systemd_dir }}/k3s-init.service"
- name: Verification
when: not ansible_check_mode
block:
- name: Verify that all nodes actually joined (check k3s-init.service if this fails)
command:
ansible.builtin.command:
cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}"
register: nodes
until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length) # yamllint disable-line rule:line-length
until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length) # yamllint disable-line rule:line-length
retries: "{{ retry_count | default(20) }}"
delay: 10
changed_when: false
always:
- name: Save logs of k3s-init.service
include_tasks: fetch_k3s_init_logs.yml
ansible.builtin.include_tasks: fetch_k3s_init_logs.yml
when: log_destination
vars:
log_destination: >-
{{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }}
- name: Kill the temporary service used for initialization
systemd:
ansible.builtin.systemd:
name: k3s-init
state: stopped
failed_when: false
- name: Copy K3s service file
register: k3s_service
template:
src: "k3s.service.j2"
ansible.builtin.template:
src: k3s.service.j2
dest: "{{ systemd_dir }}/k3s.service"
owner: root
group: root
mode: 0644
mode: "0644"
- name: Enable and check K3s service
systemd:
ansible.builtin.systemd:
name: k3s
daemon_reload: true
state: restarted
enabled: true
- name: Wait for node-token
wait_for:
ansible.builtin.wait_for:
path: /var/lib/rancher/k3s/server/node-token
- name: Register node-token file access mode
stat:
ansible.builtin.stat:
path: /var/lib/rancher/k3s/server
register: p
- name: Change file access node-token
file:
ansible.builtin.file:
path: /var/lib/rancher/k3s/server
mode: "g+rx,o+rx"
mode: g+rx,o+rx
- name: Read node-token from master
slurp:
ansible.builtin.slurp:
src: /var/lib/rancher/k3s/server/node-token
register: node_token
- name: Store Master node-token
set_fact:
ansible.builtin.set_fact:
token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"
- name: Restore node-token file access
file:
ansible.builtin.file:
path: /var/lib/rancher/k3s/server
mode: "{{ p.stat.mode }}"
- name: Create directory .kube
file:
ansible.builtin.file:
path: "{{ ansible_user_dir }}/.kube"
state: directory
owner: "{{ ansible_user_id }}"
mode: "u=rwx,g=rx,o="
mode: u=rwx,g=rx,o=
- name: Copy config file to user home directory
copy:
ansible.builtin.copy:
src: /etc/rancher/k3s/k3s.yaml
dest: "{{ ansible_user_dir }}/.kube/config"
remote_src: true
owner: "{{ ansible_user_id }}"
mode: "u=rw,g=,o="
mode: u=rw,g=,o=
- name: Configure kubectl cluster to {{ endpoint_url }}
command: >-
ansible.builtin.command: >-
k3s kubectl config set-cluster default
--server={{ endpoint_url }}
--kubeconfig {{ ansible_user_dir }}/.kube/config
@@ -135,39 +131,39 @@
vars:
endpoint_url: >-
https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
# would be undefined. This will not be the case during playbook execution.
# noqa jinja[invalid]
# Deactivated linter rules:
# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
# would be undefined. This will not be the case during playbook execution.
# noqa jinja[invalid]
- name: Create kubectl symlink
file:
ansible.builtin.file:
src: /usr/local/bin/k3s
dest: /usr/local/bin/kubectl
state: link
when: k3s_create_kubectl_symlink | default(true) | bool
- name: Create crictl symlink
file:
ansible.builtin.file:
src: /usr/local/bin/k3s
dest: /usr/local/bin/crictl
state: link
when: k3s_create_crictl_symlink | default(true) | bool
- name: Get contents of manifests folder
find:
ansible.builtin.find:
paths: /var/lib/rancher/k3s/server/manifests
file_type: file
register: k3s_server_manifests
- name: Get sub dirs of manifests folder
find:
ansible.builtin.find:
paths: /var/lib/rancher/k3s/server/manifests
file_type: directory
register: k3s_server_manifests_directories
- name: Remove manifests and folders that are only needed for bootstrapping cluster so k3s doesn't auto apply on start
file:
ansible.builtin.file:
path: "{{ item.path }}"
state: absent
with_items:

View File

@@ -1,30 +1,30 @@
---
- name: Create manifests directory on first master
file:
ansible.builtin.file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
mode: "0644"
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml" # noqa yaml[line-length]
dest: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
url: https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml # noqa yaml[line-length]
dest: /var/lib/rancher/k3s/server/manifests/metallb-crds.yaml
owner: root
group: root
mode: 0644
mode: "0644"
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
ansible.builtin.replace:
path: "/var/lib/rancher/k3s/server/manifests/metallb-crds.yaml"
path: /var/lib/rancher/k3s/server/manifests/metallb-crds.yaml
regexp: "{{ item.change | ansible.builtin.regex_escape }}"
replace: "{{ item.to }}"
with_items:
- change: "metallb/speaker:{{ metal_lb_controller_tag_version }}"
to: "metallb/speaker:{{ metal_lb_speaker_tag_version }}"
- change: metallb/speaker:{{ metal_lb_controller_tag_version }}
to: metallb/speaker:{{ metal_lb_speaker_tag_version }}
loop_control:
label: "{{ item.change }} => {{ item.to }}"
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

View File

@@ -1,27 +1,27 @@
---
- name: Create manifests directory on first master
file:
ansible.builtin.file:
path: /var/lib/rancher/k3s/server/manifests
state: directory
owner: root
group: root
mode: 0644
mode: "0644"
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Download vip rbac manifest to first master
ansible.builtin.get_url:
url: "https://kube-vip.io/manifests/rbac.yaml"
dest: "/var/lib/rancher/k3s/server/manifests/vip-rbac.yaml"
url: https://kube-vip.io/manifests/rbac.yaml
dest: /var/lib/rancher/k3s/server/manifests/vip-rbac.yaml
owner: root
group: root
mode: 0644
mode: "0644"
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
- name: Copy vip manifest to first master
template:
src: "vip.yaml.j2"
dest: "/var/lib/rancher/k3s/server/manifests/vip.yaml"
ansible.builtin.template:
src: vip.yaml.j2
dest: /var/lib/rancher/k3s/server/manifests/vip.yaml
owner: root
group: root
mode: 0644
mode: "0644"
when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

View File

@@ -4,28 +4,28 @@
run_once: true
block:
- name: Create manifests directory on first master
file:
ansible.builtin.file:
path: /tmp/k3s
state: directory
owner: root
group: root
mode: 0755
mode: "0755"
- name: "Download to first master: manifest for Tigera Operator and Calico CRDs"
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml"
dest: "/tmp/k3s/tigera-operator.yaml"
url: https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml
dest: /tmp/k3s/tigera-operator.yaml
owner: root
group: root
mode: 0755
mode: "0755"
- name: Copy Calico custom resources manifest to first master
ansible.builtin.template:
src: "calico.crs.j2"
src: calico.crs.j2
dest: /tmp/k3s/custom-resources.yaml
owner: root
group: root
mode: 0755
mode: "0755"
- name: Deploy or replace Tigera Operator
block:
@@ -44,7 +44,7 @@
failed_when: "'Error' in replace_operator.stderr"
- name: Wait for Tigera Operator resources
command: >-
ansible.builtin.command: >-
k3s kubectl wait {{ item.type }}/{{ item.name }}
--namespace='tigera-operator'
--for=condition=Available=True
@@ -55,7 +55,7 @@
retries: 7
delay: 7
with_items:
- {name: tigera-operator, type: deployment}
- { name: tigera-operator, type: deployment }
loop_control:
label: "{{ item.type }}/{{ item.name }}"
@@ -76,7 +76,7 @@
failed_when: "'Error' in apply_cr.stderr"
- name: Wait for Calico system resources to be available
command: >-
ansible.builtin.command: >-
{% if item.type == 'daemonset' %}
k3s kubectl wait pods
--namespace='{{ item.namespace }}'
@@ -94,11 +94,17 @@
retries: 30
delay: 7
with_items:
- {name: calico-typha, type: deployment, namespace: calico-system}
- {name: calico-kube-controllers, type: deployment, namespace: calico-system}
- {name: csi-node-driver, type: daemonset, selector: 'k8s-app=csi-node-driver', namespace: calico-system}
- {name: calico-node, type: daemonset, selector: 'k8s-app=calico-node', namespace: calico-system}
- {name: calico-apiserver, type: deployment, namespace: calico-apiserver}
- { name: calico-typha, type: deployment, namespace: calico-system }
- { name: calico-kube-controllers, type: deployment, namespace: calico-system }
- name: csi-node-driver
type: daemonset
selector: k8s-app=csi-node-driver
namespace: calico-system
- name: calico-node
type: daemonset
selector: k8s-app=calico-node
namespace: calico-system
- { name: calico-apiserver, type: deployment, namespace: calico-apiserver }
loop_control:
label: "{{ item.type }}/{{ item.name }}"

View File

@@ -4,12 +4,12 @@
run_once: true
block:
- name: Create tmp directory on first master
file:
ansible.builtin.file:
path: /tmp/k3s
state: directory
owner: root
group: root
mode: 0755
mode: "0755"
- name: Check if Cilium CLI is installed
ansible.builtin.command: cilium version
@@ -19,7 +19,7 @@
ignore_errors: true
- name: Check for Cilium CLI version in command output
set_fact:
ansible.builtin.set_fact:
installed_cli_version: >-
{{
cilium_cli_installed.stdout_lines
@@ -32,11 +32,11 @@
- name: Get latest stable Cilium CLI version file
ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt"
dest: "/tmp/k3s/cilium-cli-stable.txt"
url: https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt
dest: /tmp/k3s/cilium-cli-stable.txt
owner: root
group: root
mode: 0755
mode: "0755"
- name: Read Cilium CLI stable version from file
ansible.builtin.command: cat /tmp/k3s/cilium-cli-stable.txt
@@ -52,7 +52,7 @@
msg: "Latest Cilium CLI version: {{ cli_ver.stdout }}"
- name: Determine if Cilium CLI needs installation or update
set_fact:
ansible.builtin.set_fact:
cilium_cli_needs_update: >-
{{
cilium_cli_installed.rc != 0 or
@@ -70,15 +70,15 @@
- name: Download Cilium CLI and checksum
ansible.builtin.get_url:
url: "{{ cilium_base_url }}/cilium-linux-{{ cli_arch }}{{ item }}"
dest: "/tmp/k3s/cilium-linux-{{ cli_arch }}{{ item }}"
dest: /tmp/k3s/cilium-linux-{{ cli_arch }}{{ item }}
owner: root
group: root
mode: 0755
mode: "0755"
loop:
- ".tar.gz"
- ".tar.gz.sha256sum"
- .tar.gz
- .tar.gz.sha256sum
vars:
cilium_base_url: "https://github.com/cilium/cilium-cli/releases/download/{{ cli_ver.stdout }}"
cilium_base_url: https://github.com/cilium/cilium-cli/releases/download/{{ cli_ver.stdout }}
- name: Verify the downloaded tarball
ansible.builtin.shell: |
@@ -89,7 +89,7 @@
- name: Extract Cilium CLI to /usr/local/bin
ansible.builtin.unarchive:
src: "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz"
src: /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz
dest: /usr/local/bin
remote_src: true
@@ -98,8 +98,8 @@
path: "{{ item }}"
state: absent
loop:
- "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz"
- "/tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz.sha256sum"
- /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz
- /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz.sha256sum
- name: Wait for connectivity to kube VIP
ansible.builtin.command: ping -c 1 {{ apiserver_endpoint }}
@@ -112,7 +112,7 @@
- name: Fail if kube VIP not reachable
ansible.builtin.fail:
msg: "API endpoint {{ apiserver_endpoint }} is not reachable"
msg: API endpoint {{ apiserver_endpoint }} is not reachable
when: ping_result.rc != 0
- name: Test for existing Cilium install
@@ -125,7 +125,6 @@
- name: Check existing Cilium install
when: cilium_installed.rc == 0
block:
- name: Check Cilium version
ansible.builtin.command: cilium version
register: cilium_version
@@ -134,7 +133,7 @@
ignore_errors: true
- name: Parse installed Cilium version
set_fact:
ansible.builtin.set_fact:
installed_cilium_version: >-
{{
cilium_version.stdout_lines
@@ -145,7 +144,7 @@
}}
- name: Determine if Cilium needs update
set_fact:
ansible.builtin.set_fact:
cilium_needs_update: >-
{{ 'v' + installed_cilium_version != cilium_tag }}
@@ -191,7 +190,7 @@
when: cilium_installed.rc != 0 or cilium_needs_update
- name: Wait for Cilium resources
command: >-
ansible.builtin.command: >-
{% if item.type == 'daemonset' %}
k3s kubectl wait pods
--namespace=kube-system
@@ -209,10 +208,10 @@
retries: 30
delay: 7
with_items:
- {name: cilium-operator, type: deployment}
- {name: cilium, type: daemonset, selector: 'k8s-app=cilium'}
- {name: hubble-relay, type: deployment, check_hubble: true}
- {name: hubble-ui, type: deployment, check_hubble: true}
- { name: cilium-operator, type: deployment }
- { name: cilium, type: daemonset, selector: k8s-app=cilium }
- { name: hubble-relay, type: deployment, check_hubble: true }
- { name: hubble-ui, type: deployment, check_hubble: true }
loop_control:
label: "{{ item.type }}/{{ item.name }}"
when: >-
@@ -221,14 +220,13 @@
- name: Configure Cilium BGP
when: cilium_bgp
block:
- name: Copy BGP manifests to first master
ansible.builtin.template:
src: "cilium.crs.j2"
src: cilium.crs.j2
dest: /tmp/k3s/cilium-bgp.yaml
owner: root
group: root
mode: 0755
mode: "0755"
- name: Apply BGP manifests
ansible.builtin.command:

View File

@@ -1,20 +1,20 @@
---
- name: Deploy calico
include_tasks: calico.yml
ansible.builtin.include_tasks: calico.yml
tags: calico
when: calico_iface is defined and cilium_iface is not defined
- name: Deploy cilium
include_tasks: cilium.yml
ansible.builtin.include_tasks: cilium.yml
tags: cilium
when: cilium_iface is defined
- name: Deploy metallb pool
include_tasks: metallb.yml
ansible.builtin.include_tasks: metallb.yml
tags: metallb
when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)
- name: Remove tmp directory used for manifests
file:
ansible.builtin.file:
path: /tmp/k3s
state: absent

View File

@@ -1,15 +1,15 @@
---
- name: Create manifests directory for temp configuration
file:
ansible.builtin.file:
path: /tmp/k3s
state: directory
owner: "{{ ansible_user_id }}"
mode: 0755
mode: "0755"
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Delete outdated metallb replicas
shell: |-
ansible.builtin.shell: |-
set -o pipefail
REPLICAS=$(k3s kubectl --namespace='metallb-system' get replicasets \
@@ -30,23 +30,23 @@
with_items: "{{ groups[group_name_master | default('master')] }}"
- name: Copy metallb CRs manifest to first master
template:
src: "metallb.crs.j2"
dest: "/tmp/k3s/metallb-crs.yaml"
ansible.builtin.template:
src: metallb.crs.j2
dest: /tmp/k3s/metallb-crs.yaml
owner: "{{ ansible_user_id }}"
mode: 0755
mode: "0755"
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Test metallb-system namespace
command: >-
ansible.builtin.command: >-
k3s kubectl -n metallb-system
changed_when: false
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Wait for MetalLB resources
command: >-
ansible.builtin.command: >-
k3s kubectl wait {{ item.resource }}
--namespace='metallb-system'
{% if item.name | default(False) -%}{{ item.name }}{%- endif %}
@@ -84,7 +84,7 @@
label: "{{ item.description }}"
- name: Set metallb webhook service name
set_fact:
ansible.builtin.set_fact:
metallb_webhook_service_name: >-
{{
(
@@ -98,14 +98,14 @@
}}
- name: Test metallb-system webhook-service endpoint
command: >-
ansible.builtin.command: >-
k3s kubectl -n metallb-system get endpoints {{ metallb_webhook_service_name }}
changed_when: false
with_items: "{{ groups[group_name_master | default('master')] }}"
run_once: true
- name: Apply metallb CRs
command: >-
ansible.builtin.command: >-
k3s kubectl apply -f /tmp/k3s/metallb-crs.yaml
--timeout='{{ metal_lb_available_timeout }}'
register: this
@@ -115,7 +115,7 @@
retries: 5
- name: Test metallb-system resources for Layer 2 configuration
command: >-
ansible.builtin.command: >-
k3s kubectl -n metallb-system get {{ item }}
changed_when: false
run_once: true
@@ -125,7 +125,7 @@
- L2Advertisement
- name: Test metallb-system resources for BGP configuration
command: >-
ansible.builtin.command: >-
k3s kubectl -n metallb-system get {{ item }}
changed_when: false
run_once: true

View File

@@ -1,6 +1,6 @@
---
- name: Reboot server
become: true
reboot:
ansible.builtin.reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
listen: reboot server

View File

@@ -1,20 +1,20 @@
---
- name: Check for rc.local file
stat:
ansible.builtin.stat:
path: /etc/rc.local
register: rcfile
- name: Create rc.local if needed
lineinfile:
ansible.builtin.lineinfile:
path: /etc/rc.local
line: "#!/bin/sh -e"
create: true
insertbefore: BOF
mode: "u=rwx,g=rx,o=rx"
mode: u=rwx,g=rx,o=rx
when: not rcfile.stat.exists
- name: Write rc.local file
blockinfile:
ansible.builtin.blockinfile:
path: /etc/rc.local
content: "{{ lookup('template', 'templates/rc.local.j2') }}"
state: present

View File

@@ -34,10 +34,10 @@
tags: sysctl
- name: Add br_netfilter to /etc/modules-load.d/
copy:
content: "br_netfilter"
ansible.builtin.copy:
content: br_netfilter
dest: /etc/modules-load.d/br_netfilter.conf
mode: "u=rw,g=,o="
mode: u=rw,g=,o=
when: ansible_os_family == "RedHat"
- name: Load br_netfilter
@@ -59,11 +59,11 @@
tags: sysctl
- name: Add /usr/local/bin to sudo secure_path
lineinfile:
line: 'Defaults secure_path = {{ secure_path[ansible_os_family] }}'
regexp: "Defaults(\\s)*secure_path(\\s)*="
ansible.builtin.lineinfile:
line: Defaults secure_path = {{ secure_path[ansible_os_family] }}
regexp: Defaults(\s)*secure_path(\s)*=
state: present
insertafter: EOF
path: /etc/sudoers
validate: 'visudo -cf %s'
validate: visudo -cf %s
when: ansible_os_family in [ "RedHat", "Suse" ]

View File

@@ -2,12 +2,12 @@
- name: Reboot containers
block:
- name: Get container ids from filtered files
set_fact:
ansible.builtin.set_fact:
proxmox_lxc_filtered_ids: >-
{{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}
listen: reboot containers
- name: Reboot container
command: "pct reboot {{ item }}"
ansible.builtin.command: pct reboot {{ item }}
loop: "{{ proxmox_lxc_filtered_ids }}"
changed_when: true
listen: reboot containers

View File

@@ -1,44 +1,43 @@
---
- name: Check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
ansible.builtin.stat:
path: /etc/pve/lxc/{{ item }}.conf
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
- name: Filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
ansible.builtin.set_fact:
proxmox_lxc_filtered_files: '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}' # noqa yaml[line-length]
# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
- name: Ensure lxc config has the right apparmor profile
lineinfile:
ansible.builtin.lineinfile:
dest: "{{ item }}"
regexp: "^lxc.apparmor.profile"
regexp: ^lxc.apparmor.profile
line: "lxc.apparmor.profile: unconfined"
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Ensure lxc config has the right cgroup
lineinfile:
ansible.builtin.lineinfile:
dest: "{{ item }}"
regexp: "^lxc.cgroup.devices.allow"
regexp: ^lxc.cgroup.devices.allow
line: "lxc.cgroup.devices.allow: a"
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Ensure lxc config has the right cap drop
lineinfile:
ansible.builtin.lineinfile:
dest: "{{ item }}"
regexp: "^lxc.cap.drop"
regexp: ^lxc.cap.drop
line: "lxc.cap.drop: "
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Ensure lxc config has the right mounts
lineinfile:
ansible.builtin.lineinfile:
dest: "{{ item }}"
regexp: "^lxc.mount.auto"
regexp: ^lxc.mount.auto
line: 'lxc.mount.auto: "proc:rw sys:rw"'
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers

View File

@@ -1,5 +1,5 @@
---
- name: Reboot
reboot:
ansible.builtin.reboot:
reboot_command: "{{ custom_reboot_command | default(omit) }}"
listen: reboot

View File

@@ -1,38 +1,37 @@
---
- name: Test for raspberry pi /proc/cpuinfo
command: grep -E "Raspberry Pi|BCM2708|BCM2709|BCM2835|BCM2836" /proc/cpuinfo
ansible.builtin.command: grep -E "Raspberry Pi|BCM2708|BCM2709|BCM2835|BCM2836" /proc/cpuinfo
register: grep_cpuinfo_raspberrypi
failed_when: false
changed_when: false
- name: Test for raspberry pi /proc/device-tree/model
command: grep -E "Raspberry Pi" /proc/device-tree/model
ansible.builtin.command: grep -E "Raspberry Pi" /proc/device-tree/model
register: grep_device_tree_model_raspberrypi
failed_when: false
changed_when: false
- name: Set raspberry_pi fact to true
set_fact:
ansible.builtin.set_fact:
raspberry_pi: true
when:
grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0
when: grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0
- name: Set detected_distribution to Raspbian (ARM64 on Raspbian, Debian Buster/Bullseye/Bookworm)
set_fact:
ansible.builtin.set_fact:
detected_distribution: Raspbian
vars:
allowed_descriptions:
- "[Rr]aspbian.*"
- "Debian.*buster"
- "Debian.*bullseye"
- "Debian.*bookworm"
- Debian.*buster
- Debian.*bullseye
- Debian.*bookworm
when:
- ansible_facts.architecture is search("aarch64")
- raspberry_pi|default(false)
- ansible_facts.lsb.description|default("") is match(allowed_descriptions | join('|'))
- name: Set detected_distribution to Raspbian (ARM64 on Debian Bookworm)
set_fact:
ansible.builtin.set_fact:
detected_distribution: Raspbian
when:
- ansible_facts.architecture is search("aarch64")
@@ -40,13 +39,13 @@
- ansible_facts.lsb.description|default("") is match("Debian.*bookworm")
- name: Set detected_distribution_major_version
set_fact:
ansible.builtin.set_fact:
detected_distribution_major_version: "{{ ansible_facts.lsb.major_release }}"
when:
- detected_distribution | default("") == "Raspbian"
- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }}
include_tasks: "{{ item }}"
ansible.builtin.include_tasks: "{{ item }}"
with_first_found:
- "{{ action_ }}/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml"
- "{{ action_ }}/{{ detected_distribution }}.yml"

View File

@@ -1,13 +1,13 @@
---
- name: Test for cmdline path
stat:
ansible.builtin.stat:
path: /boot/firmware/cmdline.txt
register: boot_cmdline_path
failed_when: false
changed_when: false
- name: Set cmdline path based on Debian version and command result
set_fact:
ansible.builtin.set_fact:
cmdline_path: >-
{{
(
@@ -20,20 +20,20 @@
}}
- name: Activating cgroup support
lineinfile:
ansible.builtin.lineinfile:
path: "{{ cmdline_path }}"
regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
regexp: ^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$
line: \1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory
backrefs: true
notify: reboot
- name: Install iptables
apt:
ansible.builtin.apt:
name: iptables
state: present
- name: Flush iptables before changing to iptables-legacy
iptables:
ansible.builtin.iptables:
flush: true
- name: Changing to iptables-legacy

View File

@@ -1,9 +1,9 @@
---
- name: Enable cgroup via boot commandline if not already enabled for Rocky
lineinfile:
ansible.builtin.lineinfile:
path: /boot/cmdline.txt
backrefs: true
regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
regexp: ^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$
line: \1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory
notify: reboot
when: not ansible_check_mode

View File

@@ -1,13 +1,13 @@
---
- name: Enable cgroup via boot commandline if not already enabled for Ubuntu on a Raspberry Pi
lineinfile:
ansible.builtin.lineinfile:
path: /boot/firmware/cmdline.txt
backrefs: true
regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$'
line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory'
regexp: ^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$
line: \1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory
notify: reboot
- name: Install linux-modules-extra-raspi
apt:
ansible.builtin.apt:
name: linux-modules-extra-raspi
state: present

View File

@@ -1,5 +1,5 @@
---
- name: Remove linux-modules-extra-raspi
apt:
ansible.builtin.apt:
name: linux-modules-extra-raspi
state: absent

View File

@@ -1,6 +1,6 @@
---
- name: Disable services
systemd:
ansible.builtin.systemd:
name: "{{ item }}"
state: stopped
enabled: false
@@ -12,12 +12,12 @@
- name: RUN pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
register: pkill_containerd_shim_runc
command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
changed_when: "pkill_containerd_shim_runc.rc == 0"
ansible.builtin.command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
changed_when: pkill_containerd_shim_runc.rc == 0
failed_when: false
- name: Umount k3s filesystems
include_tasks: umount_with_children.yml
ansible.builtin.include_tasks: umount_with_children.yml
with_items:
- /run/k3s
- /var/lib/kubelet
@@ -30,7 +30,7 @@
loop_var: mounted_fs
- name: Remove service files, binaries and data
file:
ansible.builtin.file:
name: "{{ item }}"
state: absent
with_items:
@@ -48,7 +48,7 @@
- /etc/cni/net.d
- name: Remove K3s http_proxy files
file:
ansible.builtin.file:
name: "{{ item }}"
state: absent
with_items:
@@ -59,22 +59,22 @@
when: proxy_env is defined
- name: Reload daemon_reload
systemd:
ansible.builtin.systemd:
daemon_reload: true
- name: Remove tmp directory used for manifests
file:
ansible.builtin.file:
path: /tmp/k3s
state: absent
- name: Check if rc.local exists
stat:
ansible.builtin.stat:
path: /etc/rc.local
register: rcfile
- name: Remove rc.local modifications for proxmox lxc containers
become: true
blockinfile:
ansible.builtin.blockinfile:
path: /etc/rc.local
content: "{{ lookup('template', 'templates/rc.local.j2') }}"
create: false
@@ -83,14 +83,14 @@
- name: Check rc.local for cleanup
become: true
slurp:
ansible.builtin.slurp:
src: /etc/rc.local
register: rcslurp
when: proxmox_lxc_configure and rcfile.stat.exists
- name: Cleanup rc.local if we only have a Shebang line
become: true
file:
ansible.builtin.file:
path: /etc/rc.local
state: absent
when: proxmox_lxc_configure and rcfile.stat.exists and ((rcslurp.content | b64decode).splitlines() | length) <= 1

View File

@@ -1,6 +1,6 @@
---
- name: Get the list of mounted filesystems
shell: set -o pipefail && cat /proc/mounts | awk '{ print $2}' | grep -E "^{{ mounted_fs }}"
ansible.builtin.shell: set -o pipefail && cat /proc/mounts | awk '{ print $2}' | grep -E "^{{ mounted_fs }}"
register: get_mounted_filesystems
args:
executable: /bin/bash
@@ -12,5 +12,4 @@
ansible.posix.mount:
path: "{{ item }}"
state: unmounted
with_items:
"{{ get_mounted_filesystems.stdout_lines | reverse | list }}"
with_items: "{{ get_mounted_filesystems.stdout_lines | reverse | list }}"

View File

@@ -1,46 +1,45 @@
---
- name: Check for container files that exist on this host
stat:
path: "/etc/pve/lxc/{{ item }}.conf"
ansible.builtin.stat:
path: /etc/pve/lxc/{{ item }}.conf
loop: "{{ proxmox_lxc_ct_ids }}"
register: stat_results
- name: Filter out files that do not exist
set_fact:
proxmox_lxc_filtered_files:
'{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}'
ansible.builtin.set_fact:
proxmox_lxc_filtered_files: '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}' # noqa yaml[line-length]
- name: Remove LXC apparmor profile
lineinfile:
ansible.builtin.lineinfile:
dest: "{{ item }}"
regexp: "^lxc.apparmor.profile"
regexp: ^lxc.apparmor.profile
line: "lxc.apparmor.profile: unconfined"
state: absent
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Remove lxc cgroups
lineinfile:
ansible.builtin.lineinfile:
dest: "{{ item }}"
regexp: "^lxc.cgroup.devices.allow"
regexp: ^lxc.cgroup.devices.allow
line: "lxc.cgroup.devices.allow: a"
state: absent
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Remove lxc cap drop
lineinfile:
ansible.builtin.lineinfile:
dest: "{{ item }}"
regexp: "^lxc.cap.drop"
regexp: ^lxc.cap.drop
line: "lxc.cap.drop: "
state: absent
loop: "{{ proxmox_lxc_filtered_files }}"
notify: reboot containers
- name: Remove lxc mounts
lineinfile:
ansible.builtin.lineinfile:
dest: "{{ item }}"
regexp: "^lxc.mount.auto"
regexp: ^lxc.mount.auto
line: 'lxc.mount.auto: "proc:rw sys:rw"'
state: absent
loop: "{{ proxmox_lxc_filtered_files }}"

View File

@@ -3,8 +3,8 @@
hosts: all
pre_tasks:
- name: Verify Ansible is version 2.11 or above. (If this fails you may need to update Ansible)
assert:
that: "ansible_version.full is version_compare('2.11', '>=')"
ansible.builtin.assert:
that: ansible_version.full is version_compare('2.11', '>=')
msg: >
"Ansible is out of date. See here for more info: https://docs.technotim.live/posts/ansible-automation/"