From beb0d4dbffcba5e04a83784ec3578586412a0dee Mon Sep 17 00:00:00 2001 From: celoman Date: Tue, 8 Apr 2025 11:18:50 +0000 Subject: [PATCH] torre --- k3s-ansible-copia/.ansible-lint | 21 + k3s-ansible-copia/.editorconfig | 13 + k3s-ansible-copia/.pre-commit-config.yaml | 35 + k3s-ansible-copia/.yamllint | 20 + k3s-ansible-copia/LICENSE | 177 ++ k3s-ansible-copia/README.md | 235 ++ k3s-ansible-copia/ansible.cfg | 2 + k3s-ansible-copia/ansible.example.cfg | 2 + .../collections/requirements.yml | 6 + k3s-ansible-copia/deploy.sh | 3 + k3s-ansible-copia/error | 2339 +++++++++++++++++ k3s-ansible-copia/example/deployment.yml | 20 + k3s-ansible-copia/example/service.yml | 13 + k3s-ansible-copia/fk | 1 + k3s-ansible-copia/galaxy.yml | 81 + k3s-ansible-copia/inventory/.gitignore | 3 + .../inventory/sample/group_vars/all.yml | 171 ++ .../inventory/sample/group_vars/proxmox.yml | 2 + k3s-ansible-copia/inventory/sample/hosts.ini | 17 + k3s-ansible-copia/k3s.crt | 0 k3s-ansible-copia/k3s_ca.crt | 0 k3s-ansible-copia/kubeconfig | 19 + k3s-ansible-copia/molecule/README.md | 79 + .../molecule/calico/molecule.yml | 49 + .../molecule/calico/overrides.yml | 16 + .../molecule/cilium/molecule.yml | 49 + .../molecule/cilium/overrides.yml | 16 + .../molecule/default/molecule.yml | 99 + .../molecule/default/overrides.yml | 12 + .../molecule/default/prepare.yml | 22 + k3s-ansible-copia/molecule/ipv6/README.md | 35 + .../molecule/ipv6/host_vars/control1.yml | 3 + .../molecule/ipv6/host_vars/control2.yml | 3 + .../molecule/ipv6/host_vars/node1.yml | 3 + k3s-ansible-copia/molecule/ipv6/molecule.yml | 81 + k3s-ansible-copia/molecule/ipv6/overrides.yml | 51 + k3s-ansible-copia/molecule/ipv6/prepare.yml | 51 + .../ipv6/templates/55-flannel-ipv4.yaml.j2 | 8 + .../molecule/kube-vip/molecule.yml | 49 + .../molecule/kube-vip/overrides.yml | 17 + .../molecule/resources/converge.yml | 7 + .../molecule/resources/reset.yml | 7 + .../molecule/resources/verify.yml | 5 + .../verify_from_outside/defaults/main.yml | 9 + .../tasks/kubecfg-cleanup.yml | 5 + .../tasks/kubecfg-fetch.yml | 19 + .../verify_from_outside/tasks/main.yml | 14 + .../tasks/test/deploy-example.yml | 58 + .../tasks/test/get-nodes.yml | 28 + .../molecule/single_node/molecule.yml | 49 + .../molecule/single_node/overrides.yml | 16 + k3s-ansible-copia/reboot.sh | 3 + k3s-ansible-copia/reboot.yml | 10 + k3s-ansible-copia/requirements.in | 10 + k3s-ansible-copia/requirements.txt | 169 ++ k3s-ansible-copia/reset.sh | 3 + k3s-ansible-copia/reset.yml | 25 + .../roles/download/meta/main.yml | 8 + .../roles/download/tasks/main.yml | 34 + .../roles/k3s/node/defaults/main.yml | 3 + .../roles/k3s_agent/defaults/main.yml | 4 + .../roles/k3s_agent/meta/main.yml | 39 + .../roles/k3s_agent/tasks/http_proxy.yml | 18 + .../roles/k3s_agent/tasks/main.yml | 36 + .../k3s_agent/templates/http_proxy.conf.j2 | 4 + .../roles/k3s_agent/templates/k3s.service.j2 | 27 + .../roles/k3s_custom_registries/meta/main.yml | 20 + .../k3s_custom_registries/tasks/main.yml | 16 + .../roles/k3s_server/defaults/main.yml | 40 + .../roles/k3s_server/meta/main.yml | 135 + .../k3s_server/tasks/fetch_k3s_init_logs.yml | 28 + .../roles/k3s_server/tasks/http_proxy.yml | 16 + .../roles/k3s_server/tasks/kube-vip.yml | 27 + .../roles/k3s_server/tasks/main.yml | 173 ++ .../roles/k3s_server/tasks/metallb.yml | 30 + .../roles/k3s_server/tasks/vip.yml | 31 + .../roles/k3s_server/templates/content.j2 | 5 + .../k3s_server/templates/http_proxy.conf.j2 | 4 + .../roles/k3s_server/templates/k3s.service.j2 | 24 + 
.../k3s_server/templates/kubevip.yaml.j2 | 13 + .../roles/k3s_server/templates/vip.yaml.j2 | 104 + .../roles/k3s_server_post/defaults/main.yml | 32 + .../roles/k3s_server_post/meta/main.yml | 153 ++ .../roles/k3s_server_post/tasks/calico.yml | 120 + .../roles/k3s_server_post/tasks/cilium.yml | 256 ++ .../roles/k3s_server_post/tasks/main.yml | 20 + .../roles/k3s_server_post/tasks/metallb.yml | 136 + .../k3s_server_post/templates/calico.crs.j2 | 41 + .../k3s_server_post/templates/cilium.crs.j2 | 48 + .../k3s_server_post/templates/metallb.crs.j2 | 43 + k3s-ansible-copia/roles/lxc/handlers/main.yml | 6 + k3s-ansible-copia/roles/lxc/meta/main.yml | 8 + k3s-ansible-copia/roles/lxc/tasks/main.yml | 21 + .../roles/prereq/defaults/main.yml | 4 + k3s-ansible-copia/roles/prereq/meta/main.yml | 7 + k3s-ansible-copia/roles/prereq/tasks/main.yml | 70 + .../roles/proxmox_lxc/handlers/main.yml | 13 + .../roles/proxmox_lxc/meta/main.yml | 9 + .../roles/proxmox_lxc/tasks/main.yml | 43 + .../roles/raspberrypi/defaults/main.yml | 6 + .../roles/raspberrypi/handlers/main.yml | 5 + .../roles/raspberrypi/meta/main.yml | 10 + .../roles/raspberrypi/tasks/main.yml | 59 + .../raspberrypi/tasks/setup/Raspbian.yml | 49 + .../roles/raspberrypi/tasks/setup/Rocky.yml | 9 + .../roles/raspberrypi/tasks/setup/Ubuntu.yml | 14 + .../roles/raspberrypi/tasks/setup/default.yml | 1 + .../raspberrypi/tasks/teardown/Raspbian.yml | 1 + .../raspberrypi/tasks/teardown/Rocky.yml | 1 + .../raspberrypi/tasks/teardown/Ubuntu.yml | 6 + .../raspberrypi/tasks/teardown/default.yml | 1 + .../roles/reset/defaults/main.yml | 2 + k3s-ansible-copia/roles/reset/meta/main.yml | 8 + k3s-ansible-copia/roles/reset/tasks/main.yml | 96 + .../reset/tasks/umount_with_children.yml | 15 + .../roles/reset_proxmox_lxc/handlers/main.yml | 1 + .../roles/reset_proxmox_lxc/meta/main.yml | 9 + .../roles/reset_proxmox_lxc/tasks/main.yml | 46 + k3s-ansible-copia/site.yml | 68 + k3s-ansible-copia/templates/rc.local.j2 | 8 + k3s-ansible-copia/xclip | 18 + .../roles/proxmox_lxc/handlers/main.yml | 0 122 files changed, 6562 insertions(+) create mode 100644 k3s-ansible-copia/.ansible-lint create mode 100644 k3s-ansible-copia/.editorconfig create mode 100644 k3s-ansible-copia/.pre-commit-config.yaml create mode 100644 k3s-ansible-copia/.yamllint create mode 100644 k3s-ansible-copia/LICENSE create mode 100644 k3s-ansible-copia/README.md create mode 100644 k3s-ansible-copia/ansible.cfg create mode 100644 k3s-ansible-copia/ansible.example.cfg create mode 100644 k3s-ansible-copia/collections/requirements.yml create mode 100755 k3s-ansible-copia/deploy.sh create mode 100644 k3s-ansible-copia/error create mode 100644 k3s-ansible-copia/example/deployment.yml create mode 100644 k3s-ansible-copia/example/service.yml create mode 100644 k3s-ansible-copia/fk create mode 100644 k3s-ansible-copia/galaxy.yml create mode 100644 k3s-ansible-copia/inventory/.gitignore create mode 100644 k3s-ansible-copia/inventory/sample/group_vars/all.yml create mode 100644 k3s-ansible-copia/inventory/sample/group_vars/proxmox.yml create mode 100644 k3s-ansible-copia/inventory/sample/hosts.ini create mode 100644 k3s-ansible-copia/k3s.crt create mode 100644 k3s-ansible-copia/k3s_ca.crt create mode 100644 k3s-ansible-copia/kubeconfig create mode 100644 k3s-ansible-copia/molecule/README.md create mode 100644 k3s-ansible-copia/molecule/calico/molecule.yml create mode 100644 k3s-ansible-copia/molecule/calico/overrides.yml create mode 100644 k3s-ansible-copia/molecule/cilium/molecule.yml create mode 100644 
k3s-ansible-copia/molecule/cilium/overrides.yml create mode 100644 k3s-ansible-copia/molecule/default/molecule.yml create mode 100644 k3s-ansible-copia/molecule/default/overrides.yml create mode 100644 k3s-ansible-copia/molecule/default/prepare.yml create mode 100644 k3s-ansible-copia/molecule/ipv6/README.md create mode 100644 k3s-ansible-copia/molecule/ipv6/host_vars/control1.yml create mode 100644 k3s-ansible-copia/molecule/ipv6/host_vars/control2.yml create mode 100644 k3s-ansible-copia/molecule/ipv6/host_vars/node1.yml create mode 100644 k3s-ansible-copia/molecule/ipv6/molecule.yml create mode 100644 k3s-ansible-copia/molecule/ipv6/overrides.yml create mode 100644 k3s-ansible-copia/molecule/ipv6/prepare.yml create mode 100644 k3s-ansible-copia/molecule/ipv6/templates/55-flannel-ipv4.yaml.j2 create mode 100644 k3s-ansible-copia/molecule/kube-vip/molecule.yml create mode 100644 k3s-ansible-copia/molecule/kube-vip/overrides.yml create mode 100644 k3s-ansible-copia/molecule/resources/converge.yml create mode 100644 k3s-ansible-copia/molecule/resources/reset.yml create mode 100644 k3s-ansible-copia/molecule/resources/verify.yml create mode 100644 k3s-ansible-copia/molecule/resources/verify_from_outside/defaults/main.yml create mode 100644 k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/kubecfg-cleanup.yml create mode 100644 k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/kubecfg-fetch.yml create mode 100644 k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/main.yml create mode 100644 k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/test/deploy-example.yml create mode 100644 k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/test/get-nodes.yml create mode 100644 k3s-ansible-copia/molecule/single_node/molecule.yml create mode 100644 k3s-ansible-copia/molecule/single_node/overrides.yml create mode 100755 k3s-ansible-copia/reboot.sh create mode 100644 k3s-ansible-copia/reboot.yml create mode 100644 k3s-ansible-copia/requirements.in create mode 100644 k3s-ansible-copia/requirements.txt create mode 100755 k3s-ansible-copia/reset.sh create mode 100644 k3s-ansible-copia/reset.yml create mode 100644 k3s-ansible-copia/roles/download/meta/main.yml create mode 100644 k3s-ansible-copia/roles/download/tasks/main.yml create mode 100644 k3s-ansible-copia/roles/k3s/node/defaults/main.yml create mode 100644 k3s-ansible-copia/roles/k3s_agent/defaults/main.yml create mode 100644 k3s-ansible-copia/roles/k3s_agent/meta/main.yml create mode 100644 k3s-ansible-copia/roles/k3s_agent/tasks/http_proxy.yml create mode 100644 k3s-ansible-copia/roles/k3s_agent/tasks/main.yml create mode 100644 k3s-ansible-copia/roles/k3s_agent/templates/http_proxy.conf.j2 create mode 100644 k3s-ansible-copia/roles/k3s_agent/templates/k3s.service.j2 create mode 100644 k3s-ansible-copia/roles/k3s_custom_registries/meta/main.yml create mode 100644 k3s-ansible-copia/roles/k3s_custom_registries/tasks/main.yml create mode 100644 k3s-ansible-copia/roles/k3s_server/defaults/main.yml create mode 100644 k3s-ansible-copia/roles/k3s_server/meta/main.yml create mode 100644 k3s-ansible-copia/roles/k3s_server/tasks/fetch_k3s_init_logs.yml create mode 100644 k3s-ansible-copia/roles/k3s_server/tasks/http_proxy.yml create mode 100644 k3s-ansible-copia/roles/k3s_server/tasks/kube-vip.yml create mode 100644 k3s-ansible-copia/roles/k3s_server/tasks/main.yml create mode 100644 k3s-ansible-copia/roles/k3s_server/tasks/metallb.yml create mode 100644 
k3s-ansible-copia/roles/k3s_server/tasks/vip.yml create mode 100644 k3s-ansible-copia/roles/k3s_server/templates/content.j2 create mode 100644 k3s-ansible-copia/roles/k3s_server/templates/http_proxy.conf.j2 create mode 100644 k3s-ansible-copia/roles/k3s_server/templates/k3s.service.j2 create mode 100644 k3s-ansible-copia/roles/k3s_server/templates/kubevip.yaml.j2 create mode 100644 k3s-ansible-copia/roles/k3s_server/templates/vip.yaml.j2 create mode 100644 k3s-ansible-copia/roles/k3s_server_post/defaults/main.yml create mode 100644 k3s-ansible-copia/roles/k3s_server_post/meta/main.yml create mode 100644 k3s-ansible-copia/roles/k3s_server_post/tasks/calico.yml create mode 100644 k3s-ansible-copia/roles/k3s_server_post/tasks/cilium.yml create mode 100644 k3s-ansible-copia/roles/k3s_server_post/tasks/main.yml create mode 100644 k3s-ansible-copia/roles/k3s_server_post/tasks/metallb.yml create mode 100644 k3s-ansible-copia/roles/k3s_server_post/templates/calico.crs.j2 create mode 100644 k3s-ansible-copia/roles/k3s_server_post/templates/cilium.crs.j2 create mode 100644 k3s-ansible-copia/roles/k3s_server_post/templates/metallb.crs.j2 create mode 100644 k3s-ansible-copia/roles/lxc/handlers/main.yml create mode 100644 k3s-ansible-copia/roles/lxc/meta/main.yml create mode 100644 k3s-ansible-copia/roles/lxc/tasks/main.yml create mode 100644 k3s-ansible-copia/roles/prereq/defaults/main.yml create mode 100644 k3s-ansible-copia/roles/prereq/meta/main.yml create mode 100644 k3s-ansible-copia/roles/prereq/tasks/main.yml create mode 100755 k3s-ansible-copia/roles/proxmox_lxc/handlers/main.yml create mode 100644 k3s-ansible-copia/roles/proxmox_lxc/meta/main.yml create mode 100644 k3s-ansible-copia/roles/proxmox_lxc/tasks/main.yml create mode 100644 k3s-ansible-copia/roles/raspberrypi/defaults/main.yml create mode 100644 k3s-ansible-copia/roles/raspberrypi/handlers/main.yml create mode 100644 k3s-ansible-copia/roles/raspberrypi/meta/main.yml create mode 100644 k3s-ansible-copia/roles/raspberrypi/tasks/main.yml create mode 100644 k3s-ansible-copia/roles/raspberrypi/tasks/setup/Raspbian.yml create mode 100644 k3s-ansible-copia/roles/raspberrypi/tasks/setup/Rocky.yml create mode 100644 k3s-ansible-copia/roles/raspberrypi/tasks/setup/Ubuntu.yml create mode 100644 k3s-ansible-copia/roles/raspberrypi/tasks/setup/default.yml create mode 100644 k3s-ansible-copia/roles/raspberrypi/tasks/teardown/Raspbian.yml create mode 100644 k3s-ansible-copia/roles/raspberrypi/tasks/teardown/Rocky.yml create mode 100644 k3s-ansible-copia/roles/raspberrypi/tasks/teardown/Ubuntu.yml create mode 100644 k3s-ansible-copia/roles/raspberrypi/tasks/teardown/default.yml create mode 100644 k3s-ansible-copia/roles/reset/defaults/main.yml create mode 100644 k3s-ansible-copia/roles/reset/meta/main.yml create mode 100644 k3s-ansible-copia/roles/reset/tasks/main.yml create mode 100644 k3s-ansible-copia/roles/reset/tasks/umount_with_children.yml create mode 120000 k3s-ansible-copia/roles/reset_proxmox_lxc/handlers/main.yml create mode 100644 k3s-ansible-copia/roles/reset_proxmox_lxc/meta/main.yml create mode 100644 k3s-ansible-copia/roles/reset_proxmox_lxc/tasks/main.yml create mode 100644 k3s-ansible-copia/site.yml create mode 100644 k3s-ansible-copia/templates/rc.local.j2 create mode 100644 k3s-ansible-copia/xclip mode change 100644 => 100755 k3s-ansible/roles/proxmox_lxc/handlers/main.yml diff --git a/k3s-ansible-copia/.ansible-lint b/k3s-ansible-copia/.ansible-lint new file mode 100644 index 0000000..94f4c3b --- /dev/null +++ 
b/k3s-ansible-copia/.ansible-lint @@ -0,0 +1,21 @@ +--- +profile: production +exclude_paths: + # default paths + - .cache/ + - .github/ + - test/fixtures/formatting-before/ + - test/fixtures/formatting-prettier/ + + # The "converge" and "reset" playbooks use import_playbook in + # conjunction with the "env" lookup plugin, which lets the + # syntax check of ansible-lint fail. + - molecule/**/converge.yml + - molecule/**/prepare.yml + - molecule/**/reset.yml + + # The file was generated by galaxy ansible - don't mess with it. + - galaxy.yml + +skip_list: + - var-naming[no-role-prefix] diff --git a/k3s-ansible-copia/.editorconfig b/k3s-ansible-copia/.editorconfig new file mode 100644 index 0000000..02c5127 --- /dev/null +++ b/k3s-ansible-copia/.editorconfig @@ -0,0 +1,13 @@ +root = true +[*] +indent_style = space +indent_size = 2 +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +end_of_line = lf +max_line_length = off +[Makefile] +indent_style = tab +[*.go] +indent_style = tab diff --git a/k3s-ansible-copia/.pre-commit-config.yaml b/k3s-ansible-copia/.pre-commit-config.yaml new file mode 100644 index 0000000..c1e58c2 --- /dev/null +++ b/k3s-ansible-copia/.pre-commit-config.yaml @@ -0,0 +1,35 @@ +--- +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: requirements-txt-fixer + - id: sort-simple-yaml + - id: detect-private-key + - id: check-merge-conflict + - id: end-of-file-fixer + - id: mixed-line-ending + - id: trailing-whitespace + args: [--markdown-linebreak-ext=md] + - repo: https://github.com/adrienverge/yamllint.git + rev: v1.33.0 + hooks: + - id: yamllint + args: [-c=.yamllint] + - repo: https://github.com/ansible-community/ansible-lint.git + rev: v6.22.2 + hooks: + - id: ansible-lint + - repo: https://github.com/shellcheck-py/shellcheck-py + rev: v0.9.0.6 + hooks: + - id: shellcheck + - repo: https://github.com/Lucas-C/pre-commit-hooks + rev: v1.5.4 + hooks: + - id: remove-crlf + - id: remove-tabs + - repo: https://github.com/sirosen/texthooks + rev: 0.6.4 + hooks: + - id: fix-smartquotes diff --git a/k3s-ansible-copia/.yamllint b/k3s-ansible-copia/.yamllint new file mode 100644 index 0000000..12f8331 --- /dev/null +++ b/k3s-ansible-copia/.yamllint @@ -0,0 +1,20 @@ +--- +extends: default + +rules: + comments: + min-spaces-from-content: 1 + comments-indentation: false + braces: + max-spaces-inside: 1 + octal-values: + forbid-implicit-octal: true + forbid-explicit-octal: true + line-length: + max: 120 + level: warning + truthy: + allowed-values: ["true", "false"] + +ignore: + - galaxy.yml diff --git a/k3s-ansible-copia/LICENSE b/k3s-ansible-copia/LICENSE new file mode 100644 index 0000000..4757b96 --- /dev/null +++ b/k3s-ansible-copia/LICENSE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/k3s-ansible-copia/README.md b/k3s-ansible-copia/README.md new file mode 100644 index 0000000..10cbafd --- /dev/null +++ b/k3s-ansible-copia/README.md @@ -0,0 +1,235 @@ +# Automated build of HA k3s Cluster with `kube-vip` and MetalLB + +![Fully Automated K3S etcd High Availability Install](https://img.youtube.com/vi/CbkEWcUZ7zM/0.jpg) + +This playbook will build an HA Kubernetes cluster with `k3s`, `kube-vip` and MetalLB via `ansible`. + +This is based on the work from [this fork](https://github.com/212850a/k3s-ansible) which is based on the work from [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). It uses [kube-vip](https://kube-vip.io/) to create a load balancer for the control plane, and [metal-lb](https://metallb.universe.tf/installation/) for its service `LoadBalancer`. + +If you want more context on how this works, see: + +πŸ“„ [Documentation](https://technotim.live/posts/k3s-etcd-ansible/) (including example commands) + +πŸ“Ί [Watch the Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM) + +## πŸ“– k3s Ansible Playbook + +Build a Kubernetes cluster using Ansible with k3s.
The goal is to easily install an HA Kubernetes cluster on machines running: + +- [x] Debian (tested on version 11) +- [x] Ubuntu (tested on version 22.04) +- [x] Rocky (tested on version 9) + +on processor architecture: + +- [X] x64 +- [X] arm64 +- [X] armhf + +## βœ… System requirements + +- Control Node (the machine you are running `ansible` commands from) must have Ansible 2.11+. If you need a quick primer on Ansible, [you can check out my docs on setting up Ansible](https://technotim.live/posts/ansible-automation/). + +- You will also need to install the collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗) + +- [`netaddr` package](https://pypi.org/project/netaddr/) must be available to Ansible. If you have installed Ansible via apt, this is already taken care of. If you have installed Ansible via `pip`, make sure to install `netaddr` into the respective virtual environment. + +- `server` and `agent` nodes should have passwordless SSH access; if not, you can supply credentials by passing `--ask-pass --ask-become-pass` to each command. + +## πŸš€ Getting Started + +### 🍴 Preparation + +First, create a new directory based on the `sample` directory within the `inventory` directory: + +```bash +cp -R inventory/sample inventory/my-cluster +``` + +Second, edit `inventory/my-cluster/hosts.ini` to match the system information gathered above. + +For example: + +```ini +[master] +192.168.30.38 +192.168.30.39 +192.168.30.40 + +[node] +192.168.30.41 +192.168.30.42 + +[k3s_cluster:children] +master +node +``` + +If multiple hosts are in the master group, the playbook will automatically set up k3s in [HA mode with etcd](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/). + +Finally, copy `ansible.example.cfg` to `ansible.cfg` and adapt the inventory path to match the files that you just created. + +This requires at least k3s version `1.19.1`; however, the version is configurable via the `k3s_version` variable. + +If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match your environment. + +### ☸️ Create Cluster + +Start provisioning of the cluster using the following command: + +```bash +ansible-playbook site.yml -i inventory/my-cluster/hosts.ini +``` + +After deployment, the control plane will be accessible via the virtual IP address that is defined in `inventory/my-cluster/group_vars/all.yml` as `apiserver_endpoint`. + +### πŸ”₯ Remove k3s cluster + +```bash +ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini +``` + +>You should also reboot these nodes afterwards, because the VIP is not destroyed + +## βš™οΈ Kube Config + +To copy your `kube config` locally so that you can access your **Kubernetes** cluster, run: + +```bash +scp debian@master_ip:/etc/rancher/k3s/k3s.yaml ~/.kube/config +``` +If you get a "Permission denied" error for the file, go into the node and temporarily run: +```bash +sudo chmod 777 /etc/rancher/k3s/k3s.yaml +``` +Then copy with the scp command and reset the permissions back with: +```bash +sudo chmod 600 /etc/rancher/k3s/k3s.yaml +``` + +You'll then want to modify the config to point to the master IP by running: +```bash +sudo nano ~/.kube/config +``` +Then change `server: https://127.0.0.1:6443` to match your master IP: `server: https://192.168.1.222:6443` + +### πŸ”¨ Testing your cluster + +See the commands [here](https://technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).
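+
+As a quick smoke test (a minimal sketch; it assumes the kubeconfig from the step above points at the new cluster and that the bundled `example/` manifests define a `LoadBalancer` service), you can check that every node joined and that MetalLB assigns an external address:
+
+```bash
+# Every master and worker node should be listed and Ready
+kubectl get nodes -o wide
+
+# Deploy the bundled example workload and its service
+kubectl apply -f example/deployment.yml -f example/service.yml
+
+# For a LoadBalancer service, the EXTERNAL-IP column should be
+# populated with an address from metal_lb_ip_range
+kubectl get svc -o wide
+```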
+ +### Variables + +| Role(s) | Variable | Type | Default | Required | Description | +|---|---|---|---|---|---| +| `download` | `k3s_version` | string | ❌ | Required | K3s binaries version | +| `k3s_agent`, `k3s_server`, `k3s_server_post` | `apiserver_endpoint` | string | ❌ | Required | Virtual IP address configured on each master | +| `k3s_agent` | `extra_agent_args` | string | `null` | Not required | Extra arguments for agent nodes | +| `k3s_agent`, `k3s_server` | `group_name_master` | string | `null` | Not required | Name of the master group | +| `k3s_agent` | `k3s_token` | string | `null` | Not required | Token used to communicate between masters | +| `k3s_agent`, `k3s_server` | `proxy_env` | dict | `null` | Not required | Internet proxy configurations | +| `k3s_agent`, `k3s_server` | `proxy_env.HTTP_PROXY` | string | ❌ | Required | HTTP internet proxy | +| `k3s_agent`, `k3s_server` | `proxy_env.HTTPS_PROXY` | string | ❌ | Required | HTTPS internet proxy | +| `k3s_agent`, `k3s_server` | `proxy_env.NO_PROXY` | string | ❌ | Required | Addresses that will not use the proxies | +| `k3s_agent`, `k3s_server`, `reset` | `systemd_dir` | string | `/etc/systemd/system` | Not required | Path to systemd services | +| `k3s_custom_registries` | `custom_registries_yaml` | string | ❌ | Required | YAML block defining custom registries. The following is an example that pulls all images used in this playbook through your private registries. It also allows you to pull your own images from your private registry, without having to use imagePullSecrets in your deployments. If all you need is your own images and you don't care about caching the docker/quay/ghcr.io images, you can just remove those from the mirrors: section. | +| `k3s_server`, `k3s_server_post` | `cilium_bgp` | bool | `~` | Not required | Enable the Cilium BGP control plane for LB services and pod CIDRs. Disables the use of MetalLB. | +| `k3s_server`, `k3s_server_post` | `cilium_iface` | string | ❌ | Not required | The network interface used when Cilium is enabled | +| `k3s_server` | `extra_server_args` | string | `""` | Not required | Extra arguments for server nodes | +| `k3s_server` | `k3s_create_kubectl_symlink` | bool | `false` | Not required | Create the kubectl -> k3s symlink | +| `k3s_server` | `k3s_create_crictl_symlink` | bool | `true` | Not required | Create the crictl -> k3s symlink | +| `k3s_server` | `kube_vip_arp` | bool | `true` | Not required | Enables kube-vip ARP broadcasts | +| `k3s_server` | `kube_vip_bgp` | bool | `false` | Not required | Enables kube-vip BGP peering | +| `k3s_server` | `kube_vip_bgp_routerid` | string | `"127.0.0.1"` | Not required | Defines the router ID for the kube-vip BGP server | +| `k3s_server` | `kube_vip_bgp_as` | string | `"64513"` | Not required | Defines the AS for the kube-vip BGP server | +| `k3s_server` | `kube_vip_bgp_peeraddress` | string | `"192.168.30.1"` | Not required | Defines the address for the kube-vip BGP peer | +| `k3s_server` | `kube_vip_bgp_peeras` | string | `"64512"` | Not required | Defines the AS for the kube-vip BGP peer | +| `k3s_server` | `kube_vip_bgp_peers` | list | `[]` | Not required | List of BGP peer ASN & address pairs | +| `k3s_server` | `kube_vip_bgp_peers_groups` | list | `['k3s_master']` | Not required | Inventory group in which to search for additional `kube_vip_bgp_peers` parameters to merge.
| +| `k3s_server` | `kube_vip_iface` | string | `~` | Not required | Explicitly define an interface that ALL control nodes should use to propagate the VIP. Otherwise, kube-vip will determine the right interface automatically at runtime. | +| `k3s_server` | `kube_vip_tag_version` | string | `v0.7.2` | Not required | Image tag for kube-vip | +| `k3s_server` | `kube_vip_cloud_provider_tag_version` | string | `main` | Not required | Tag for the kube-vip-cloud-provider manifest when enabled | +| `k3s_server`, `k3s_server_post` | `kube_vip_lb_ip_range` | string | `~` | Not required | IP range for the kube-vip load balancer | +| `k3s_server`, `k3s_server_post` | `metal_lb_controller_tag_version` | string | `v0.14.3` | Not required | Image tag for MetalLB | +| `k3s_server` | `metal_lb_speaker_tag_version` | string | `v0.14.3` | Not required | Image tag for MetalLB | +| `k3s_server` | `metal_lb_type` | string | `native` | Not required | Use FRR mode or native. Valid values are `frr` and `native` | +| `k3s_server` | `retry_count` | int | `20` | Not required | Number of retries when verifying that nodes have joined | +| `k3s_server` | `server_init_args` | string | ❌ | Not required | Arguments for server nodes | +| `k3s_server_post` | `bpf_lb_algorithm` | string | `maglev` | Not required | BPF load balancing algorithm | +| `k3s_server_post` | `bpf_lb_mode` | string | `hybrid` | Not required | BPF load balancing mode | +| `k3s_server_post` | `calico_blocksize` | int | `26` | Not required | IP pool block size | +| `k3s_server_post` | `calico_ebpf` | bool | `false` | Not required | Use eBPF dataplane instead of iptables | +| `k3s_server_post` | `calico_encapsulation` | string | `VXLANCrossSubnet` | Not required | IP pool encapsulation | +| `k3s_server_post` | `calico_natOutgoing` | string | `Enabled` | Not required | IP pool NAT outgoing | +| `k3s_server_post` | `calico_nodeSelector` | string | `all()` | Not required | IP pool node selector | +| `k3s_server_post` | `calico_iface` | string | `~` | Not required | The network interface used when Calico is enabled | +| `k3s_server_post` | `calico_tag` | string | `v3.27.2` | Not required | Calico version tag | +| `k3s_server_post` | `cilium_bgp_my_asn` | int | `64513` | Not required | Local ASN for the BGP peer | +| `k3s_server_post` | `cilium_bgp_peer_asn` | int | `64512` | Not required | BGP peer ASN | +| `k3s_server_post` | `cilium_bgp_peer_address` | string | `~` | Not required | BGP peer address | +| `k3s_server_post` | `cilium_bgp_neighbors` | list | `[]` | Not required | List of BGP peer ASN & address pairs | +| `k3s_server_post` | `cilium_bgp_neighbors_groups` | list | `['k3s_all']` | Not required | Inventory group in which to search for additional `cilium_bgp_neighbors` parameters to merge.
| +| `k3s_server_post` | `cilium_bgp_lb_cidr` | string | `192.168.31.0/24` | Not required | BGP load balancer IP range | +| `k3s_server_post` | `cilium_exportPodCIDR` | bool | `true` | Not required | Export pod CIDR | +| `k3s_server_post` | `cilium_hubble` | bool | `true` | Not required | Enable Cilium Hubble | +| `k3s_server_post` | `cilium_mode` | string | `native` | Not required | Inter-node communication mode (choices are `native` and `routed`) | +| `k3s_server_post` | `cluster_cidr` | string | `10.52.0.0/16` | Not required | Cluster-internal IP range | +| `k3s_server_post` | `enable_bpf_masquerade` | bool | `true` | Not required | Use IP masquerading | +| `k3s_server_post` | `kube_proxy_replacement` | bool | `true` | Not required | Replace the native kube-proxy with Cilium | +| `k3s_server_post` | `metal_lb_available_timeout` | string | `240s` | Not required | How long to wait for MetalLB resources | +| `k3s_server_post` | `metal_lb_ip_range` | string | `192.168.30.80-192.168.30.90` | Not required | MetalLB IP range for the load balancer | +| `k3s_server_post` | `metal_lb_controller_tag_version` | string | `v0.14.3` | Not required | Image tag for MetalLB | +| `k3s_server_post` | `metal_lb_mode` | string | `layer2` | Not required | MetalLB mode (choices are `bgp` and `layer2`) | +| `k3s_server_post` | `metal_lb_bgp_my_asn` | string | `~` | Not required | BGP ASN configuration | +| `k3s_server_post` | `metal_lb_bgp_peer_asn` | string | `~` | Not required | BGP peer ASN configuration | +| `k3s_server_post` | `metal_lb_bgp_peer_address` | string | `~` | Not required | BGP peer address | +| `lxc` | `custom_reboot_command` | string | `~` | Not required | Command to run on reboot | +| `prereq` | `system_timezone` | string | `null` | Not required | Timezone to be set on all nodes | +| `proxmox_lxc`, `reset_proxmox_lxc` | `proxmox_lxc_ct_ids` | list | ❌ | Required | Proxmox container ID list | +| `raspberrypi` | `state` | string | `present` | Not required | Indicates whether the k3s prerequisites for Raspberry Pi should be set up (possible values are `present` and `absent`) | + + +### Troubleshooting + +Be sure to see [this post](https://github.com/techno-tim/k3s-ansible/discussions/20) on how to troubleshoot common problems. + +### Testing the playbook using molecule + +This playbook includes a [molecule](https://molecule.rtfd.io/)-based test setup. +It is run automatically in CI, but you can also run the tests locally. +This might be helpful for quick feedback in a few cases. +You can find more information about it [here](molecule/README.md). + +### Pre-commit Hooks + +This repo uses `pre-commit` and `pre-commit-hooks` to lint and fix common style and syntax errors. Be sure to install the Python packages and then run `pre-commit install`. For more information, see [pre-commit](https://pre-commit.com/). + +## 🌌 Ansible Galaxy + +This collection can now be used in larger Ansible projects. + +Instructions: + +- create or modify a file `collections/requirements.yml` in your project + +```yml +collections: + - name: ansible.utils + - name: community.general + - name: ansible.posix + - name: kubernetes.core + - name: https://github.com/techno-tim/k3s-ansible.git + type: git + version: master +``` + +- install via `ansible-galaxy collection install -r ./collections/requirements.yml` +- every role is now available via the prefix `techno_tim.k3s_ansible.` e.g.
`techno_tim.k3s_ansible.lxc` + +## Thanks 🀝 + +This repo is really standing on the shoulders of giants. Thank you to all those who have contributed and thanks to these repos for code and ideas: + +- [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible) +- [geerlingguy/turing-pi-cluster](https://github.com/geerlingguy/turing-pi-cluster) +- [212850a/k3s-ansible](https://github.com/212850a/k3s-ansible) diff --git a/k3s-ansible-copia/ansible.cfg b/k3s-ansible-copia/ansible.cfg new file mode 100644 index 0000000..b36870b --- /dev/null +++ b/k3s-ansible-copia/ansible.cfg @@ -0,0 +1,2 @@ +[defaults] +inventory = inventory/my-cluster/hosts.ini ; Adapt this to the path to your inventory file diff --git a/k3s-ansible-copia/ansible.example.cfg b/k3s-ansible-copia/ansible.example.cfg new file mode 100644 index 0000000..b36870b --- /dev/null +++ b/k3s-ansible-copia/ansible.example.cfg @@ -0,0 +1,2 @@ +[defaults] +inventory = inventory/my-cluster/hosts.ini ; Adapt this to the path to your inventory file diff --git a/k3s-ansible-copia/collections/requirements.yml b/k3s-ansible-copia/collections/requirements.yml new file mode 100644 index 0000000..0d176b4 --- /dev/null +++ b/k3s-ansible-copia/collections/requirements.yml @@ -0,0 +1,6 @@ +--- +collections: + - name: ansible.utils + - name: community.general + - name: ansible.posix + - name: kubernetes.core diff --git a/k3s-ansible-copia/deploy.sh b/k3s-ansible-copia/deploy.sh new file mode 100755 index 0000000..8f702d6 --- /dev/null +++ b/k3s-ansible-copia/deploy.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +ansible-playbook site.yml diff --git a/k3s-ansible-copia/error b/k3s-ansible-copia/error new file mode 100644 index 0000000..a04c45a --- /dev/null +++ b/k3s-ansible-copia/error @@ -0,0 +1,2339 @@ +mar 12 16:33:27 CASCA k3s[293286]: I0312 16:33:27.059914 293286 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:33:27 CASCA k3s[293286]: I0312 16:33:27.059934 293286 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:33:27 CASCA k3s[293286]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:33:27 CASCA k3s[293286]: time="2025-03-12T16:33:27+01:00" level=error msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:33:27 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:33:27 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:33:27 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 114527 and the job result is failed. +mar 12 16:33:32 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1191. 
+β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:33:32 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 114621 and the job result is done. +mar 12 16:33:32 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 114621. +mar 12 16:33:32 CASCA sh[293679]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key 
--service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:33:32 CASCA k3s[293686]: W0312 16:33:32.541730 293686 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:33:32 CASCA k3s[293686]: W0312 16:33:32.542188 293686 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:33:32 CASCA k3s[293686]: I0312 16:33:32.542231 293686 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig 
--bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:33:32 CASCA k3s[293686]: I0312 16:33:32.543427 293686 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:33:32 CASCA k3s[293686]: I0312 16:33:32.543446 293686 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:33:32 CASCA k3s[293686]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:33:32 CASCA k3s[293686]: time="2025-03-12T16:33:32+01:00" level=error msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:33:32 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:33:32 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:33:32 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 114621 and the job result is failed. +mar 12 16:33:37 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1192. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:33:37 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 114715 and the job result is done. 
+mar 12 16:33:37 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 114715. +mar 12 16:33:37 CASCA sh[294117]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:33:37 CASCA k3s[294128]: time="2025-03-12T16:33:37+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:33:37 CASCA k3s[294128]: time="2025-03-12T16:33:37+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:33:37 CASCA k3s[294128]: time="2025-03-12T16:33:37+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." +mar 12 16:33:37 CASCA k3s[294128]: time="2025-03-12T16:33:37+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:33:37 CASCA k3s[294128]: time="2025-03-12T16:33:37+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:33:37 CASCA k3s[294128]: time="2025-03-12T16:33:37+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:33:38 CASCA k3s[294128]: time="2025-03-12T16:33:38+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:33:38 CASCA k3s[294128]: time="2025-03-12T16:33:38+01:00" 
level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:33:38 CASCA k3s[294128]: W0312 16:33:38.053193 294128 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:33:38 CASCA k3s[294128]: I0312 16:33:38.053734 294128 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:33:38 CASCA k3s[294128]: time="2025-03-12T16:33:38+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:33:38 CASCA k3s[294128]: time="2025-03-12T16:33:38+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:33:38 CASCA k3s[294128]: W0312 16:33:38.054080 294128 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:33:38 CASCA k3s[294128]: time="2025-03-12T16:33:38+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:33:38 CASCA k3s[294128]: time="2025-03-12T16:33:38+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:33:38 CASCA k3s[294128]: I0312 16:33:38.054960 294128 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:33:38 
CASCA k3s[294128]: I0312 16:33:38.054974 294128 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:33:38 CASCA k3s[294128]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:33:38 CASCA k3s[294128]: time="2025-03-12T16:33:38+01:00" level=error msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:33:38 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:33:38 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:33:38 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 114715 and the job result is failed. +mar 12 16:33:43 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1193. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:33:43 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 114809 and the job result is done. +mar 12 16:33:43 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 114809. +mar 12 16:33:43 CASCA sh[294592]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:33:43 CASCA k3s[294599]: W0312 16:33:43.564110 294599 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:33:43 CASCA k3s[294599]: W0312 16:33:43.564646 294599 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:33:43 CASCA k3s[294599]: I0312 16:33:43.564753 294599 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:33:43 CASCA k3s[294599]: I0312 16:33:43.566007 294599 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:33:43 CASCA k3s[294599]: I0312 16:33:43.566030 294599 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:33:43 CASCA k3s[294599]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:33:43 CASCA k3s[294599]: time="2025-03-12T16:33:43+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:33:43 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:33:43 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:33:43 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 114809 and the job result is failed. +mar 12 16:33:48 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1194. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:33:48 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 114903 and the job result is done. +mar 12 16:33:48 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 114903. +mar 12 16:33:48 CASCA sh[294930]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:33:48 CASCA k3s[294940]: time="2025-03-12T16:33:48+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:33:48 CASCA k3s[294940]: time="2025-03-12T16:33:48+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:33:48 CASCA k3s[294940]: time="2025-03-12T16:33:48+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:33:48 CASCA k3s[294940]: time="2025-03-12T16:33:48+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:33:48 CASCA k3s[294940]: time="2025-03-12T16:33:48+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:33:48 CASCA k3s[294940]: time="2025-03-12T16:33:48+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:33:49 CASCA k3s[294940]: time="2025-03-12T16:33:49+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:33:49 CASCA k3s[294940]: time="2025-03-12T16:33:49+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:33:49 CASCA k3s[294940]: time="2025-03-12T16:33:49+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:33:49 CASCA k3s[294940]: W0312 16:33:49.054677 294940 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:33:49 CASCA k3s[294940]: time="2025-03-12T16:33:49+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:33:49 CASCA k3s[294940]: W0312 16:33:49.055025 294940 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:33:49 CASCA k3s[294940]: time="2025-03-12T16:33:49+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:33:49 CASCA k3s[294940]: time="2025-03-12T16:33:49+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:33:49 CASCA k3s[294940]: I0312 16:33:49.055856 294940 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:33:49 CASCA k3s[294940]: time="2025-03-12T16:33:49+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:33:49 CASCA k3s[294940]: time="2025-03-12T16:33:49+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:33:49 CASCA k3s[294940]: time="2025-03-12T16:33:49+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:33:49 CASCA k3s[294940]: time="2025-03-12T16:33:49+01:00" level=info msg="Wrote kubeconfig /etc/rancher/k3s/k3s.yaml" +mar 12 16:33:49 CASCA k3s[294940]: time="2025-03-12T16:33:49+01:00" level=info msg="Run: k3s kubectl" +mar 12 16:33:49 CASCA k3s[294940]: I0312 16:33:49.057084 294940 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:33:49 CASCA k3s[294940]: I0312 16:33:49.057104 294940 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:33:49 CASCA 
k3s[294940]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:33:49 CASCA k3s[294940]: time="2025-03-12T16:33:49+01:00" level=error msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:33:49 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:33:49 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:33:49 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 114903 and the job result is failed. +mar 12 16:33:54 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1195. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:33:54 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 114997 and the job result is done. +mar 12 16:33:54 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 114997. +mar 12 16:33:54 CASCA sh[295357]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:33:54 CASCA k3s[295365]: W0312 16:33:54.580835 295365 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:33:54 CASCA k3s[295365]: W0312 16:33:54.581307 295365 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:33:54 CASCA k3s[295365]: I0312 16:33:54.581333 295365 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:33:54 CASCA k3s[295365]: I0312 16:33:54.582544 295365 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:33:54 CASCA k3s[295365]: I0312 16:33:54.582565 295365 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:33:54 CASCA k3s[295365]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:33:54 CASCA k3s[295365]: time="2025-03-12T16:33:54+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:33:54 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:33:54 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:33:54 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 114997 and the job result is failed. +mar 12 16:33:59 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1196. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:33:59 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 115091 and the job result is done. +mar 12 16:33:59 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 115091. +mar 12 16:33:59 CASCA sh[295816]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:33:59 CASCA k3s[295825]: time="2025-03-12T16:33:59+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:33:59 CASCA k3s[295825]: time="2025-03-12T16:33:59+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:33:59 CASCA k3s[295825]: time="2025-03-12T16:33:59+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:33:59 CASCA k3s[295825]: time="2025-03-12T16:33:59+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:33:59 CASCA k3s[295825]: time="2025-03-12T16:33:59+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:33:59 CASCA k3s[295825]: time="2025-03-12T16:33:59+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:34:00 CASCA k3s[295825]: time="2025-03-12T16:34:00+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:34:00 CASCA k3s[295825]: time="2025-03-12T16:34:00+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:34:00 CASCA k3s[295825]: W0312 16:34:00.043113 295825 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:34:00 CASCA k3s[295825]: time="2025-03-12T16:34:00+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:34:00 CASCA k3s[295825]: time="2025-03-12T16:34:00+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:34:00 CASCA k3s[295825]: W0312 16:34:00.043461 295825 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:34:00 CASCA k3s[295825]: I0312 16:34:00.043592 295825 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:34:00 CASCA k3s[295825]: time="2025-03-12T16:34:00+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:34:00 CASCA k3s[295825]: time="2025-03-12T16:34:00+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:34:00 CASCA k3s[295825]: time="2025-03-12T16:34:00+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:34:00 CASCA k3s[295825]: time="2025-03-12T16:34:00+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:34:00 CASCA k3s[295825]: time="2025-03-12T16:34:00+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:34:00 CASCA k3s[295825]: I0312 16:34:00.044831 295825 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:34:00 CASCA k3s[295825]: I0312 16:34:00.044845 295825 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:34:00 CASCA k3s[295825]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:34:00 CASCA k3s[295825]: time="2025-03-12T16:34:00+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:34:00 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:34:00 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:34:00 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 115091 and the job result is failed. +mar 12 16:34:03 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 115185 and the job result is done. +mar 12 16:34:03 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 115185. +mar 12 16:34:03 CASCA sh[296083]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:34:03 CASCA k3s[296088]: W0312 16:34:03.971970 296088 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:34:03 CASCA k3s[296088]: W0312 16:34:03.972430 296088 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:34:03 CASCA k3s[296088]: I0312 16:34:03.972518 296088 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:34:03 CASCA k3s[296088]: I0312 16:34:03.973720 296088 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:34:03 CASCA k3s[296088]: I0312 16:34:03.973739 296088 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:34:03 CASCA k3s[296088]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:34:03 CASCA k3s[296088]: time="2025-03-12T16:34:03+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:34:03 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:34:03 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:34:03 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 115185 and the job result is failed. +mar 12 16:34:09 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1197. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:34:09 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 115279 and the job result is done. +mar 12 16:34:09 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 115279. +mar 12 16:34:09 CASCA sh[296535]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:34:09 CASCA k3s[296543]: time="2025-03-12T16:34:09+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:34:09 CASCA k3s[296543]: time="2025-03-12T16:34:09+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:34:09 CASCA k3s[296543]: time="2025-03-12T16:34:09+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
[... the identical start/fail cycle repeats on every automatic restart: each attempt logs the same k3s v1.31.6+k3s1 startup messages (kube-apiserver, kube-scheduler, kube-controller-manager and cloud-controller-manager flags, node-token paths, "Wrote kubeconfig /etc/rancher/k3s/k3s.yaml"), then aborts with "Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use"; systemd marks k3s.service failed and schedules another restart roughly every five seconds, the restart counter climbing from 1197 at 16:34:09 to 1204 at 16:34:47 ...]
+mar 12 16:34:47 CASCA k3s[299241]: time="2025-03-12T16:34:47+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:34:47 CASCA k3s[299241]: time="2025-03-12T16:34:47+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:34:47 CASCA k3s[299241]: time="2025-03-12T16:34:47+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:34:47 CASCA k3s[299241]: time="2025-03-12T16:34:47+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:34:47 CASCA k3s[299241]: time="2025-03-12T16:34:47+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:34:47 CASCA k3s[299241]: time="2025-03-12T16:34:47+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:34:47 CASCA k3s[299241]: W0312 16:34:47.794187 299241 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:34:47 CASCA k3s[299241]: time="2025-03-12T16:34:47+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:34:47 CASCA k3s[299241]: W0312 16:34:47.794640 299241 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:34:47 CASCA k3s[299241]: I0312 16:34:47.794699 299241 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:34:47 CASCA k3s[299241]: time="2025-03-12T16:34:47+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:34:47 CASCA k3s[299241]: time="2025-03-12T16:34:47+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:34:47 CASCA k3s[299241]: time="2025-03-12T16:34:47+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:34:47 CASCA k3s[299241]: time="2025-03-12T16:34:47+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:34:47 CASCA k3s[299241]: time="2025-03-12T16:34:47+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:34:47 CASCA k3s[299241]: I0312 16:34:47.795958 299241 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:34:47 CASCA k3s[299241]: I0312 16:34:47.795975 299241 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:34:47 CASCA k3s[299241]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:34:47 CASCA k3s[299241]: time="2025-03-12T16:34:47+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:34:47 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:34:47 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:34:47 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 115937 and the job result is failed. +mar 12 16:34:53 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1205. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:34:53 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 116031 and the job result is done. +mar 12 16:34:53 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 116031. +mar 12 16:34:53 CASCA sh[299546]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:34:53 CASCA k3s[299553]: I0312 16:34:53.313071 299553 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:34:53 CASCA k3s[299553]: W0312 16:34:53.313285 299553 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=info msg="Running kube-controller-manager 
--allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:34:53 CASCA k3s[299553]: W0312 16:34:53.313674 299553 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:34:53 CASCA k3s[299553]: I0312 16:34:53.314512 299553 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:34:53 CASCA k3s[299553]: I0312 16:34:53.314537 299553 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:34:53 CASCA k3s[299553]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:34:53 CASCA k3s[299553]: time="2025-03-12T16:34:53+01:00" level=error msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:34:53 CASCA systemd[1]: k3s.service: Main process exited, 
code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:34:53 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:34:53 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 116031 and the job result is failed. +mar 12 16:34:58 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1206. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:34:58 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 116125 and the job result is done. +mar 12 16:34:58 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 116125. +mar 12 16:34:58 CASCA sh[299943]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:34:58 CASCA k3s[299952]: W0312 16:34:58.829118 299952 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:34:58 CASCA k3s[299952]: W0312 16:34:58.829576 299952 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:34:58 CASCA k3s[299952]: I0312 16:34:58.829643 299952 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:34:58 CASCA k3s[299952]: I0312 16:34:58.830859 299952 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:34:58 CASCA k3s[299952]: I0312 16:34:58.830875 299952 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:34:58 CASCA k3s[299952]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:34:58 CASCA k3s[299952]: time="2025-03-12T16:34:58+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:34:58 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:34:58 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:34:58 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 116125 and the job result is failed. +mar 12 16:35:04 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1207. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:35:04 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 116219 and the job result is done. +mar 12 16:35:04 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 116219. +mar 12 16:35:04 CASCA sh[300339]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:35:04 CASCA k3s[300346]: W0312 16:35:04.287210 300346 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:35:04 CASCA k3s[300346]: W0312 16:35:04.287660 300346 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:04 CASCA k3s[300346]: I0312 16:35:04.287792 300346 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:35:04 CASCA k3s[300346]: I0312 16:35:04.289169 300346 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:35:04 CASCA k3s[300346]: I0312 16:35:04.289196 300346 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:35:04 CASCA k3s[300346]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:35:04 CASCA k3s[300346]: time="2025-03-12T16:35:04+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:35:04 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:35:04 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:35:04 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 116219 and the job result is failed. +mar 12 16:35:09 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1208. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:35:09 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 116313 and the job result is done. +mar 12 16:35:09 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 116313. +mar 12 16:35:09 CASCA sh[300731]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:35:09 CASCA k3s[300739]: W0312 16:35:09.834674 300739 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:35:09 CASCA k3s[300739]: W0312 16:35:09.835024 300739 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:09 CASCA k3s[300739]: I0312 16:35:09.835170 300739 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:35:09 CASCA k3s[300739]: I0312 16:35:09.836389 300739 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:35:09 CASCA k3s[300739]: I0312 16:35:09.836409 300739 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:35:09 CASCA k3s[300739]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:35:09 CASCA k3s[300739]: time="2025-03-12T16:35:09+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:35:09 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:35:09 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:35:09 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 116313 and the job result is failed. +mar 12 16:35:15 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1209. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:35:15 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 116407 and the job result is done. +mar 12 16:35:15 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 116407. +mar 12 16:35:15 CASCA sh[301001]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:35:15 CASCA k3s[301005]: W0312 16:35:15.277839 301005 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:35:15 CASCA k3s[301005]: W0312 16:35:15.278187 301005 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:15 CASCA k3s[301005]: I0312 16:35:15.278304 301005 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:35:15 CASCA k3s[301005]: I0312 16:35:15.279488 301005 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:35:15 CASCA k3s[301005]: I0312 16:35:15.279552 301005 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:35:15 CASCA k3s[301005]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:35:15 CASCA k3s[301005]: time="2025-03-12T16:35:15+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:35:15 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:35:15 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:35:15 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 116407 and the job result is failed. +mar 12 16:35:20 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1210. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:35:20 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 116501 and the job result is done. +mar 12 16:35:20 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 116501. +mar 12 16:35:20 CASCA sh[301218]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:35:20 CASCA k3s[301225]: W0312 16:35:20.847052 301225 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:35:20 CASCA k3s[301225]: W0312 16:35:20.847398 301225 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:20 CASCA k3s[301225]: I0312 16:35:20.847542 301225 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:35:20 CASCA k3s[301225]: I0312 16:35:20.848722 301225 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:35:20 CASCA k3s[301225]: I0312 16:35:20.848739 301225 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:35:20 CASCA k3s[301225]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:35:20 CASCA k3s[301225]: time="2025-03-12T16:35:20+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:35:20 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:35:20 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:35:20 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 116501 and the job result is failed. +mar 12 16:35:26 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1211. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:35:26 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 116595 and the job result is done. +mar 12 16:35:26 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 116595. +mar 12 16:35:26 CASCA sh[301551]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:35:26 CASCA k3s[301557]: W0312 16:35:26.284751 301557 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:35:26 CASCA k3s[301557]: W0312 16:35:26.285236 301557 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:26 CASCA k3s[301557]: I0312 16:35:26.285265 301557 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:35:26 CASCA k3s[301557]: I0312 16:35:26.286462 301557 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:35:26 CASCA k3s[301557]: I0312 16:35:26.286487 301557 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:35:26 CASCA k3s[301557]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:35:26 CASCA k3s[301557]: time="2025-03-12T16:35:26+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:35:26 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:35:26 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:35:26 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 116595 and the job result is failed. +mar 12 16:35:31 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1212. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:35:31 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 116689 and the job result is done. +mar 12 16:35:31 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 116689. +mar 12 16:35:31 CASCA sh[301968]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:35:31 CASCA k3s[301978]: W0312 16:35:31.804367 301978 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:35:31 CASCA k3s[301978]: I0312 16:35:31.804877 301978 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:35:31 CASCA k3s[301978]: W0312 16:35:31.804884 301978 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:35:31 CASCA k3s[301978]: I0312 16:35:31.806067 301978 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:35:31 CASCA k3s[301978]: I0312 16:35:31.806086 301978 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:35:31 CASCA k3s[301978]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:35:31 CASCA k3s[301978]: time="2025-03-12T16:35:31+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:35:31 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:35:31 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:35:31 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 116689 and the job result is failed. +mar 12 16:35:37 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1213. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:35:37 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 116783 and the job result is done. +mar 12 16:35:37 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 116783. +mar 12 16:35:37 CASCA sh[302412]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:35:37 CASCA k3s[302419]: W0312 16:35:37.306964 302419 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:35:37 CASCA k3s[302419]: W0312 16:35:37.307299 302419 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:37 CASCA k3s[302419]: I0312 16:35:37.307457 302419 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:35:37 CASCA k3s[302419]: I0312 16:35:37.308674 302419 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:35:37 CASCA k3s[302419]: I0312 16:35:37.308687 302419 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:35:37 CASCA k3s[302419]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:35:37 CASCA k3s[302419]: time="2025-03-12T16:35:37+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:35:37 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:35:37 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:35:37 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 116783 and the job result is failed. +mar 12 16:35:42 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1214. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:35:42 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 116877 and the job result is done. +mar 12 16:35:42 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 116877. +mar 12 16:35:42 CASCA sh[302739]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:35:42 CASCA k3s[302746]: W0312 16:35:42.787970 302746 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:35:42 CASCA k3s[302746]: W0312 16:35:42.788435 302746 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:42 CASCA k3s[302746]: I0312 16:35:42.788552 302746 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:35:42 CASCA k3s[302746]: I0312 16:35:42.789773 302746 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:35:42 CASCA k3s[302746]: I0312 16:35:42.789788 302746 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:35:42 CASCA k3s[302746]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:35:42 CASCA k3s[302746]: time="2025-03-12T16:35:42+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:35:42 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:35:42 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:35:42 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 116877 and the job result is failed. +mar 12 16:35:48 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1215. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:35:48 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 116971 and the job result is done. +mar 12 16:35:48 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 116971. +mar 12 16:35:48 CASCA sh[302919]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:35:48 CASCA k3s[302925]: W0312 16:35:48.329161 302925 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:35:48 CASCA k3s[302925]: W0312 16:35:48.329565 302925 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:48 CASCA k3s[302925]: I0312 16:35:48.329702 302925 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:35:48 CASCA k3s[302925]: I0312 16:35:48.330938 302925 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:35:48 CASCA k3s[302925]: I0312 16:35:48.330953 302925 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:35:48 CASCA k3s[302925]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:35:48 CASCA k3s[302925]: time="2025-03-12T16:35:48+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:35:48 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:35:48 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:35:48 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 116971 and the job result is failed. +mar 12 16:35:53 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1216. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:35:53 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 117065 and the job result is done. +mar 12 16:35:53 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 117065. +mar 12 16:35:53 CASCA sh[303334]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:35:53 CASCA k3s[303341]: W0312 16:35:53.775748 303341 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:35:53 CASCA k3s[303341]: W0312 16:35:53.776216 303341 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:53 CASCA k3s[303341]: I0312 16:35:53.776414 303341 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:35:53 CASCA k3s[303341]: I0312 16:35:53.777956 303341 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:35:53 CASCA k3s[303341]: I0312 16:35:53.777983 303341 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:35:53 CASCA k3s[303341]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:35:53 CASCA k3s[303341]: time="2025-03-12T16:35:53+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:35:53 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:35:53 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:35:53 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 117065 and the job result is failed. +mar 12 16:35:59 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1217. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:35:59 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 117159 and the job result is done. +mar 12 16:35:59 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 117159. +mar 12 16:35:59 CASCA sh[303731]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:35:59 CASCA k3s[303740]: W0312 16:35:59.333474 303740 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:35:59 CASCA k3s[303740]: W0312 16:35:59.333847 303740 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:35:59 CASCA k3s[303740]: I0312 16:35:59.334728 303740 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Wrote kubeconfig /etc/rancher/k3s/k3s.yaml" +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=info msg="Run: k3s kubectl" +mar 12 16:35:59 CASCA k3s[303740]: I0312 16:35:59.335953 303740 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:35:59 CASCA k3s[303740]: I0312 16:35:59.335979 303740 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:35:59 CASCA 
k3s[303740]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:35:59 CASCA k3s[303740]: time="2025-03-12T16:35:59+01:00" level=error msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:35:59 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:35:59 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:35:59 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 117159 and the job result is failed. +mar 12 16:36:04 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1218. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:36:04 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 117253 and the job result is done. +mar 12 16:36:04 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 117253. +mar 12 16:36:04 CASCA sh[304097]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:36:04 CASCA k3s[304102]: W0312 16:36:04.793723 304102 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:36:04 CASCA k3s[304102]: W0312 16:36:04.794088 304102 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:36:04 CASCA k3s[304102]: I0312 16:36:04.794164 304102 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:36:04 CASCA k3s[304102]: I0312 16:36:04.795363 304102 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:36:04 CASCA k3s[304102]: I0312 16:36:04.795376 304102 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:36:04 CASCA k3s[304102]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:36:04 CASCA k3s[304102]: time="2025-03-12T16:36:04+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:36:04 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:36:04 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:36:04 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 117253 and the job result is failed. +mar 12 16:36:10 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1219. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:36:10 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 117347 and the job result is done. +mar 12 16:36:10 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 117347. +mar 12 16:36:10 CASCA sh[304421]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:36:10 CASCA k3s[304427]: W0312 16:36:10.320641 304427 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:36:10 CASCA k3s[304427]: W0312 16:36:10.321156 304427 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:36:10 CASCA k3s[304427]: I0312 16:36:10.321301 304427 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:36:10 CASCA k3s[304427]: I0312 16:36:10.322525 304427 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:36:10 CASCA k3s[304427]: I0312 16:36:10.322551 304427 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:36:10 CASCA k3s[304427]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:36:10 CASCA k3s[304427]: time="2025-03-12T16:36:10+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:36:10 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:36:10 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:36:10 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 117347 and the job result is failed. +mar 12 16:36:15 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1220. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:36:15 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 117441 and the job result is done. +mar 12 16:36:15 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 117441. +mar 12 16:36:15 CASCA sh[304761]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:36:15 CASCA k3s[304768]: W0312 16:36:15.830300 304768 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:36:15 CASCA k3s[304768]: W0312 16:36:15.830666 304768 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:36:15 CASCA k3s[304768]: I0312 16:36:15.830819 304768 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:36:15 CASCA k3s[304768]: I0312 16:36:15.832085 304768 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:36:15 CASCA k3s[304768]: I0312 16:36:15.832098 304768 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Wrote kubeconfig /etc/rancher/k3s/k3s.yaml" +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=info msg="Run: k3s kubectl" +mar 12 16:36:15 CASCA 
k3s[304768]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:36:15 CASCA k3s[304768]: time="2025-03-12T16:36:15+01:00" level=error msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:36:15 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:36:15 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:36:15 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 117441 and the job result is failed. +mar 12 16:36:21 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1221. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:36:21 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 117535 and the job result is done. +mar 12 16:36:21 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 117535. +mar 12 16:36:21 CASCA sh[305102]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:36:21 CASCA k3s[305109]: W0312 16:36:21.311329 305109 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:36:21 CASCA k3s[305109]: W0312 16:36:21.311673 305109 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:36:21 CASCA k3s[305109]: I0312 16:36:21.311866 305109 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:36:21 CASCA k3s[305109]: I0312 16:36:21.313086 305109 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:36:21 CASCA k3s[305109]: I0312 16:36:21.313105 305109 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:36:21 CASCA k3s[305109]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:36:21 CASCA k3s[305109]: time="2025-03-12T16:36:21+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:36:21 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:36:21 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:36:21 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 117535 and the job result is failed. +mar 12 16:36:26 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1222. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:36:26 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 117629 and the job result is done. +mar 12 16:36:26 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 117629. +mar 12 16:36:26 CASCA sh[305425]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:36:26 CASCA k3s[305432]: W0312 16:36:26.788551 305432 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:36:26 CASCA k3s[305432]: W0312 16:36:26.789039 305432 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:36:26 CASCA k3s[305432]: I0312 16:36:26.789065 305432 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:36:26 CASCA k3s[305432]: I0312 16:36:26.790249 305432 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:36:26 CASCA k3s[305432]: I0312 16:36:26.790268 305432 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:36:26 CASCA k3s[305432]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:36:26 CASCA k3s[305432]: time="2025-03-12T16:36:26+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:36:26 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:36:26 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:36:26 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 117629 and the job result is failed. +mar 12 16:36:32 CASCA systemd[1]: k3s.service: Scheduled restart job, restart counter is at 1223. +β–‘β–‘ Subject: Automatic restarting of a unit has been scheduled +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ Automatic restarting of the unit k3s.service has been scheduled, as the result for +β–‘β–‘ the configured Restart= setting for the unit. +mar 12 16:36:32 CASCA systemd[1]: Stopped k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A stop job for unit k3s.service has finished +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A stop job for unit k3s.service has finished. +β–‘β–‘ +β–‘β–‘ The job identifier is 117723 and the job result is done. +mar 12 16:36:32 CASCA systemd[1]: Starting k3s.service - Lightweight Kubernetes... +β–‘β–‘ Subject: A start job for unit k3s.service has begun execution +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has begun execution. +β–‘β–‘ +β–‘β–‘ The job identifier is 117723. +mar 12 16:36:32 CASCA sh[305771]: + /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="Starting k3s v1.31.6+k3s1 (6ab750f9)" +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="Configuring sqlite3 database connection pooling: maxIdleConns=2, maxOpenConns=0, connMaxLifetime=0s" +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="Configuring database table schema and indexes, this may take a moment..." 
+mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="Database tables and indexes are up to date" +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="Kine available at unix://kine.sock" +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="Reconciling bootstrap data between datastore and disk" +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --egress-selector-config-file=/var/lib/rancher/k3s/server/etc/egress-selector-config.yaml --enable-admission-plugins=NodeRestriction --enable-aggregator-routing=true --enable-bootstrap-token-auth=true --etcd-servers=unix://kine.sock --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="Running kube-scheduler --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --leader-elect=false --profiling=false --secure-port=10259" +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="Waiting for API server to become available" +mar 12 16:36:32 CASCA k3s[305779]: W0312 16:36:32.314610 305779 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="Running kube-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig 
--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --bind-address=127.0.0.1 --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.nochain.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/k3s/server/tls/server-ca.nochain.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/k3s/server/tls/server-ca.key --configure-cloud-routes=false --controllers=*,tokencleaner,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig --leader-elect=false --profiling=false --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.current.key --service-cluster-ip-range=10.43.0.0/16 --use-service-account-credentials=true" +mar 12 16:36:32 CASCA k3s[305779]: W0312 16:36:32.315034 305779 registry.go:256] calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags +mar 12 16:36:32 CASCA k3s[305779]: I0312 16:36:32.315101 305779 options.go:228] external host was not specified, using 192.168.1.133 +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="Running cloud-controller-manager --allocate-node-cidrs=true --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 --cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 --configure-cloud-routes=false --controllers=*,-route --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --leader-elect=false --leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false" +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="Server node token is available at /var/lib/rancher/k3s/server/token" +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="To join server node to cluster: k3s server -s https://192.168.1.133:6443 -t ${SERVER_NODE_TOKEN}" +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="Agent node token is available at /var/lib/rancher/k3s/server/agent-token" +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=info msg="To join agent node to cluster: k3s agent -s https://192.168.1.133:6443 -t ${AGENT_NODE_TOKEN}" +mar 12 16:36:32 CASCA k3s[305779]: I0312 16:36:32.316330 305779 server.go:150] Version: v1.31.6+k3s1 +mar 12 16:36:32 CASCA k3s[305779]: I0312 16:36:32.316345 305779 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" +mar 12 16:36:32 CASCA k3s[305779]: Error: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use +mar 12 16:36:32 CASCA k3s[305779]: time="2025-03-12T16:36:32+01:00" level=error 
msg="apiserver exited: failed to create listener: failed to listen on 127.0.0.1:6444: listen tcp 127.0.0.1:6444: bind: address already in use" +mar 12 16:36:32 CASCA systemd[1]: k3s.service: Main process exited, code=exited, status=1/FAILURE +β–‘β–‘ Subject: Unit process exited +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ An ExecStart= process belonging to unit k3s.service has exited. +β–‘β–‘ +β–‘β–‘ The process' exit code is 'exited' and its exit status is 1. +mar 12 16:36:32 CASCA systemd[1]: k3s.service: Failed with result 'exit-code'. +β–‘β–‘ Subject: Unit failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ The unit k3s.service has entered the 'failed' state with result 'exit-code'. +mar 12 16:36:32 CASCA systemd[1]: Failed to start k3s.service - Lightweight Kubernetes. +β–‘β–‘ Subject: A start job for unit k3s.service has failed +β–‘β–‘ Defined-By: systemd +β–‘β–‘ Support: https://www.debian.org/support +β–‘β–‘ +β–‘β–‘ A start job for unit k3s.service has finished with a failure. +β–‘β–‘ +β–‘β–‘ The job identifier is 117723 and the job result is failed. diff --git a/k3s-ansible-copia/example/deployment.yml b/k3s-ansible-copia/example/deployment.yml new file mode 100644 index 0000000..ad875ee --- /dev/null +++ b/k3s-ansible-copia/example/deployment.yml @@ -0,0 +1,20 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx +spec: + selector: + matchLabels: + app: nginx + replicas: 3 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:alpine + ports: + - containerPort: 80 diff --git a/k3s-ansible-copia/example/service.yml b/k3s-ansible-copia/example/service.yml new file mode 100644 index 0000000..a309465 --- /dev/null +++ b/k3s-ansible-copia/example/service.yml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx +spec: + ipFamilyPolicy: PreferDualStack + selector: + app: nginx + ports: + - port: 80 + targetPort: 80 + type: LoadBalancer diff --git a/k3s-ansible-copia/fk b/k3s-ansible-copia/fk new file mode 100644 index 0000000..bab8d51 --- /dev/null +++ b/k3s-ansible-copia/fk @@ -0,0 +1 @@ +cont diff --git a/k3s-ansible-copia/galaxy.yml b/k3s-ansible-copia/galaxy.yml new file mode 100644 index 0000000..0f9b196 --- /dev/null +++ b/k3s-ansible-copia/galaxy.yml @@ -0,0 +1,81 @@ +### REQUIRED +# The namespace of the collection. This can be a company/brand/organization or product namespace under which all +# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with +# underscores or numbers and cannot contain consecutive underscores +namespace: techno_tim + +# The name of the collection. Has the same character restrictions as 'namespace' +name: k3s_ansible + +# The version of the collection. Must be compatible with semantic versioning +version: 1.0.0 + +# The path to the Markdown (.md) readme file. This path is relative to the root of the collection +readme: README.md + +# A list of the collection's content authors. Can be just the name or in the format 'Full Name (url) +# @nicks:irc/im.site#channel' +authors: +- your name + + +### OPTIONAL but strongly recommended +# A short summary description of the collection +description: > + The easiest way to bootstrap a self-hosted High Availability Kubernetes + cluster. A fully automated HA k3s etcd install with kube-vip, MetalLB, + and more. 
+ +# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only +# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file' +license: +- Apache-2.0 + + +# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character +# requirements as 'namespace' and 'name' +tags: + - etcd + - high-availability + - k8s + - k3s + - k3s-cluster + - kube-vip + - kubernetes + - metallb + - rancher + +# Collections that this collection requires to be installed for it to be usable. The key of the dict is the +# collection label 'namespace.name'. The value is a version range +# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version +# range specifiers can be set and are separated by ',' +dependencies: + ansible.utils: '*' + ansible.posix: '*' + community.general: '*' + kubernetes.core: '*' + +# The URL of the originating SCM repository +repository: https://github.com/techno-tim/k3s-ansible + +# The URL to any online docs +documentation: https://github.com/techno-tim/k3s-ansible + +# The URL to the homepage of the collection/project +homepage: https://www.youtube.com/watch?v=CbkEWcUZ7zM + +# The URL to the collection issue tracker +issues: https://github.com/techno-tim/k3s-ansible/issues + +# A list of file glob-like patterns used to filter any files or directories that should not be included in the build +# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This +# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry', +# and '.git' are always filtered. Mutually exclusive with 'manifest' +build_ignore: [] + +# A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a +# list of MANIFEST.in style +# L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). The key +# 'omit_default_directives' is a boolean that controls whether the default directives are used. 
Mutually exclusive + with 'build_ignore' +# manifest: null diff --git a/k3s-ansible-copia/inventory/.gitignore b/k3s-ansible-copia/inventory/.gitignore new file mode 100644 index 0000000..ddcc0d1 --- /dev/null +++ b/k3s-ansible-copia/inventory/.gitignore @@ -0,0 +1,3 @@ +/* +!.gitignore +!sample/ diff --git a/k3s-ansible-copia/inventory/sample/group_vars/all.yml b/k3s-ansible-copia/inventory/sample/group_vars/all.yml new file mode 100644 index 0000000..01b1fe9 --- /dev/null +++ b/k3s-ansible-copia/inventory/sample/group_vars/all.yml @@ -0,0 +1,171 @@ +--- +k3s_version: v1.30.2+k3s2 +# this is the user that has ssh access to these machines +ansible_user: ansibleuser +systemd_dir: /etc/systemd/system + +# Set your timezone +system_timezone: "Your/Timezone" + +# interface which will be used for flannel +flannel_iface: "eth0" + +# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about +# calico_iface: "eth0" +calico_ebpf: false # use eBPF dataplane instead of iptables +calico_tag: "v3.28.0" # calico version tag + +# uncomment cilium_iface to use cilium cni instead of flannel or calico +# ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel +# cilium_iface: "eth0" +cilium_mode: "native" # native when nodes on same subnet or using bgp, else set routed +cilium_tag: "v1.16.0" # cilium version tag +cilium_hubble: true # enable hubble observability relay and ui + +# if using calico or cilium, you may specify the cluster pod cidr pool +cluster_cidr: "10.52.0.0/16" + +# enable cilium bgp control plane for lb services and pod cidrs. disables metallb. +cilium_bgp: false + +# bgp parameters for cilium cni. only active when cilium_iface is defined and cilium_bgp is true. +cilium_bgp_my_asn: "64513" +cilium_bgp_peer_asn: "64512" +cilium_bgp_peer_address: "192.168.30.1" +cilium_bgp_lb_cidr: "192.168.31.0/24" # cidr for cilium loadbalancer ipam + +# apiserver_endpoint is the virtual IP address which will be configured on each master +apiserver_endpoint: "192.168.30.222" + +# k3s_token is required so that masters can talk to each other securely +# this token should be alphanumeric only +k3s_token: "some-SUPER-DEDEUPER-secret-password" + +# The IP on which the node is reachable in the cluster. +# Here, a sensible default is provided; you can still override +# it for each of your hosts, though.
+k3s_node_ip: "{{ ansible_facts[(cilium_iface | default(calico_iface | default(flannel_iface)))]['ipv4']['address'] }}" + +# Disable the taint manually by setting: k3s_master_taint = false +k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}" + +# these arguments are recommended for servers as well as agents: +extra_args: >- + {{ '--flannel-iface=' + flannel_iface if calico_iface is not defined and cilium_iface is not defined else '' }} + --node-ip={{ k3s_node_ip }} + +# change these to your liking, the only required are: --disable servicelb, --tls-san {{ apiserver_endpoint }} +# the contents of the if block is also required if using calico or cilium +extra_server_args: >- + {{ extra_args }} + {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }} + {% if calico_iface is defined or cilium_iface is defined %} + --flannel-backend=none + --disable-network-policy + --cluster-cidr={{ cluster_cidr | default('10.52.0.0/16') }} + {% endif %} + --tls-san {{ apiserver_endpoint }} + --disable servicelb + --disable traefik + +extra_agent_args: >- + {{ extra_args }} + +# image tag for kube-vip +kube_vip_tag_version: "v0.8.2" + +# tag for kube-vip-cloud-provider manifest +# kube_vip_cloud_provider_tag_version: "main" + +# kube-vip ip range for load balancer +# (uncomment to use kube-vip for services instead of MetalLB) +# kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90" + +# metallb type frr or native +metal_lb_type: "native" + +# metallb mode layer2 or bgp +metal_lb_mode: "layer2" + +# bgp options +# metal_lb_bgp_my_asn: "64513" +# metal_lb_bgp_peer_asn: "64512" +# metal_lb_bgp_peer_address: "192.168.30.1" + +# image tag for metal lb +metal_lb_speaker_tag_version: "v0.14.8" +metal_lb_controller_tag_version: "v0.14.8" + +# metallb ip range for load balancer +metal_lb_ip_range: "192.168.30.80-192.168.30.90" + +# Only enable if your nodes are proxmox LXC nodes, make sure to configure your proxmox nodes +# in your hosts.ini file. +# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this. +# Most notably, your containers must be privileged, and must not have nesting set to true. +# Please note this script disables most of the security of lxc containers, with the trade off being that lxc +# containers are significantly more resource efficient compared to full VMs. +# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this. +# I would only really recommend using this if you have particularly low powered proxmox nodes where the overhead of +# VMs would use a significant portion of your available resources. +proxmox_lxc_configure: false +# the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host, +# set this value to some-user +proxmox_lxc_ssh_user: root +# the unique proxmox ids for all of the containers in the cluster, both worker and master nodes +proxmox_lxc_ct_ids: + - 200 + - 201 + - 202 + - 203 + - 204 + +# Only enable this if you have set up your own container registry to act as a mirror / pull-through cache +# (harbor / nexus / docker's official registry / etc). +# Can be beneficial for larger dev/test environments (for example if you're getting rate limited by docker hub), +# or air-gapped environments where your nodes don't have internet access after the initial setup +# (which is still needed for downloading the k3s binary and such). 
+# k3s's documentation about private registries here: https://docs.k3s.io/installation/private-registry +custom_registries: false +# The registries can be authenticated or anonymous, depending on your registry server configuration. +# If they allow anonymous access, simply remove the following bit from custom_registries_yaml +# configs: +# "registry.domain.com": +# auth: +# username: yourusername +# password: yourpassword +# The following is an example that pulls all images used in this playbook through your private registries. +# It also allows you to pull your own images from your private registry, without having to use imagePullSecrets +# in your deployments. +# If all you need is your own images and you don't care about caching the docker/quay/ghcr.io images, +# you can just remove those from the mirrors: section. +custom_registries_yaml: | + mirrors: + docker.io: + endpoint: + - "https://registry.domain.com/v2/dockerhub" + quay.io: + endpoint: + - "https://registry.domain.com/v2/quayio" + ghcr.io: + endpoint: + - "https://registry.domain.com/v2/ghcrio" + registry.domain.com: + endpoint: + - "https://registry.domain.com" + + configs: + "registry.domain.com": + auth: + username: yourusername + password: yourpassword + +# On some distros like Diet Pi, there is no dbus installed. dbus required by the default reboot command. +# Uncomment if you need a custom reboot command +# custom_reboot_command: /usr/sbin/shutdown -r now + +# Only enable and configure these if you access the internet through a proxy +# proxy_env: +# HTTP_PROXY: "http://proxy.domain.local:3128" +# HTTPS_PROXY: "http://proxy.domain.local:3128" +# NO_PROXY: "*.domain.local,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" diff --git a/k3s-ansible-copia/inventory/sample/group_vars/proxmox.yml b/k3s-ansible-copia/inventory/sample/group_vars/proxmox.yml new file mode 100644 index 0000000..ea1759b --- /dev/null +++ b/k3s-ansible-copia/inventory/sample/group_vars/proxmox.yml @@ -0,0 +1,2 @@ +--- +ansible_user: '{{ proxmox_lxc_ssh_user }}' diff --git a/k3s-ansible-copia/inventory/sample/hosts.ini b/k3s-ansible-copia/inventory/sample/hosts.ini new file mode 100644 index 0000000..7045423 --- /dev/null +++ b/k3s-ansible-copia/inventory/sample/hosts.ini @@ -0,0 +1,17 @@ +[master] +192.168.30.38 +192.168.30.39 +192.168.30.40 + +[node] +192.168.30.41 +192.168.30.42 + +# only required if proxmox_lxc_configure: true +# must contain all proxmox instances that have a master or worker node +# [proxmox] +# 192.168.30.43 + +[k3s_cluster:children] +master +node diff --git a/k3s-ansible-copia/k3s.crt b/k3s-ansible-copia/k3s.crt new file mode 100644 index 0000000..e69de29 diff --git a/k3s-ansible-copia/k3s_ca.crt b/k3s-ansible-copia/k3s_ca.crt new file mode 100644 index 0000000..e69de29 diff --git a/k3s-ansible-copia/kubeconfig b/k3s-ansible-copia/kubeconfig new file mode 100644 index 0000000..701d179 --- /dev/null +++ b/k3s-ansible-copia/kubeconfig @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTkRJek1UZ3hNalF3SGhjTk1qVXdNekU0TVRjeE5USTBXaGNOTXpVd016RTJNVGN4TlRJMApXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTkRJek1UZ3hNalF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRdXgzUzZOdUJ0bXExbzhIaFFkL0pYK3BLdm1UMEpMSkNWdFBqNjNkWFkKR3lmSnlDM3dLazdIZzNGMS90eExnSFRUUHRmUm56b0ZEdGNPZU5xWEpUejFvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWJUTnRFL0JUUmpIZ1ljbEJkRm9QCkVhT3JsT2N3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQU9ObWx5QUxXeklhTkFoZ1BRMlVtb0tmdmF3V3IrNlAKaG5rQkhVTVV2TTcrQWlCLzJsSWJyZzV3TjJwMC9RY0duWVllcEppbzF2ZHRjTHNmYmhVMm5FbndFZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + server: https://192.168.1.222:6443 + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJZmlmRjE3UDRVRFV3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOelF5TXpFNE1USTBNQjRYRFRJMU1ETXhPREUzTVRVeU5Gb1hEVEkyTURNeApPREUzTVRVeU5Gb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJDK3AwNFhSeWNWMzZQZVQKWWJvVU44OFhXemZHVkZGenFBRzlsdi90cGVVNlNFZEI4YzNBamU3STA2UitnY2FNTjlvekVFS096cFVYcktmVgpMWFJEUlRpalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUjZOM3l6Yyt4OFFIcHo2U3F1UkhBdjBlY0lBREFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQXBoRlloN3FERVJSSmlDcWtYS0hDbXMvTDRDMDVMZVhxT0ZoWUZRNGVBN1lDSUU0KzJKZHFwSHhEV1hkQworU2M4VFBmODFwZTU5Q0t4MnBETllDZjdUcFNjCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkakNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTkRJek1UZ3hNalF3SGhjTk1qVXdNekU0TVRjeE5USTBXaGNOTXpVd016RTJNVGN4TlRJMApXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTkRJek1UZ3hNalF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRSi9ndlFjbXphVG5XcHd3VlRYaUdNUGVqeWFnaWhtSUl5SU5iUHNtR0MKWWIxTWRqQ1RYZ3V4OUJrUUhJRWVQMEhvY1FuSEhpeUhGY1orb09iWGVPWlFvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWVqZDhzM1BzZkVCNmMra3Fya1J3Ckw5SG5DQUF3Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnZjJhekc0VEo5c084NXlPWE12NVNrcWczRTdsMFNTM3kKN2g3QzExcVlmSWdDSUJuTnBrR1d6QjFycVBzdHI0dGlSWGdmVE8vc3lnbXM2cm5WZjcwNzlpRncKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUc4NmJjVlJZYTVTQ2NUZ08zK0xQRHRDb1VRVS9VNm1DUEh3akhTN1BYMWtvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFTDZuVGhkSEp4WGZvOTVOaHVoUTN6eGRiTjhaVVVYT29BYjJXLysybDVUcElSMEh4emNDTgo3c2pUcEg2QnhvdzMyak1RUW83T2xSZXNwOVV0ZEVORk9BPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= diff --git a/k3s-ansible-copia/molecule/README.md b/k3s-ansible-copia/molecule/README.md new file mode 100644 index 0000000..aa1845b --- /dev/null +++ b/k3s-ansible-copia/molecule/README.md @@ -0,0 +1,79 @@ +# Test suites for `k3s-ansible` + +This folder contains the [molecule](https://molecule.rtfd.io/)-based test setup for this playbook. + +## Scenarios + +We have these scenarios: + +- **default**: + A 3 control + 2 worker node cluster based very closely on the [sample inventory](../inventory/sample/). 
+- **ipv6**: + A cluster that is externally accessible via IPv6 ([more information](ipv6/README.md)). + To save a bit of test time, this cluster is _not_ highly available; it consists of only one control and one worker node. +- **single_node**: + Very similar to the default scenario, but uses only a single node for all cluster functionality. +- **calico**: + The same as single node, but uses calico cni instead of flannel. +- **cilium**: + The same as single node, but uses cilium cni instead of flannel. +- **kube-vip**: + The same as single node, but uses kube-vip as the service load balancer instead of MetalLB. + +## How to execute + +To test on your local machine, follow these steps: + +### System requirements + +Make sure that the following software packages are available on your system: + +- [Python 3](https://www.python.org/downloads) +- [Vagrant](https://www.vagrantup.com/downloads) +- [VirtualBox](https://www.virtualbox.org/wiki/Downloads) + +### Set up VirtualBox networking on Linux and macOS + +_You can safely skip this if you are working on Windows._ + +The test cluster uses the `192.168.30.0/24` subnet, which is [not set up by VirtualBox automatically](https://www.virtualbox.org/manual/ch06.html#network_hostonly). +To set the subnet up for use with VirtualBox, please make sure that `/etc/vbox/networks.conf` exists and that it contains these lines: + +``` +* 192.168.30.0/24 +* fdad:bad:ba55::/64 +``` + +### Install Python dependencies + +You will get [Molecule, Ansible and a few extra dependencies](../requirements.txt) via [pip](https://pip.pypa.io/). +Usually, it is advisable to work in a [virtual environment](https://docs.python.org/3/tutorial/venv.html) for this: + +```bash +cd /path/to/k3s-ansible + +# Create a virtualenv at ".env". You only need to do this once. +python3 -m venv .env + +# Activate the virtualenv for your current shell session. +# If you start a new session, you will have to repeat this. +source .env/bin/activate + +# Install the required packages into the virtualenv. +# These remain installed across shell sessions. +python3 -m pip install -r requirements.txt +``` + +### Run molecule + +With the virtual environment from the previous step active in your shell session, you can now use molecule to test the playbook. +Interesting commands are: + +- `molecule create`: Create virtual machines for the test cluster nodes. +- `molecule destroy`: Delete the virtual machines for the test cluster nodes. +- `molecule converge`: Run the `site` playbook on the nodes of the test cluster. +- `molecule side_effect`: Run the `reset` playbook on the nodes of the test cluster. +- `molecule verify`: Verify that the cluster works correctly. +- `molecule test`: The "all-in-one" sequence of steps that is executed in CI. + This includes the `create`, `converge`, `verify`, `side_effect` and `destroy` steps. + See [`molecule.yml`](default/molecule.yml) for more details.
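For example, to exercise a single scenario end to end, a minimal sketch using molecule's standard `-s`/`--scenario-name` flag looks like this (pick any of the scenario directories listed above):

```bash
# Run the full CI-style sequence (create, converge, verify, side_effect, destroy)
# for just one scenario, e.g. the single-node one.
molecule test -s single_node

# Or iterate manually while developing: bring the VMs up, apply the site playbook,
# then check that the cluster came up as expected.
molecule create -s single_node
molecule converge -s single_node
molecule verify -s single_node
```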
diff --git a/k3s-ansible-copia/molecule/calico/molecule.yml b/k3s-ansible-copia/molecule/calico/molecule.yml new file mode 100644 index 0000000..e4ddb25 --- /dev/null +++ b/k3s-ansible-copia/molecule/calico/molecule.yml @@ -0,0 +1,49 @@ +--- +dependency: + name: galaxy +driver: + name: vagrant +platforms: + - name: control1 + box: generic/ubuntu2204 + memory: 4096 + cpus: 4 + config_options: + # We currently can not use public-key based authentication on Ubuntu 22.04, + # see: https://github.com/chef/bento/issues/1405 + ssh.username: vagrant + ssh.password: vagrant + groups: + - k3s_cluster + - master + interfaces: + - network_name: private_network + ip: 192.168.30.62 +provisioner: + name: ansible + env: + ANSIBLE_VERBOSITY: 1 + playbooks: + converge: ../resources/converge.yml + side_effect: ../resources/reset.yml + verify: ../resources/verify.yml + inventory: + links: + group_vars: ../../inventory/sample/group_vars +scenario: + test_sequence: + - dependency + - cleanup + - destroy + - syntax + - create + - prepare + - converge + # idempotence is not possible with the playbook in its current form. + - verify + # We are repurposing side_effect here to test the reset playbook. + # This is why we do not run it before verify (which tests the cluster), + # but after the verify step. + - side_effect + - cleanup + - destroy diff --git a/k3s-ansible-copia/molecule/calico/overrides.yml b/k3s-ansible-copia/molecule/calico/overrides.yml new file mode 100644 index 0000000..a63ec44 --- /dev/null +++ b/k3s-ansible-copia/molecule/calico/overrides.yml @@ -0,0 +1,16 @@ +--- +- name: Apply overrides + hosts: all + tasks: + - name: Override host variables + ansible.builtin.set_fact: + # See: + # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant + calico_iface: eth1 + + # The test VMs might be a bit slow, so we give them more time to join the cluster: + retry_count: 45 + + # Make sure that our IP ranges do not collide with those of the other scenarios + apiserver_endpoint: 192.168.30.224 + metal_lb_ip_range: 192.168.30.100-192.168.30.109 diff --git a/k3s-ansible-copia/molecule/cilium/molecule.yml b/k3s-ansible-copia/molecule/cilium/molecule.yml new file mode 100644 index 0000000..542b6d5 --- /dev/null +++ b/k3s-ansible-copia/molecule/cilium/molecule.yml @@ -0,0 +1,49 @@ +--- +dependency: + name: galaxy +driver: + name: vagrant +platforms: + - name: control1 + box: generic/ubuntu2204 + memory: 4096 + cpus: 4 + config_options: + # We currently can not use public-key based authentication on Ubuntu 22.04, + # see: https://github.com/chef/bento/issues/1405 + ssh.username: vagrant + ssh.password: vagrant + groups: + - k3s_cluster + - master + interfaces: + - network_name: private_network + ip: 192.168.30.63 +provisioner: + name: ansible + env: + ANSIBLE_VERBOSITY: 1 + playbooks: + converge: ../resources/converge.yml + side_effect: ../resources/reset.yml + verify: ../resources/verify.yml + inventory: + links: + group_vars: ../../inventory/sample/group_vars +scenario: + test_sequence: + - dependency + - cleanup + - destroy + - syntax + - create + - prepare + - converge + # idempotence is not possible with the playbook in its current form. + - verify + # We are repurposing side_effect here to test the reset playbook. + # This is why we do not run it before verify (which tests the cluster), + # but after the verify step. 
+ - side_effect + - cleanup + - destroy diff --git a/k3s-ansible-copia/molecule/cilium/overrides.yml b/k3s-ansible-copia/molecule/cilium/overrides.yml new file mode 100644 index 0000000..c602a28 --- /dev/null +++ b/k3s-ansible-copia/molecule/cilium/overrides.yml @@ -0,0 +1,16 @@ +--- +- name: Apply overrides + hosts: all + tasks: + - name: Override host variables + ansible.builtin.set_fact: + # See: + # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant + cilium_iface: eth1 + + # The test VMs might be a bit slow, so we give them more time to join the cluster: + retry_count: 45 + + # Make sure that our IP ranges do not collide with those of the other scenarios + apiserver_endpoint: 192.168.30.225 + metal_lb_ip_range: 192.168.30.110-192.168.30.119 diff --git a/k3s-ansible-copia/molecule/default/molecule.yml b/k3s-ansible-copia/molecule/default/molecule.yml new file mode 100644 index 0000000..1ad61f4 --- /dev/null +++ b/k3s-ansible-copia/molecule/default/molecule.yml @@ -0,0 +1,99 @@ +--- +dependency: + name: galaxy +driver: + name: vagrant +platforms: + - name: control1 + box: generic/ubuntu2204 + memory: 1024 + cpus: 2 + groups: + - k3s_cluster + - master + interfaces: + - network_name: private_network + ip: 192.168.30.38 + config_options: + # We currently can not use public-key based authentication on Ubuntu 22.04, + # see: https://github.com/chef/bento/issues/1405 + ssh.username: vagrant + ssh.password: vagrant + + - name: control2 + box: generic/debian12 + memory: 1024 + cpus: 2 + groups: + - k3s_cluster + - master + interfaces: + - network_name: private_network + ip: 192.168.30.39 + + - name: control3 + box: generic/rocky9 + memory: 1024 + cpus: 2 + groups: + - k3s_cluster + - master + interfaces: + - network_name: private_network + ip: 192.168.30.40 + + - name: node1 + box: generic/ubuntu2204 + memory: 1024 + cpus: 2 + groups: + - k3s_cluster + - node + interfaces: + - network_name: private_network + ip: 192.168.30.41 + config_options: + # We currently can not use public-key based authentication on Ubuntu 22.04, + # see: https://github.com/chef/bento/issues/1405 + ssh.username: vagrant + ssh.password: vagrant + + - name: node2 + box: generic/rocky9 + memory: 1024 + cpus: 2 + groups: + - k3s_cluster + - node + interfaces: + - network_name: private_network + ip: 192.168.30.42 + +provisioner: + name: ansible + env: + ANSIBLE_VERBOSITY: 1 + playbooks: + converge: ../resources/converge.yml + side_effect: ../resources/reset.yml + verify: ../resources/verify.yml + inventory: + links: + group_vars: ../../inventory/sample/group_vars +scenario: + test_sequence: + - dependency + - cleanup + - destroy + - syntax + - create + - prepare + - converge + # idempotence is not possible with the playbook in its current form. + - verify + # We are repurposing side_effect here to test the reset playbook. + # This is why we do not run it before verify (which tests the cluster), + # but after the verify step. 
+ - side_effect + - cleanup + - destroy diff --git a/k3s-ansible-copia/molecule/default/overrides.yml b/k3s-ansible-copia/molecule/default/overrides.yml new file mode 100644 index 0000000..4eea472 --- /dev/null +++ b/k3s-ansible-copia/molecule/default/overrides.yml @@ -0,0 +1,12 @@ +--- +- name: Apply overrides + hosts: all + tasks: + - name: Override host variables + ansible.builtin.set_fact: + # See: + # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant + flannel_iface: eth1 + + # The test VMs might be a bit slow, so we give them more time to join the cluster: + retry_count: 45 diff --git a/k3s-ansible-copia/molecule/default/prepare.yml b/k3s-ansible-copia/molecule/default/prepare.yml new file mode 100644 index 0000000..044aa79 --- /dev/null +++ b/k3s-ansible-copia/molecule/default/prepare.yml @@ -0,0 +1,22 @@ +--- +- name: Apply overrides + ansible.builtin.import_playbook: >- + {{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml + +- name: Network setup + hosts: all + tasks: + - name: Disable firewalld + when: ansible_distribution == "Rocky" + # Rocky Linux comes with firewalld enabled. It blocks some of the network + # connections needed for our k3s cluster. For our test setup, we just disable + # it since the VM host's firewall is still active for connections to and from + # the Internet. + # When building your own cluster, please DO NOT blindly copy this. Instead, + # please create a custom firewall configuration that fits your network design + # and security needs. + ansible.builtin.systemd: + name: firewalld + enabled: false + state: stopped + become: true diff --git a/k3s-ansible-copia/molecule/ipv6/README.md b/k3s-ansible-copia/molecule/ipv6/README.md new file mode 100644 index 0000000..eaaeeab --- /dev/null +++ b/k3s-ansible-copia/molecule/ipv6/README.md @@ -0,0 +1,35 @@ +# Sample IPv6 configuration for `k3s-ansible` + +This scenario contains a cluster configuration which is _IPv6 first_, but still supports dual-stack networking with IPv4 for most things. +This means: + +- The API server VIP is an IPv6 address. +- The MetalLB pool consists of both IPv4 and IPv6 addresses. +- Nodes as well as cluster-internal resources (pods and services) are accessible via IPv4 as well as IPv6. + +## Network design + +All IPv6 addresses used in this scenario share a single `/48` prefix: `fdad:bad:ba55`. +The following subnets are used: + +- `fdad:bad:ba55:`**`0`**`::/64` is the subnet which contains the cluster components meant for external access. + That includes: + + - The VIP for the Kubernetes API server: `fdad:bad:ba55::333` + - Services load-balanced by MetalLB: `fdad:bad:ba55::1b:0/112` + - Cluster nodes: `fdad:bad:ba55::de:0/112` + - The host executing Vagrant: `fdad:bad:ba55::1` + + In a home lab setup, this might be your LAN. + +- `fdad:bad:ba55:`**`4200`**`::/56` is used internally by the cluster for pods. + +- `fdad:bad:ba55:`**`4300`**`::/108` is used internally by the cluster for services. + +IPv4 networking is also available: + +- The nodes have addresses inside `192.168.123.0/24`. + MetalLB also has a bit of address space in this range: `192.168.123.80-192.168.123.90`. +- For pods and services, the k3s defaults (`10.42.0.0/16` and `10.43.0.0/16`) are used. + +Note that the host running Vagrant is not part of any of these IPv4 networks.
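As a quick manual sanity check (a sketch only, not part of the scenario itself; it assumes `curl` and `ping` are available on the Vagrant host, and uses the addresses documented above plus control1's addresses from this scenario's host_vars), you can confirm that the cluster answers on both address families once it is up:

```bash
# The API server VIP should answer over IPv6; an HTTP 401 from this anonymous
# request is fine, it only proves the VIP and port are reachable.
curl -k https://[fdad:bad:ba55::333]:6443/version

# A node (control1 in this scenario) should be reachable over IPv4 and IPv6.
ping -c 1 192.168.123.11
ping -c 1 fdad:bad:ba55::de:11
```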
diff --git a/k3s-ansible-copia/molecule/ipv6/host_vars/control1.yml b/k3s-ansible-copia/molecule/ipv6/host_vars/control1.yml new file mode 100644 index 0000000..aa675db --- /dev/null +++ b/k3s-ansible-copia/molecule/ipv6/host_vars/control1.yml @@ -0,0 +1,3 @@ +--- +node_ipv4: 192.168.123.11 +node_ipv6: fdad:bad:ba55::de:11 diff --git a/k3s-ansible-copia/molecule/ipv6/host_vars/control2.yml b/k3s-ansible-copia/molecule/ipv6/host_vars/control2.yml new file mode 100644 index 0000000..97fbc81 --- /dev/null +++ b/k3s-ansible-copia/molecule/ipv6/host_vars/control2.yml @@ -0,0 +1,3 @@ +--- +node_ipv4: 192.168.123.12 +node_ipv6: fdad:bad:ba55::de:12 diff --git a/k3s-ansible-copia/molecule/ipv6/host_vars/node1.yml b/k3s-ansible-copia/molecule/ipv6/host_vars/node1.yml new file mode 100644 index 0000000..57ba927 --- /dev/null +++ b/k3s-ansible-copia/molecule/ipv6/host_vars/node1.yml @@ -0,0 +1,3 @@ +--- +node_ipv4: 192.168.123.21 +node_ipv6: fdad:bad:ba55::de:21 diff --git a/k3s-ansible-copia/molecule/ipv6/molecule.yml b/k3s-ansible-copia/molecule/ipv6/molecule.yml new file mode 100644 index 0000000..5c2454e --- /dev/null +++ b/k3s-ansible-copia/molecule/ipv6/molecule.yml @@ -0,0 +1,81 @@ +--- +dependency: + name: galaxy +driver: + name: vagrant +platforms: + - name: control1 + box: generic/ubuntu2204 + memory: 1024 + cpus: 2 + groups: + - k3s_cluster + - master + interfaces: + - network_name: private_network + ip: fdad:bad:ba55::de:11 + config_options: + # We currently can not use public-key based authentication on Ubuntu 22.04, + # see: https://github.com/chef/bento/issues/1405 + ssh.username: vagrant + ssh.password: vagrant + + - name: control2 + box: generic/ubuntu2204 + memory: 1024 + cpus: 2 + groups: + - k3s_cluster + - master + interfaces: + - network_name: private_network + ip: fdad:bad:ba55::de:12 + config_options: + # We currently can not use public-key based authentication on Ubuntu 22.04, + # see: https://github.com/chef/bento/issues/1405 + ssh.username: vagrant + ssh.password: vagrant + + - name: node1 + box: generic/ubuntu2204 + memory: 1024 + cpus: 2 + groups: + - k3s_cluster + - node + interfaces: + - network_name: private_network + ip: fdad:bad:ba55::de:21 + config_options: + # We currently can not use public-key based authentication on Ubuntu 22.04, + # see: https://github.com/chef/bento/issues/1405 + ssh.username: vagrant + ssh.password: vagrant +provisioner: + name: ansible + env: + ANSIBLE_VERBOSITY: 1 + playbooks: + converge: ../resources/converge.yml + side_effect: ../resources/reset.yml + verify: ../resources/verify.yml + inventory: + links: + group_vars: ../../inventory/sample/group_vars +scenario: + test_sequence: + - dependency + - cleanup + - destroy + - syntax + - create + - prepare + - converge + # idempotence is not possible with the playbook in its current form. + - verify + # We are repurposing side_effect here to test the reset playbook. + # This is why we do not run it before verify (which tests the cluster), + # but after the verify step. 
+ - side_effect + - cleanup + - destroy diff --git a/k3s-ansible-copia/molecule/ipv6/overrides.yml b/k3s-ansible-copia/molecule/ipv6/overrides.yml new file mode 100644 index 0000000..44bbc07 --- /dev/null +++ b/k3s-ansible-copia/molecule/ipv6/overrides.yml @@ -0,0 +1,51 @@ +--- +- name: Apply overrides + hosts: all + tasks: + - name: Override host variables (1/2) + ansible.builtin.set_fact: + # See: + # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant + flannel_iface: eth1 + + # In this scenario, we have multiple interfaces that the VIP could be + # broadcasted on. Since we have assigned a dedicated private network + # here, let's make sure that it is used. + kube_vip_iface: eth1 + + # The test VMs might be a bit slow, so we give them more time to join the cluster: + retry_count: 45 + + # IPv6 configuration + # ###################################################################### + + # The API server will be reachable on IPv6 only + apiserver_endpoint: fdad:bad:ba55::333 + + # We give MetalLB address space for both IPv4 and IPv6 + metal_lb_ip_range: + - fdad:bad:ba55::1b:0/112 + - 192.168.123.80-192.168.123.90 + + # k3s_node_ip is by default set to the IPv4 address of flannel_iface. + # We want IPv6 addresses here of course, so we just specify them + # manually below. + k3s_node_ip: "{{ node_ipv4 }},{{ node_ipv6 }}" + + - name: Override host variables (2/2) + # Since "extra_args" depends on "k3s_node_ip" and "flannel_iface" we have + # to set this AFTER overriding the both of them. + ansible.builtin.set_fact: + # A few extra server args are necessary: + # - the network policy needs to be disabled. + # - we need to manually specify the subnets for services and pods, as + # the default has IPv4 ranges only. + extra_server_args: >- + {{ extra_args }} + --tls-san {{ apiserver_endpoint }} + {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }} + --disable servicelb + --disable traefik + --disable-network-policy + --cluster-cidr=10.42.0.0/16,fdad:bad:ba55:4200::/56 + --service-cidr=10.43.0.0/16,fdad:bad:ba55:4300::/108 diff --git a/k3s-ansible-copia/molecule/ipv6/prepare.yml b/k3s-ansible-copia/molecule/ipv6/prepare.yml new file mode 100644 index 0000000..9763458 --- /dev/null +++ b/k3s-ansible-copia/molecule/ipv6/prepare.yml @@ -0,0 +1,51 @@ +--- +- name: Apply overrides + ansible.builtin.import_playbook: >- + {{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml + +- name: Configure dual-stack networking + hosts: all + become: true + + # Unfortunately, as of 2022-09, Vagrant does not support the configuration + # of both IPv4 and IPv6 addresses for a single network adapter. So we have + # to configure that ourselves. + # Moreover, we have to explicitly enable IPv6 for the loopback interface. + + tasks: + - name: Enable IPv6 for network interfaces + ansible.posix.sysctl: + name: net.ipv6.conf.{{ item }}.disable_ipv6 + value: "0" + with_items: + - all + - default + - lo + + - name: Disable duplicate address detection + # Duplicate address detection did repeatedly fail within the virtual + # network. But since this setup does not use SLAAC anyway, we can safely + # disable it. 
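As a point of reference for the dual-stack arguments assembled above: k3s accepts the same settings through `/etc/rancher/k3s/config.yaml`. The playbook does not write such a file (it passes CLI flags through `extra_server_args`); the sketch below only restates the scenario's values in config-file form, with `node-ip` shown for control1.

```yaml
# Illustrative /etc/rancher/k3s/config.yaml equivalent of the flags above;
# this playbook passes them as CLI arguments instead of writing this file.
tls-san:
  - fdad:bad:ba55::333
disable:
  - servicelb
  - traefik
disable-network-policy: true
cluster-cidr: "10.42.0.0/16,fdad:bad:ba55:4200::/56"
service-cidr: "10.43.0.0/16,fdad:bad:ba55:4300::/108"
node-ip: "192.168.123.11,fdad:bad:ba55::de:11"   # per node, from host_vars
```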
+ ansible.posix.sysctl: + name: net.ipv6.conf.{{ item }}.accept_dad + value: "0" + with_items: + - "{{ flannel_iface }}" + + - name: Write IPv4 configuration + ansible.builtin.template: + src: 55-flannel-ipv4.yaml.j2 + dest: /etc/netplan/55-flannel-ipv4.yaml + owner: root + group: root + mode: "0644" + register: netplan_template + + - name: Apply netplan configuration + # Conceptually, this should be a handler rather than a task. + # However, we are currently not in a role context - creating + # one just for this seemed overkill. + when: netplan_template.changed + ansible.builtin.command: + cmd: netplan apply + changed_when: true diff --git a/k3s-ansible-copia/molecule/ipv6/templates/55-flannel-ipv4.yaml.j2 b/k3s-ansible-copia/molecule/ipv6/templates/55-flannel-ipv4.yaml.j2 new file mode 100644 index 0000000..6f68777 --- /dev/null +++ b/k3s-ansible-copia/molecule/ipv6/templates/55-flannel-ipv4.yaml.j2 @@ -0,0 +1,8 @@ +--- +network: + version: 2 + renderer: networkd + ethernets: + {{ flannel_iface }}: + addresses: + - {{ node_ipv4 }}/24 diff --git a/k3s-ansible-copia/molecule/kube-vip/molecule.yml b/k3s-ansible-copia/molecule/kube-vip/molecule.yml new file mode 100644 index 0000000..e4ddb25 --- /dev/null +++ b/k3s-ansible-copia/molecule/kube-vip/molecule.yml @@ -0,0 +1,49 @@ +--- +dependency: + name: galaxy +driver: + name: vagrant +platforms: + - name: control1 + box: generic/ubuntu2204 + memory: 4096 + cpus: 4 + config_options: + # We currently can not use public-key based authentication on Ubuntu 22.04, + # see: https://github.com/chef/bento/issues/1405 + ssh.username: vagrant + ssh.password: vagrant + groups: + - k3s_cluster + - master + interfaces: + - network_name: private_network + ip: 192.168.30.62 +provisioner: + name: ansible + env: + ANSIBLE_VERBOSITY: 1 + playbooks: + converge: ../resources/converge.yml + side_effect: ../resources/reset.yml + verify: ../resources/verify.yml + inventory: + links: + group_vars: ../../inventory/sample/group_vars +scenario: + test_sequence: + - dependency + - cleanup + - destroy + - syntax + - create + - prepare + - converge + # idempotence is not possible with the playbook in its current form. + - verify + # We are repurposing side_effect here to test the reset playbook. + # This is why we do not run it before verify (which tests the cluster), + # but after the verify step. 
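To make the 55-flannel-ipv4.yaml.j2 template above concrete: rendered for control1 with the scenario's `flannel_iface: eth1` override and `node_ipv4: 192.168.123.11`, the resulting `/etc/netplan/55-flannel-ipv4.yaml` would be roughly:

```yaml
# Rendered example of 55-flannel-ipv4.yaml.j2 for control1 (illustrative).
network:
  version: 2
  renderer: networkd
  ethernets:
    eth1:
      addresses:
        - 192.168.123.11/24
```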
+ - side_effect + - cleanup + - destroy diff --git a/k3s-ansible-copia/molecule/kube-vip/overrides.yml b/k3s-ansible-copia/molecule/kube-vip/overrides.yml new file mode 100644 index 0000000..4577afc --- /dev/null +++ b/k3s-ansible-copia/molecule/kube-vip/overrides.yml @@ -0,0 +1,17 @@ +--- +- name: Apply overrides + hosts: all + tasks: + - name: Override host variables + ansible.builtin.set_fact: + # See: + # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant + flannel_iface: eth1 + + # The test VMs might be a bit slow, so we give them more time to join the cluster: + retry_count: 45 + + # Make sure that our IP ranges do not collide with those of the other scenarios + apiserver_endpoint: 192.168.30.225 + # Use kube-vip instead of MetalLB + kube_vip_lb_ip_range: 192.168.30.110-192.168.30.119 diff --git a/k3s-ansible-copia/molecule/resources/converge.yml b/k3s-ansible-copia/molecule/resources/converge.yml new file mode 100644 index 0000000..c5efc8e --- /dev/null +++ b/k3s-ansible-copia/molecule/resources/converge.yml @@ -0,0 +1,7 @@ +--- +- name: Apply overrides + ansible.builtin.import_playbook: >- + {{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml + +- name: Converge + ansible.builtin.import_playbook: ../../site.yml diff --git a/k3s-ansible-copia/molecule/resources/reset.yml b/k3s-ansible-copia/molecule/resources/reset.yml new file mode 100644 index 0000000..266ce85 --- /dev/null +++ b/k3s-ansible-copia/molecule/resources/reset.yml @@ -0,0 +1,7 @@ +--- +- name: Apply overrides + ansible.builtin.import_playbook: >- + {{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml + +- name: Reset + ansible.builtin.import_playbook: ../../reset.yml diff --git a/k3s-ansible-copia/molecule/resources/verify.yml b/k3s-ansible-copia/molecule/resources/verify.yml new file mode 100644 index 0000000..ef7ea52 --- /dev/null +++ b/k3s-ansible-copia/molecule/resources/verify.yml @@ -0,0 +1,5 @@ +--- +- name: Verify + hosts: all + roles: + - verify_from_outside diff --git a/k3s-ansible-copia/molecule/resources/verify_from_outside/defaults/main.yml b/k3s-ansible-copia/molecule/resources/verify_from_outside/defaults/main.yml new file mode 100644 index 0000000..104fda4 --- /dev/null +++ b/k3s-ansible-copia/molecule/resources/verify_from_outside/defaults/main.yml @@ -0,0 +1,9 @@ +--- +# A host outside of the cluster from which the checks shall be performed +outside_host: localhost + +# This kubernetes namespace will be used for testing +testing_namespace: molecule-verify-from-outside + +# The directory in which the example manifests reside +example_manifests_path: ../../../example diff --git a/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/kubecfg-cleanup.yml b/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/kubecfg-cleanup.yml new file mode 100644 index 0000000..9645af1 --- /dev/null +++ b/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/kubecfg-cleanup.yml @@ -0,0 +1,5 @@ +--- +- name: Clean up kubecfg + ansible.builtin.file: + path: "{{ kubecfg.path }}" + state: absent diff --git a/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/kubecfg-fetch.yml b/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/kubecfg-fetch.yml new file mode 100644 index 0000000..d7f498e --- /dev/null +++ b/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/kubecfg-fetch.yml @@ -0,0 +1,19 @@ +--- +- name: Create temporary directory 
for kubecfg + ansible.builtin.tempfile: + state: directory + suffix: kubecfg + register: kubecfg +- name: Gathering facts + delegate_to: "{{ groups['master'][0] }}" + ansible.builtin.gather_facts: +- name: Download kubecfg + ansible.builtin.fetch: + src: "{{ ansible_env.HOME }}/.kube/config" + dest: "{{ kubecfg.path }}/" + flat: true + delegate_to: "{{ groups['master'][0] }}" + delegate_facts: true +- name: Store path to kubecfg + ansible.builtin.set_fact: + kubecfg_path: "{{ kubecfg.path }}/config" diff --git a/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/main.yml b/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/main.yml new file mode 100644 index 0000000..2f43a27 --- /dev/null +++ b/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/main.yml @@ -0,0 +1,14 @@ +--- +- name: Verify + run_once: true + delegate_to: "{{ outside_host }}" + block: + - name: "Test CASE: Get kube config" + ansible.builtin.import_tasks: kubecfg-fetch.yml + - name: "TEST CASE: Get nodes" + ansible.builtin.include_tasks: test/get-nodes.yml + - name: "TEST CASE: Deploy example" + ansible.builtin.include_tasks: test/deploy-example.yml + always: + - name: "TEST CASE: Cleanup" + ansible.builtin.import_tasks: kubecfg-cleanup.yml diff --git a/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/test/deploy-example.yml b/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/test/deploy-example.yml new file mode 100644 index 0000000..13a1c4b --- /dev/null +++ b/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/test/deploy-example.yml @@ -0,0 +1,58 @@ +--- +- name: Deploy example + block: + - name: "Create namespace: {{ testing_namespace }}" + kubernetes.core.k8s: + api_version: v1 + kind: Namespace + name: "{{ testing_namespace }}" + state: present + wait: true + kubeconfig: "{{ kubecfg_path }}" + + - name: Apply example manifests + kubernetes.core.k8s: + src: "{{ example_manifests_path }}/{{ item }}" + namespace: "{{ testing_namespace }}" + state: present + wait: true + kubeconfig: "{{ kubecfg_path }}" + with_items: + - deployment.yml + - service.yml + + - name: Get info about nginx service + kubernetes.core.k8s_info: + kind: service + name: nginx + namespace: "{{ testing_namespace }}" + kubeconfig: "{{ kubecfg_path }}" + vars: + metallb_ip: status.loadBalancer.ingress[0].ip + metallb_port: spec.ports[0].port + register: nginx_services + + - name: Assert that the nginx welcome page is available + ansible.builtin.uri: + url: http://{{ ip | ansible.utils.ipwrap }}:{{ port_ }}/ + return_content: true + register: result + failed_when: "'Welcome to nginx!' not in result.content" + vars: + ip: >- + {{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }} + port_: >- + {{ nginx_services.resources[0].spec.ports[0].port }} + # Deactivated linter rules: + # - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap + # would be undefined. This will not be the case during playbook execution. 
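The test above applies the `deployment.yml` and `service.yml` manifests from the repository's `example/` directory; their exact contents are not reproduced here, but for orientation they amount to something like the sketch below: an nginx Deployment plus a `LoadBalancer` Service named `nginx`, which is what the welcome-page assertion expects. The image tag and replica count are assumptions.

```yaml
# Rough sketch of the kind of manifests the test applies; the real files in
# example/ may differ in detail.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:stable
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  type: LoadBalancer
  selector:
    app: nginx
  ports:
    - port: 80
      targetPort: 80
```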
+ # noqa jinja[invalid] + + always: + - name: "Remove namespace: {{ testing_namespace }}" + kubernetes.core.k8s: + api_version: v1 + kind: Namespace + name: "{{ testing_namespace }}" + state: absent + kubeconfig: "{{ kubecfg_path }}" diff --git a/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/test/get-nodes.yml b/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/test/get-nodes.yml new file mode 100644 index 0000000..99b86a4 --- /dev/null +++ b/k3s-ansible-copia/molecule/resources/verify_from_outside/tasks/test/get-nodes.yml @@ -0,0 +1,28 @@ +--- +- name: Get all nodes in cluster + kubernetes.core.k8s_info: + kind: node + kubeconfig: "{{ kubecfg_path }}" + register: cluster_nodes + +- name: Assert that the cluster contains exactly the expected nodes + ansible.builtin.assert: + that: found_nodes == expected_nodes + success_msg: "Found nodes as expected: {{ found_nodes }}" + fail_msg: Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }} + vars: + found_nodes: >- + {{ cluster_nodes | json_query('resources[*].metadata.name') | unique | sort }} + expected_nodes: |- + {{ + ( + ( groups['master'] | default([]) ) + + ( groups['node'] | default([]) ) + ) + | unique + | sort + }} + # Deactivated linter rules: + # - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap + # would be undefined. This will not be the case during playbook execution. + # noqa jinja[invalid] diff --git a/k3s-ansible-copia/molecule/single_node/molecule.yml b/k3s-ansible-copia/molecule/single_node/molecule.yml new file mode 100644 index 0000000..c6d45fc --- /dev/null +++ b/k3s-ansible-copia/molecule/single_node/molecule.yml @@ -0,0 +1,49 @@ +--- +dependency: + name: galaxy +driver: + name: vagrant +platforms: + - name: control1 + box: generic/ubuntu2204 + memory: 4096 + cpus: 4 + config_options: + # We currently can not use public-key based authentication on Ubuntu 22.04, + # see: https://github.com/chef/bento/issues/1405 + ssh.username: vagrant + ssh.password: vagrant + groups: + - k3s_cluster + - master + interfaces: + - network_name: private_network + ip: 192.168.30.50 +provisioner: + name: ansible + env: + ANSIBLE_VERBOSITY: 1 + playbooks: + converge: ../resources/converge.yml + side_effect: ../resources/reset.yml + verify: ../resources/verify.yml + inventory: + links: + group_vars: ../../inventory/sample/group_vars +scenario: + test_sequence: + - dependency + - cleanup + - destroy + - syntax + - create + - prepare + - converge + # idempotence is not possible with the playbook in its current form. + - verify + # We are repurposing side_effect here to test the reset playbook. + # This is why we do not run it before verify (which tests the cluster), + # but after the verify step. 
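As a concrete reading of the node assertion above: in a scenario with two masters and one worker (control1, control2, node1), both `found_nodes` and `expected_nodes` should evaluate to the same sorted list, e.g.:

```yaml
# Expected value of both found_nodes and expected_nodes in a
# control1/control2/node1 scenario (illustrative).
- control1
- control2
- node1
```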
+ - side_effect + - cleanup + - destroy diff --git a/k3s-ansible-copia/molecule/single_node/overrides.yml b/k3s-ansible-copia/molecule/single_node/overrides.yml new file mode 100644 index 0000000..2cb8ec7 --- /dev/null +++ b/k3s-ansible-copia/molecule/single_node/overrides.yml @@ -0,0 +1,16 @@ +--- +- name: Apply overrides + hosts: all + tasks: + - name: Override host variables + ansible.builtin.set_fact: + # See: + # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant + flannel_iface: eth1 + + # The test VMs might be a bit slow, so we give them more time to join the cluster: + retry_count: 45 + + # Make sure that our IP ranges do not collide with those of the default scenario + apiserver_endpoint: 192.168.30.223 + metal_lb_ip_range: 192.168.30.91-192.168.30.99 diff --git a/k3s-ansible-copia/reboot.sh b/k3s-ansible-copia/reboot.sh new file mode 100755 index 0000000..95f66a6 --- /dev/null +++ b/k3s-ansible-copia/reboot.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +ansible-playbook reboot.yml diff --git a/k3s-ansible-copia/reboot.yml b/k3s-ansible-copia/reboot.yml new file mode 100644 index 0000000..e0fa8b9 --- /dev/null +++ b/k3s-ansible-copia/reboot.yml @@ -0,0 +1,10 @@ +--- +- name: Reboot k3s_cluster + hosts: k3s_cluster + gather_facts: true + tasks: + - name: Reboot the nodes (and Wait upto 5 mins max) + become: true + ansible.builtin.reboot: + reboot_command: "{{ custom_reboot_command | default(omit) }}" + reboot_timeout: 300 diff --git a/k3s-ansible-copia/requirements.in b/k3s-ansible-copia/requirements.in new file mode 100644 index 0000000..e0eac29 --- /dev/null +++ b/k3s-ansible-copia/requirements.in @@ -0,0 +1,10 @@ +ansible-core>=2.16.2 +jmespath>=1.0.1 +jsonpatch>=1.33 +kubernetes>=29.0.0 +molecule-plugins[vagrant] +molecule>=6.0.3 +netaddr>=0.10.1 +pre-commit>=3.6.0 +pre-commit-hooks>=4.5.0 +pyyaml>=6.0.1 diff --git a/k3s-ansible-copia/requirements.txt b/k3s-ansible-copia/requirements.txt new file mode 100644 index 0000000..8370016 --- /dev/null +++ b/k3s-ansible-copia/requirements.txt @@ -0,0 +1,169 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile requirements.in +# +ansible-compat==4.1.11 + # via molecule +ansible-core==2.18.0 + # via + # -r requirements.in + # ansible-compat + # molecule +attrs==23.2.0 + # via + # jsonschema + # referencing +bracex==2.4 + # via wcmatch +cachetools==5.3.2 + # via google-auth +certifi==2023.11.17 + # via + # kubernetes + # requests +cffi==1.16.0 + # via cryptography +cfgv==3.4.0 + # via pre-commit +charset-normalizer==3.3.2 + # via requests +click==8.1.7 + # via + # click-help-colors + # molecule +click-help-colors==0.9.4 + # via molecule +cryptography==41.0.7 + # via ansible-core +distlib==0.3.8 + # via virtualenv +enrich==1.2.7 + # via molecule +filelock==3.13.1 + # via virtualenv +google-auth==2.26.2 + # via kubernetes +identify==2.5.33 + # via pre-commit +idna==3.6 + # via requests +jinja2==3.1.3 + # via + # ansible-core + # molecule +jmespath==1.0.1 + # via -r requirements.in +jsonpatch==1.33 + # via -r requirements.in +jsonpointer==2.4 + # via jsonpatch +jsonschema==4.21.1 + # via + # ansible-compat + # molecule +jsonschema-specifications==2023.12.1 + # via jsonschema +kubernetes==29.0.0 + # via -r requirements.in +markdown-it-py==3.0.0 + # via rich +markupsafe==2.1.4 + # via jinja2 +mdurl==0.1.2 + # via markdown-it-py +molecule==6.0.3 + # via + # -r requirements.in + # molecule-plugins 
+molecule-plugins[vagrant]==23.5.3 + # via -r requirements.in +netaddr==0.10.1 + # via -r requirements.in +nodeenv==1.8.0 + # via pre-commit +oauthlib==3.2.2 + # via + # kubernetes + # requests-oauthlib +packaging==23.2 + # via + # ansible-compat + # ansible-core + # molecule +platformdirs==4.1.0 + # via virtualenv +pluggy==1.3.0 + # via molecule +pre-commit==3.8.0 + # via -r requirements.in +pre-commit-hooks==4.6.0 + # via -r requirements.in +pyasn1==0.5.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.3.0 + # via google-auth +pycparser==2.21 + # via cffi +pygments==2.17.2 + # via rich +python-dateutil==2.8.2 + # via kubernetes +python-vagrant==1.0.0 + # via molecule-plugins +pyyaml==6.0.2 + # via + # -r requirements.in + # ansible-compat + # ansible-core + # kubernetes + # molecule + # pre-commit +referencing==0.32.1 + # via + # jsonschema + # jsonschema-specifications +requests==2.31.0 + # via + # kubernetes + # requests-oauthlib +requests-oauthlib==1.3.1 + # via kubernetes +resolvelib==1.0.1 + # via ansible-core +rich==13.7.0 + # via + # enrich + # molecule +rpds-py==0.17.1 + # via + # jsonschema + # referencing +rsa==4.9 + # via google-auth +ruamel-yaml==0.18.5 + # via pre-commit-hooks +ruamel-yaml-clib==0.2.8 + # via ruamel-yaml +six==1.16.0 + # via + # kubernetes + # python-dateutil +subprocess-tee==0.4.1 + # via ansible-compat +urllib3==2.1.0 + # via + # kubernetes + # requests +virtualenv==20.25.0 + # via pre-commit +wcmatch==8.5 + # via molecule +websocket-client==1.7.0 + # via kubernetes + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/k3s-ansible-copia/reset.sh b/k3s-ansible-copia/reset.sh new file mode 100755 index 0000000..bd9dcae --- /dev/null +++ b/k3s-ansible-copia/reset.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +ansible-playbook reset.yml diff --git a/k3s-ansible-copia/reset.yml b/k3s-ansible-copia/reset.yml new file mode 100644 index 0000000..238ce70 --- /dev/null +++ b/k3s-ansible-copia/reset.yml @@ -0,0 +1,25 @@ +--- +- name: Reset k3s cluster + hosts: k3s_cluster + gather_facts: true + roles: + - role: reset + become: true + - role: raspberrypi + become: true + vars: { state: absent } + post_tasks: + - name: Reboot and wait for node to come back up + become: true + ansible.builtin.reboot: + reboot_command: "{{ custom_reboot_command | default(omit) }}" + reboot_timeout: 3600 + +- name: Revert changes to Proxmox cluster + hosts: proxmox + gather_facts: true + become: true + remote_user: "{{ proxmox_lxc_ssh_user }}" + roles: + - role: reset_proxmox_lxc + when: proxmox_lxc_configure diff --git a/k3s-ansible-copia/roles/download/meta/main.yml b/k3s-ansible-copia/roles/download/meta/main.yml new file mode 100644 index 0000000..e7911d5 --- /dev/null +++ b/k3s-ansible-copia/roles/download/meta/main.yml @@ -0,0 +1,8 @@ +--- +argument_specs: + main: + short_description: Manage the downloading of K3S binaries + options: + k3s_version: + description: The desired version of K3S + required: true diff --git a/k3s-ansible-copia/roles/download/tasks/main.yml b/k3s-ansible-copia/roles/download/tasks/main.yml new file mode 100644 index 0000000..51cd35e --- /dev/null +++ b/k3s-ansible-copia/roles/download/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: Download k3s binary x64 + ansible.builtin.get_url: + url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s + checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt + dest: /usr/local/bin/k3s + owner: root + group: 
root + mode: "0755" + when: ansible_facts.architecture == "x86_64" + +- name: Download k3s binary arm64 + ansible.builtin.get_url: + url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-arm64 + checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm64.txt + dest: /usr/local/bin/k3s + owner: root + group: root + mode: "0755" + when: + - ( ansible_facts.architecture is search("arm") and ansible_facts.userspace_bits == "64" ) + or ansible_facts.architecture is search("aarch64") + +- name: Download k3s binary armhf + ansible.builtin.get_url: + url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-armhf + checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm.txt + dest: /usr/local/bin/k3s + owner: root + group: root + mode: "0755" + when: + - ansible_facts.architecture is search("arm") + - ansible_facts.userspace_bits == "32" diff --git a/k3s-ansible-copia/roles/k3s/node/defaults/main.yml b/k3s-ansible-copia/roles/k3s/node/defaults/main.yml new file mode 100644 index 0000000..a07af66 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s/node/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# Name of the master group +group_name_master: master diff --git a/k3s-ansible-copia/roles/k3s_agent/defaults/main.yml b/k3s-ansible-copia/roles/k3s_agent/defaults/main.yml new file mode 100644 index 0000000..bdf76ae --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_agent/defaults/main.yml @@ -0,0 +1,4 @@ +--- +extra_agent_args: "" +group_name_master: master +systemd_dir: /etc/systemd/system diff --git a/k3s-ansible-copia/roles/k3s_agent/meta/main.yml b/k3s-ansible-copia/roles/k3s_agent/meta/main.yml new file mode 100644 index 0000000..cec4ba0 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_agent/meta/main.yml @@ -0,0 +1,39 @@ +--- +argument_specs: + main: + short_description: Setup k3s agents + options: + apiserver_endpoint: + description: Virtual ip-address configured on each master + required: true + + extra_agent_args: + description: Extra arguments for agents nodes + + group_name_master: + description: Name of the master group + default: master + + k3s_token: + description: Token used to communicate between masters + + proxy_env: + type: dict + description: + - Internet proxy configurations. 
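The `proxy_env` option described above (and repeated in the server role) expects the three standard proxy variables. A hedged example of what a user might put in `group_vars/all.yml` to enable it is shown below; the proxy host and port are placeholders.

```yaml
# Hypothetical group_vars entry enabling the HTTP proxy support; the proxy URL
# is a placeholder, and NO_PROXY should include your cluster and node networks.
proxy_env:
  HTTP_PROXY: "http://proxy.example.com:3128"
  HTTPS_PROXY: "http://proxy.example.com:3128"
  NO_PROXY: "localhost,127.0.0.1,10.42.0.0/16,10.43.0.0/16,192.168.30.0/24"
```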
+ - See https://docs.k3s.io/advanced#configuring-an-http-proxy for details + default: ~ + options: + HTTP_PROXY: + description: HTTP internet proxy + required: true + HTTPS_PROXY: + description: HTTPS internet proxy + required: true + NO_PROXY: + description: Addresses that will not use the proxies + required: true + + systemd_dir: + description: Path to systemd services + default: /etc/systemd/system diff --git a/k3s-ansible-copia/roles/k3s_agent/tasks/http_proxy.yml b/k3s-ansible-copia/roles/k3s_agent/tasks/http_proxy.yml new file mode 100644 index 0000000..8b58777 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_agent/tasks/http_proxy.yml @@ -0,0 +1,18 @@ +--- +- name: Create k3s-node.service.d directory + ansible.builtin.file: + path: "{{ systemd_dir }}/k3s-node.service.d" + state: directory + owner: root + group: root + mode: "0755" + when: proxy_env is defined + +- name: Copy K3s http_proxy conf file + ansible.builtin.template: + src: http_proxy.conf.j2 + dest: "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf" + owner: root + group: root + mode: "0755" + when: proxy_env is defined diff --git a/k3s-ansible-copia/roles/k3s_agent/tasks/main.yml b/k3s-ansible-copia/roles/k3s_agent/tasks/main.yml new file mode 100644 index 0000000..c522f21 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_agent/tasks/main.yml @@ -0,0 +1,36 @@ +--- +- name: Check for PXE-booted system + block: + - name: Check if system is PXE-booted + ansible.builtin.command: + cmd: cat /proc/cmdline + register: boot_cmdline + changed_when: false + check_mode: false + + - name: Set fact for PXE-booted system + ansible.builtin.set_fact: + is_pxe_booted: "{{ 'root=/dev/nfs' in boot_cmdline.stdout }}" + when: boot_cmdline.stdout is defined + + - name: Include http_proxy configuration tasks + ansible.builtin.include_tasks: http_proxy.yml + +- name: Deploy K3s http_proxy conf + ansible.builtin.include_tasks: http_proxy.yml + when: proxy_env is defined + +- name: Configure the k3s service + ansible.builtin.template: + src: k3s.service.j2 + dest: "{{ systemd_dir }}/k3s-node.service" + owner: root + group: root + mode: "0755" + +- name: Manage k3s service + ansible.builtin.systemd: + name: k3s-node + daemon_reload: true + state: restarted + enabled: true diff --git a/k3s-ansible-copia/roles/k3s_agent/templates/http_proxy.conf.j2 b/k3s-ansible-copia/roles/k3s_agent/templates/http_proxy.conf.j2 new file mode 100644 index 0000000..6591d45 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_agent/templates/http_proxy.conf.j2 @@ -0,0 +1,4 @@ +[Service] +Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }} +Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }} +Environment=NO_PROXY={{ proxy_env.NO_PROXY }} diff --git a/k3s-ansible-copia/roles/k3s_agent/templates/k3s.service.j2 b/k3s-ansible-copia/roles/k3s_agent/templates/k3s.service.j2 new file mode 100644 index 0000000..52aa272 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_agent/templates/k3s.service.j2 @@ -0,0 +1,27 @@ +[Unit] +Description=Lightweight Kubernetes +Documentation=https://k3s.io +After=network-online.target + +[Service] +Type=notify +ExecStartPre=-/sbin/modprobe br_netfilter +ExecStartPre=-/sbin/modprobe overlay +# Conditional snapshotter based on PXE boot status +ExecStart=/usr/local/bin/k3s agent \ + --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 \ + {% if is_pxe_booted | default(false) %}--snapshotter native \ + {% endif %}--token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} \ + {{ extra_agent_args }} 
+KillMode=process +Delegate=yes +LimitNOFILE=1048576 +LimitNPROC=infinity +LimitCORE=infinity +TasksMax=infinity +TimeoutStartSec=0 +Restart=always +RestartSec=5s + +[Install] +WantedBy=multi-user.target diff --git a/k3s-ansible-copia/roles/k3s_custom_registries/meta/main.yml b/k3s-ansible-copia/roles/k3s_custom_registries/meta/main.yml new file mode 100644 index 0000000..3c0878f --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_custom_registries/meta/main.yml @@ -0,0 +1,20 @@ +--- +argument_specs: + main: + short_description: Configure the use of a custom container registry + options: + custom_registries_yaml: + description: + - YAML block defining custom registries. + - > + The following is an example that pulls all images used in + this playbook through your private registries. + - > + It also allows you to pull your own images from your private + registry, without having to use imagePullSecrets in your + deployments. + - > + If all you need is your own images and you don't care about + caching the docker/quay/ghcr.io images, you can just remove + those from the mirrors: section. + required: true diff --git a/k3s-ansible-copia/roles/k3s_custom_registries/tasks/main.yml b/k3s-ansible-copia/roles/k3s_custom_registries/tasks/main.yml new file mode 100644 index 0000000..cfbb1ec --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_custom_registries/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: Create directory /etc/rancher/k3s + ansible.builtin.file: + path: /etc/{{ item }} + state: directory + mode: "0755" + loop: + - rancher + - rancher/k3s + +- name: Insert registries into /etc/rancher/k3s/registries.yaml + ansible.builtin.blockinfile: + path: /etc/rancher/k3s/registries.yaml + block: "{{ custom_registries_yaml }}" + mode: "0600" + create: true diff --git a/k3s-ansible-copia/roles/k3s_server/defaults/main.yml b/k3s-ansible-copia/roles/k3s_server/defaults/main.yml new file mode 100644 index 0000000..1d18efd --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server/defaults/main.yml @@ -0,0 +1,40 @@ +--- +extra_server_args: "" + +k3s_kubectl_binary: k3s kubectl + +group_name_master: master + +kube_vip_arp: true +kube_vip_iface: +kube_vip_cloud_provider_tag_version: main +kube_vip_tag_version: v0.7.2 + +kube_vip_bgp: false +kube_vip_bgp_routerid: 127.0.0.1 +kube_vip_bgp_as: "64513" +kube_vip_bgp_peeraddress: 192.168.30.1 +kube_vip_bgp_peeras: "64512" + +kube_vip_bgp_peers: [] +kube_vip_bgp_peers_groups: ['k3s_master'] + +metal_lb_controller_tag_version: v0.14.3 +metal_lb_speaker_tag_version: v0.14.3 +metal_lb_type: native + +retry_count: 20 + +# yamllint disable rule:line-length +server_init_args: >- + {% if groups[group_name_master | default('master')] | length > 1 %} + {% if ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] %} + --cluster-init + {% else %} + --server https://{{ hostvars[groups[group_name_master | default('master')][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443 + {% endif %} + --token {{ k3s_token }} + {% endif %} + {{ extra_server_args }} + +systemd_dir: /etc/systemd/system diff --git a/k3s-ansible-copia/roles/k3s_server/meta/main.yml b/k3s-ansible-copia/roles/k3s_server/meta/main.yml new file mode 100644 index 0000000..7d9fbfd --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server/meta/main.yml @@ -0,0 +1,135 @@ +--- +argument_specs: + main: + short_description: Setup k3s servers + options: + apiserver_endpoint: + description: Virtual ip-address configured on each master + required: true + + cilium_bgp: + 
description: + - Enable cilium BGP control plane for LB services and pod cidrs. + - Disables the use of MetalLB. + type: bool + default: ~ + + cilium_iface: + description: The network interface used for when Cilium is enabled + default: ~ + + extra_server_args: + description: Extra arguments for server nodes + default: "" + + group_name_master: + description: Name of the master group + default: master + + k3s_create_kubectl_symlink: + description: Create the kubectl -> k3s symlink + default: false + type: bool + + k3s_create_crictl_symlink: + description: Create the crictl -> k3s symlink + default: false + type: bool + + kube_vip_arp: + description: Enables kube-vip ARP broadcasts + default: true + type: bool + + kube_vip_bgp: + description: Enables kube-vip BGP peering + default: false + type: bool + + kube_vip_bgp_routerid: + description: Defines the router ID for the kube-vip BGP server + default: "127.0.0.1" + + kube_vip_bgp_as: + description: Defines the AS for the kube-vip BGP server + default: "64513" + + kube_vip_bgp_peeraddress: + description: Defines the address for the kube-vip BGP peer + default: "192.168.30.1" + + kube_vip_bgp_peeras: + description: Defines the AS for the kube-vip BGP peer + default: "64512" + + kube_vip_bgp_peers: + description: List of BGP peer ASN & address pairs + default: [] + + kube_vip_bgp_peers_groups: + description: Inventory group in which to search for additional kube_vip_bgp_peers parameters to merge. + default: ['k3s_master'] + + kube_vip_iface: + description: + - Explicitly define an interface that ALL control nodes + - should use to propagate the VIP, define it here. + - Otherwise, kube-vip will determine the right interface + - automatically at runtime. + default: ~ + + kube_vip_tag_version: + description: Image tag for kube-vip + default: v0.7.2 + + kube_vip_cloud_provider_tag_version: + description: Tag for kube-vip-cloud-provider manifest when enabled + default: main + + kube_vip_lb_ip_range: + description: IP range for kube-vip load balancer + default: ~ + + metal_lb_controller_tag_version: + description: Image tag for MetalLB + default: v0.14.3 + + metal_lb_speaker_tag_version: + description: Image tag for MetalLB + default: v0.14.3 + + metal_lb_type: + choices: + - frr + - native + default: native + description: Use FRR mode or native. Valid values are `frr` and `native` + + proxy_env: + type: dict + description: + - Internet proxy configurations. + - See https://docs.k3s.io/advanced#configuring-an-http-proxy for details + default: ~ + options: + HTTP_PROXY: + description: HTTP internet proxy + required: true + HTTPS_PROXY: + description: HTTPS internet proxy + required: true + NO_PROXY: + description: Addresses that will not use the proxies + required: true + + retry_count: + description: Amount of retries when verifying that nodes joined + type: int + default: 20 + + server_init_args: + description: Arguments for server nodes + + systemd_dir: + description: Path to systemd services + default: /etc/systemd/system diff --git a/k3s-ansible-copia/roles/k3s_server/tasks/fetch_k3s_init_logs.yml b/k3s-ansible-copia/roles/k3s_server/tasks/fetch_k3s_init_logs.yml new file mode 100644 index 0000000..ae6f522 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server/tasks/fetch_k3s_init_logs.yml @@ -0,0 +1,28 @@ +--- +# Download logs of k3s-init.service from the nodes to localhost. +# Note that log_destination must be set. 
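For the kube-vip BGP options documented above, the peer list entries are consumed by vip.yaml.j2 via the `peer_address` and `peer_asn` attributes, and extra peers can be merged in from variables matching `kube_vip_bgp_peers__*` (see vip.yml later in this patch). A sketch of the corresponding group_vars, with illustrative addresses and an arbitrary `__upstream` suffix, might be:

```yaml
# Illustrative BGP peering configuration; addresses and ASNs are examples only.
kube_vip_bgp: true
kube_vip_bgp_as: "64513"
kube_vip_bgp_peers:
  - peer_address: 192.168.30.1
    peer_asn: "64512"
# Additional peers contributed from another group_vars file are merged via the
# kube_vip_bgp_peers__<suffix> naming convention (the suffix name is arbitrary).
kube_vip_bgp_peers__upstream:
  - peer_address: 192.168.30.2
    peer_asn: "64512"
```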
+ +- name: Fetch k3s-init.service logs + ansible.builtin.command: + cmd: journalctl --all --unit=k3s-init.service + changed_when: false + register: k3s_init_log + +- name: Create {{ log_destination }} + delegate_to: localhost + run_once: true + become: false + ansible.builtin.file: + path: "{{ log_destination }}" + state: directory + mode: "0755" + +- name: Store logs to {{ log_destination }} + delegate_to: localhost + become: false + ansible.builtin.template: + src: content.j2 + dest: "{{ log_destination }}/k3s-init@{{ ansible_hostname }}.log" + mode: "0644" + vars: + content: "{{ k3s_init_log.stdout }}" diff --git a/k3s-ansible-copia/roles/k3s_server/tasks/http_proxy.yml b/k3s-ansible-copia/roles/k3s_server/tasks/http_proxy.yml new file mode 100644 index 0000000..5b7c534 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server/tasks/http_proxy.yml @@ -0,0 +1,16 @@ +--- +- name: Create k3s.service.d directory + ansible.builtin.file: + path: "{{ systemd_dir }}/k3s.service.d" + state: directory + owner: root + group: root + mode: "0755" + +- name: Copy K3s http_proxy conf file + ansible.builtin.template: + src: http_proxy.conf.j2 + dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf" + owner: root + group: root + mode: "0755" diff --git a/k3s-ansible-copia/roles/k3s_server/tasks/kube-vip.yml b/k3s-ansible-copia/roles/k3s_server/tasks/kube-vip.yml new file mode 100644 index 0000000..f8b53e6 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server/tasks/kube-vip.yml @@ -0,0 +1,27 @@ +--- +- name: Create manifests directory on first master + ansible.builtin.file: + path: /var/lib/rancher/k3s/server/manifests + state: directory + owner: root + group: root + mode: "0644" + when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] + +- name: Download vip cloud provider manifest to first master + ansible.builtin.get_url: + url: https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml # noqa yaml[line-length] + dest: /var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml + owner: root + group: root + mode: "0644" + when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] + +- name: Copy kubevip configMap manifest to first master + ansible.builtin.template: + src: kubevip.yaml.j2 + dest: /var/lib/rancher/k3s/server/manifests/kubevip.yaml + owner: root + group: root + mode: "0644" + when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] diff --git a/k3s-ansible-copia/roles/k3s_server/tasks/main.yml b/k3s-ansible-copia/roles/k3s_server/tasks/main.yml new file mode 100644 index 0000000..8ebaad7 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server/tasks/main.yml @@ -0,0 +1,173 @@ +--- +- name: Stop k3s-init + ansible.builtin.systemd: + name: k3s-init + state: stopped + failed_when: false + +# k3s-init won't work if the port is already in use +- name: Stop k3s + ansible.builtin.systemd: + name: k3s + state: stopped + failed_when: false + +- name: Clean previous runs of k3s-init # noqa command-instead-of-module + # The systemd module does not support "reset-failed", so we need to resort to command. 
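To illustrate the kubevip ConfigMap that the copy task above places on the first master: with `kube_vip_lb_ip_range` set as in the kube-vip Molecule scenario, kubevip.yaml.j2 (shown later in this patch) renders to roughly:

```yaml
# Illustrative rendering of kubevip.yaml.j2 with
# kube_vip_lb_ip_range: 192.168.30.110-192.168.30.119
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubevip
  namespace: kube-system
data:
  range-global: 192.168.30.110-192.168.30.119
```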
+ ansible.builtin.command: systemctl reset-failed k3s-init + failed_when: false + changed_when: false + +- name: Deploy K3s http_proxy conf + ansible.builtin.include_tasks: http_proxy.yml + when: proxy_env is defined + +- name: Deploy vip manifest + ansible.builtin.include_tasks: vip.yml +- name: Deploy metallb manifest + ansible.builtin.include_tasks: metallb.yml + tags: metallb + when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined) + +- name: Deploy kube-vip manifest + ansible.builtin.include_tasks: kube-vip.yml + tags: kubevip + when: kube_vip_lb_ip_range is defined + +- name: Init cluster inside the transient k3s-init service + ansible.builtin.command: + cmd: systemd-run -p RestartSec=2 -p Restart=on-failure --unit=k3s-init k3s server {{ server_init_args }} + creates: "{{ systemd_dir }}/k3s-init.service" + +- name: Verification + when: not ansible_check_mode + block: + - name: Verify that all nodes actually joined (check k3s-init.service if this fails) + ansible.builtin.command: + cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} get nodes -l 'node-role.kubernetes.io/master=true' -o=jsonpath='{.items[*].metadata.name}'" # yamllint disable-line rule:line-length + register: nodes + until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length) # yamllint disable-line rule:line-length + retries: "{{ retry_count | default(20) }}" + delay: 10 + changed_when: false + always: + - name: Save logs of k3s-init.service + ansible.builtin.include_tasks: fetch_k3s_init_logs.yml + when: log_destination + vars: + log_destination: >- + {{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }} + - name: Kill the temporary service used for initialization + ansible.builtin.systemd: + name: k3s-init + state: stopped + failed_when: false + +- name: Copy K3s service file + register: k3s_service + ansible.builtin.template: + src: k3s.service.j2 + dest: "{{ systemd_dir }}/k3s.service" + owner: root + group: root + mode: "0644" + +- name: Enable and check K3s service + ansible.builtin.systemd: + name: k3s + daemon_reload: true + state: restarted + enabled: true + +- name: Wait for node-token + ansible.builtin.wait_for: + path: /var/lib/rancher/k3s/server/node-token + +- name: Register node-token file access mode + ansible.builtin.stat: + path: /var/lib/rancher/k3s/server + register: p + +- name: Change file access node-token + ansible.builtin.file: + path: /var/lib/rancher/k3s/server + mode: g+rx,o+rx + +- name: Read node-token from master + ansible.builtin.slurp: + src: /var/lib/rancher/k3s/server/node-token + register: node_token + +- name: Store Master node-token + ansible.builtin.set_fact: + token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}" + +- name: Restore node-token file access + ansible.builtin.file: + path: /var/lib/rancher/k3s/server + mode: "{{ p.stat.mode }}" + +- name: Create directory .kube + ansible.builtin.file: + path: "{{ ansible_user_dir }}/.kube" + state: directory + owner: "{{ ansible_user_id }}" + mode: u=rwx,g=rx,o= + +- name: Copy config file to user home directory + ansible.builtin.copy: + src: /etc/rancher/k3s/k3s.yaml + dest: "{{ ansible_user_dir }}/.kube/config" + remote_src: true + owner: "{{ ansible_user_id }}" + mode: u=rw,g=,o= + +- name: Configure kubectl cluster to {{ endpoint_url }} + ansible.builtin.command: >- + {{ k3s_kubectl_binary | default('k3s kubectl') }} config set-cluster default + --server={{ endpoint_url }} + --kubeconfig {{ 
ansible_user_dir }}/.kube/config + changed_when: true + vars: + endpoint_url: >- + https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 +# Deactivated linter rules: +# - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap +# would be undefined. This will not be the case during playbook execution. +# noqa jinja[invalid] + +- name: Create kubectl symlink + ansible.builtin.file: + src: /usr/local/bin/k3s + dest: /usr/local/bin/kubectl + state: link + when: k3s_create_kubectl_symlink | default(true) | bool + +- name: Create crictl symlink + ansible.builtin.file: + src: /usr/local/bin/k3s + dest: /usr/local/bin/crictl + state: link + when: k3s_create_crictl_symlink | default(true) | bool + +- name: Get contents of manifests folder + ansible.builtin.find: + paths: /var/lib/rancher/k3s/server/manifests + file_type: file + register: k3s_server_manifests + +- name: Get sub dirs of manifests folder + ansible.builtin.find: + paths: /var/lib/rancher/k3s/server/manifests + file_type: directory + register: k3s_server_manifests_directories + +- name: Remove manifests and folders that are only needed for bootstrapping cluster so k3s doesn't auto apply on start + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + with_items: + - "{{ k3s_server_manifests.files }}" + - "{{ k3s_server_manifests_directories.files }}" + loop_control: + label: "{{ item.path }}" diff --git a/k3s-ansible-copia/roles/k3s_server/tasks/metallb.yml b/k3s-ansible-copia/roles/k3s_server/tasks/metallb.yml new file mode 100644 index 0000000..7624d16 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server/tasks/metallb.yml @@ -0,0 +1,30 @@ +--- +- name: Create manifests directory on first master + ansible.builtin.file: + path: /var/lib/rancher/k3s/server/manifests + state: directory + owner: root + group: root + mode: "0644" + when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] + +- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}" + ansible.builtin.get_url: + url: https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml # noqa yaml[line-length] + dest: /var/lib/rancher/k3s/server/manifests/metallb-crds.yaml + owner: root + group: root + mode: "0644" + when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] + +- name: Set image versions in manifest for metallb-{{ metal_lb_type }} + ansible.builtin.replace: + path: /var/lib/rancher/k3s/server/manifests/metallb-crds.yaml + regexp: "{{ item.change | ansible.builtin.regex_escape }}" + replace: "{{ item.to }}" + with_items: + - change: metallb/speaker:{{ metal_lb_controller_tag_version }} + to: metallb/speaker:{{ metal_lb_speaker_tag_version }} + loop_control: + label: "{{ item.change }} => {{ item.to }}" + when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] diff --git a/k3s-ansible-copia/roles/k3s_server/tasks/vip.yml b/k3s-ansible-copia/roles/k3s_server/tasks/vip.yml new file mode 100644 index 0000000..aba5b4f --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server/tasks/vip.yml @@ -0,0 +1,31 @@ +--- +- name: Set _kube_vip_bgp_peers fact + ansible.builtin.set_fact: + _kube_vip_bgp_peers: "{{ lookup('community.general.merge_variables', '^kube_vip_bgp_peers__.+$', initial_value=kube_vip_bgp_peers, groups=kube_vip_bgp_peers_groups) }}" # yamllint disable-line rule:line-length + +- name: 
Create manifests directory on first master + ansible.builtin.file: + path: /var/lib/rancher/k3s/server/manifests + state: directory + owner: root + group: root + mode: "0644" + when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] + +- name: Download vip rbac manifest to first master + ansible.builtin.get_url: + url: https://kube-vip.io/manifests/rbac.yaml + dest: /var/lib/rancher/k3s/server/manifests/vip-rbac.yaml + owner: root + group: root + mode: "0644" + when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] + +- name: Copy vip manifest to first master + ansible.builtin.template: + src: vip.yaml.j2 + dest: /var/lib/rancher/k3s/server/manifests/vip.yaml + owner: root + group: root + mode: "0644" + when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] diff --git a/k3s-ansible-copia/roles/k3s_server/templates/content.j2 b/k3s-ansible-copia/roles/k3s_server/templates/content.j2 new file mode 100644 index 0000000..fe7fd8b --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server/templates/content.j2 @@ -0,0 +1,5 @@ +{# + This is a really simple template that just outputs the + value of the "content" variable. +#} +{{ content }} diff --git a/k3s-ansible-copia/roles/k3s_server/templates/http_proxy.conf.j2 b/k3s-ansible-copia/roles/k3s_server/templates/http_proxy.conf.j2 new file mode 100644 index 0000000..6591d45 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server/templates/http_proxy.conf.j2 @@ -0,0 +1,4 @@ +[Service] +Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }} +Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }} +Environment=NO_PROXY={{ proxy_env.NO_PROXY }} diff --git a/k3s-ansible-copia/roles/k3s_server/templates/k3s.service.j2 b/k3s-ansible-copia/roles/k3s_server/templates/k3s.service.j2 new file mode 100644 index 0000000..ae5cb48 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server/templates/k3s.service.j2 @@ -0,0 +1,24 @@ +[Unit] +Description=Lightweight Kubernetes +Documentation=https://k3s.io +After=network-online.target + +[Service] +Type=notify +ExecStartPre=-/sbin/modprobe br_netfilter +ExecStartPre=-/sbin/modprobe overlay +ExecStart=/usr/local/bin/k3s server {{ extra_server_args | default("") }} +KillMode=process +Delegate=yes +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. 
+LimitNOFILE=1048576 +LimitNPROC=infinity +LimitCORE=infinity +TasksMax=infinity +TimeoutStartSec=0 +Restart=always +RestartSec=5s + +[Install] +WantedBy=multi-user.target diff --git a/k3s-ansible-copia/roles/k3s_server/templates/kubevip.yaml.j2 b/k3s-ansible-copia/roles/k3s_server/templates/kubevip.yaml.j2 new file mode 100644 index 0000000..40d8b50 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server/templates/kubevip.yaml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: kubevip + namespace: kube-system +data: +{% if kube_vip_lb_ip_range is string %} +{# kube_vip_lb_ip_range was used in the legacy way: single string instead of a list #} +{# => transform to list with single element #} +{% set kube_vip_lb_ip_range = [kube_vip_lb_ip_range] %} +{% endif %} + range-global: {{ kube_vip_lb_ip_range | join(',') }} diff --git a/k3s-ansible-copia/roles/k3s_server/templates/vip.yaml.j2 b/k3s-ansible-copia/roles/k3s_server/templates/vip.yaml.j2 new file mode 100644 index 0000000..44469a6 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server/templates/vip.yaml.j2 @@ -0,0 +1,104 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-vip-ds + namespace: kube-system +spec: + selector: + matchLabels: + name: kube-vip-ds + template: + metadata: + labels: + name: kube-vip-ds + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + containers: + - args: + - manager + env: + - name: vip_arp + value: "{{ 'true' if kube_vip_arp | default(true) | bool else 'false' }}" + - name: bgp_enable + value: "{{ 'true' if kube_vip_bgp | default(false) | bool else 'false' }}" + - name: port + value: "6443" +{% if kube_vip_iface %} + - name: vip_interface + value: {{ kube_vip_iface }} +{% endif %} + - name: vip_cidr + value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "{{ 'true' if kube_vip_lb_ip_range is defined else 'false' }}" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: {{ apiserver_endpoint }} +{% if kube_vip_bgp | default(false) | bool %} +{% if kube_vip_bgp_routerid is defined %} + - name: bgp_routerid + value: "{{ kube_vip_bgp_routerid }}" +{% endif %} +{% if _kube_vip_bgp_peers | length > 0 %} + - name: bgppeers + value: "{{ _kube_vip_bgp_peers | map(attribute='peer_address') | zip(_kube_vip_bgp_peers| map(attribute='peer_asn')) | map('join', ',') | join(':') }}" # yamllint disable-line rule:line-length +{% else %} +{% if kube_vip_bgp_as is defined %} + - name: bgp_as + value: "{{ kube_vip_bgp_as }}" +{% endif %} +{% if kube_vip_bgp_peeraddress is defined %} + - name: bgp_peeraddress + value: "{{ kube_vip_bgp_peeraddress }}" +{% endif %} +{% if kube_vip_bgp_peeras is defined %} + - name: bgp_peeras + value: "{{ kube_vip_bgp_peeras }}" +{% endif %} +{% endif %} +{% endif %} + image: ghcr.io/kube-vip/kube-vip:{{ kube_vip_tag_version }} + imagePullPolicy: Always + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + - SYS_TIME + hostNetwork: true + serviceAccountName: kube-vip + 
tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + updateStrategy: {} +status: + currentNumberScheduled: 0 + desiredNumberScheduled: 0 + numberMisscheduled: 0 + numberReady: 0 diff --git a/k3s-ansible-copia/roles/k3s_server_post/defaults/main.yml b/k3s-ansible-copia/roles/k3s_server_post/defaults/main.yml new file mode 100644 index 0000000..578e557 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server_post/defaults/main.yml @@ -0,0 +1,32 @@ +--- +k3s_kubectl_binary: k3s kubectl + +bpf_lb_algorithm: maglev +bpf_lb_mode: hybrid + +calico_blockSize: 26 # noqa var-naming +calico_ebpf: false +calico_encapsulation: VXLANCrossSubnet +calico_natOutgoing: Enabled # noqa var-naming +calico_nodeSelector: all() # noqa var-naming +calico_tag: v3.27.2 + +cilium_bgp: false +cilium_exportPodCIDR: true # noqa var-naming +cilium_bgp_my_asn: 64513 +cilium_bgp_peer_asn: 64512 +cilium_bgp_neighbors: [] +cilium_bgp_neighbors_groups: ['k3s_all'] +cilium_bgp_lb_cidr: 192.168.31.0/24 +cilium_hubble: true +cilium_mode: native + +cluster_cidr: 10.52.0.0/16 +enable_bpf_masquerade: true +kube_proxy_replacement: true +group_name_master: master + +metal_lb_mode: layer2 +metal_lb_available_timeout: 240s +metal_lb_controller_tag_version: v0.14.3 +metal_lb_ip_range: 192.168.30.80-192.168.30.90 diff --git a/k3s-ansible-copia/roles/k3s_server_post/meta/main.yml b/k3s-ansible-copia/roles/k3s_server_post/meta/main.yml new file mode 100644 index 0000000..f9fc83d --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server_post/meta/main.yml @@ -0,0 +1,153 @@ +--- +argument_specs: + main: + short_description: Configure k3s cluster + options: + apiserver_endpoint: + description: Virtual ip-address configured on each master + required: true + + bpf_lb_algorithm: + description: BPF lb algorithm + default: maglev + + bpf_lb_mode: + description: BPF lb mode + default: hybrid + + calico_blockSize: + description: IP pool block size + type: int + default: 26 + + calico_ebpf: + description: Use eBPF dataplane instead of iptables + type: bool + default: false + + calico_encapsulation: + description: IP pool encapsulation + default: VXLANCrossSubnet + + calico_natOutgoing: + description: IP pool NAT outgoing + default: Enabled + + calico_nodeSelector: + description: IP pool node selector + default: all() + + calico_iface: + description: The network interface used for when Calico is enabled + default: ~ + + calico_tag: + description: Calico version tag + default: v3.27.2 + + cilium_bgp: + description: + - Enable cilium BGP control plane for LB services and pod cidrs. + - Disables the use of MetalLB. + type: bool + default: false + + cilium_bgp_my_asn: + description: Local ASN for BGP peer + type: int + default: 64513 + + cilium_bgp_peer_asn: + description: BGP peer ASN + type: int + default: 64512 + + cilium_bgp_peer_address: + description: BGP peer address + default: ~ + + cilium_bgp_neighbors: + description: List of BGP peer ASN & address pairs + default: [] + + cilium_bgp_neighbors_groups: + description: Inventory group in which to search for additional cilium_bgp_neighbors parameters to merge. 
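Tying together the Cilium BGP options documented above, a minimal group_vars sketch that turns on the BGP control plane (and thereby skips MetalLB) could look like the following; all addresses and ASNs are illustrative, and only variables declared in this role's spec are used.

```yaml
# Illustrative Cilium BGP configuration using the variables documented above.
cilium_iface: eth1
cilium_bgp: true
cilium_bgp_my_asn: 64513
cilium_bgp_peer_asn: 64512
cilium_bgp_peer_address: 192.168.30.1
cilium_bgp_lb_cidr: 192.168.31.0/24
```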
+ default: ['k3s_all'] + + cilium_bgp_lb_cidr: + description: BGP load balancer IP range + default: 192.168.31.0/24 + + cilium_exportPodCIDR: + description: Export pod CIDR + type: bool + default: true + + cilium_hubble: + description: Enable Cilium Hubble + type: bool + default: true + + cilium_iface: + description: The network interface used for when Cilium is enabled + default: ~ + + cilium_mode: + description: Inner-node communication mode + default: native + choices: + - native + - routed + + cluster_cidr: + description: Inner-cluster IP range + default: 10.52.0.0/16 + + enable_bpf_masquerade: + description: Use IP masquerading + type: bool + default: true + + group_name_master: + description: Name of the master group + default: master + + kube_proxy_replacement: + description: Replace the native kube-proxy with Cilium + type: bool + default: true + + kube_vip_lb_ip_range: + description: IP range for kube-vip load balancer + default: ~ + + metal_lb_available_timeout: + description: Wait for MetalLB resources + default: 240s + + metal_lb_ip_range: + description: MetalLB ip range for load balancer + default: 192.168.30.80-192.168.30.90 + + metal_lb_controller_tag_version: + description: Image tag for MetalLB + default: v0.14.3 + + metal_lb_mode: + description: Metallb mode + default: layer2 + choices: + - bgp + - layer2 + + metal_lb_bgp_my_asn: + description: BGP ASN configurations + default: ~ + + metal_lb_bgp_peer_asn: + description: BGP peer ASN configurations + default: ~ + + metal_lb_bgp_peer_address: + description: BGP peer address + default: ~ diff --git a/k3s-ansible-copia/roles/k3s_server_post/tasks/calico.yml b/k3s-ansible-copia/roles/k3s_server_post/tasks/calico.yml new file mode 100644 index 0000000..2a9302f --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server_post/tasks/calico.yml @@ -0,0 +1,120 @@ +--- +- name: Deploy Calico to cluster + when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] + run_once: true + block: + - name: Create manifests directory on first master + ansible.builtin.file: + path: /tmp/k3s + state: directory + owner: root + group: root + mode: "0755" + + - name: "Download to first master: manifest for Tigera Operator and Calico CRDs" + ansible.builtin.get_url: + url: https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml + dest: /tmp/k3s/tigera-operator.yaml + owner: root + group: root + mode: "0755" + + - name: Copy Calico custom resources manifest to first master + ansible.builtin.template: + src: calico.crs.j2 + dest: /tmp/k3s/custom-resources.yaml + owner: root + group: root + mode: "0755" + + - name: Deploy or replace Tigera Operator + block: + - name: Deploy Tigera Operator + ansible.builtin.command: + cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} create -f /tmp/k3s/tigera-operator.yaml" + register: create_operator + changed_when: "'created' in create_operator.stdout" + failed_when: "'Error' in create_operator.stderr and 'already exists' not in create_operator.stderr" + rescue: + - name: Replace existing Tigera Operator + ansible.builtin.command: + cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} replace -f /tmp/k3s/tigera-operator.yaml" + register: replace_operator + changed_when: "'replaced' in replace_operator.stdout" + failed_when: "'Error' in replace_operator.stderr" + + - name: Wait for Tigera Operator resources + ansible.builtin.command: >- + {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name 
}} + --namespace='tigera-operator' + --for=condition=Available=True + --timeout=30s + register: tigera_result + changed_when: false + until: tigera_result is succeeded + retries: 7 + delay: 7 + with_items: + - { name: tigera-operator, type: deployment } + loop_control: + label: "{{ item.type }}/{{ item.name }}" + + - name: Deploy Calico custom resources + block: + - name: Deploy custom resources for Calico + ansible.builtin.command: + cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} create -f /tmp/k3s/custom-resources.yaml" + register: create_cr + changed_when: "'created' in create_cr.stdout" + failed_when: "'Error' in create_cr.stderr and 'already exists' not in create_cr.stderr" + rescue: + - name: Apply new Calico custom resource manifest + ansible.builtin.command: + cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/custom-resources.yaml" + register: apply_cr + changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout" + failed_when: "'Error' in apply_cr.stderr" + + - name: Wait for Calico system resources to be available + ansible.builtin.command: >- + {% if item.type == 'daemonset' %} + {{ k3s_kubectl_binary | default('k3s kubectl') }} wait pods + --namespace='{{ item.namespace }}' + --selector={{ item.selector }} + --for=condition=Ready + {% else %} + {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name }} + --namespace='{{ item.namespace }}' + --for=condition=Available + {% endif %} + --timeout=30s + register: cr_result + changed_when: false + until: cr_result is succeeded + retries: 30 + delay: 7 + with_items: + - { name: calico-typha, type: deployment, namespace: calico-system } + - { name: calico-kube-controllers, type: deployment, namespace: calico-system } + - name: csi-node-driver + type: daemonset + selector: k8s-app=csi-node-driver + namespace: calico-system + - name: calico-node + type: daemonset + selector: k8s-app=calico-node + namespace: calico-system + - { name: calico-apiserver, type: deployment, namespace: calico-apiserver } + loop_control: + label: "{{ item.type }}/{{ item.name }}" + + - name: Patch Felix configuration for eBPF mode + ansible.builtin.command: + cmd: > + {{ k3s_kubectl_binary | default('k3s kubectl') }} patch felixconfiguration default + --type='merge' + --patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}' + register: patch_result + changed_when: "'felixconfiguration.projectcalico.org/default patched' in patch_result.stdout" + failed_when: "'Error' in patch_result.stderr" + when: calico_ebpf diff --git a/k3s-ansible-copia/roles/k3s_server_post/tasks/cilium.yml b/k3s-ansible-copia/roles/k3s_server_post/tasks/cilium.yml new file mode 100644 index 0000000..d7a48b0 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server_post/tasks/cilium.yml @@ -0,0 +1,256 @@ +--- +- name: Prepare Cilium CLI on first master and deploy CNI + when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] + run_once: true + block: + - name: Create tmp directory on first master + ansible.builtin.file: + path: /tmp/k3s + state: directory + owner: root + group: root + mode: "0755" + + - name: Check if Cilium CLI is installed + ansible.builtin.command: cilium version + register: cilium_cli_installed + failed_when: false + changed_when: false + ignore_errors: true + + - name: Check for Cilium CLI version in command output + ansible.builtin.set_fact: + installed_cli_version: >- + {{ + cilium_cli_installed.stdout_lines + | join(' ') + | 
regex_findall('cilium-cli: (v\d+\.\d+\.\d+)') + | first + | default('unknown') + }} + when: cilium_cli_installed.rc == 0 + + - name: Get latest stable Cilium CLI version file + ansible.builtin.get_url: + url: https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt + dest: /tmp/k3s/cilium-cli-stable.txt + owner: root + group: root + mode: "0755" + + - name: Read Cilium CLI stable version from file + ansible.builtin.command: cat /tmp/k3s/cilium-cli-stable.txt + register: cli_ver + changed_when: false + + - name: Log installed Cilium CLI version + ansible.builtin.debug: + msg: "Installed Cilium CLI version: {{ installed_cli_version | default('Not installed') }}" + + - name: Log latest stable Cilium CLI version + ansible.builtin.debug: + msg: "Latest Cilium CLI version: {{ cli_ver.stdout }}" + + - name: Determine if Cilium CLI needs installation or update + ansible.builtin.set_fact: + cilium_cli_needs_update: >- + {{ + cilium_cli_installed.rc != 0 or + (cilium_cli_installed.rc == 0 and + installed_cli_version != cli_ver.stdout) + }} + + - name: Install or update Cilium CLI + when: cilium_cli_needs_update + block: + - name: Set architecture variable + ansible.builtin.set_fact: + cli_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}" + + - name: Download Cilium CLI and checksum + ansible.builtin.get_url: + url: "{{ cilium_base_url }}/cilium-linux-{{ cli_arch }}{{ item }}" + dest: /tmp/k3s/cilium-linux-{{ cli_arch }}{{ item }} + owner: root + group: root + mode: "0755" + loop: + - .tar.gz + - .tar.gz.sha256sum + vars: + cilium_base_url: https://github.com/cilium/cilium-cli/releases/download/{{ cli_ver.stdout }} + + - name: Verify the downloaded tarball + ansible.builtin.shell: | + cd /tmp/k3s && sha256sum --check cilium-linux-{{ cli_arch }}.tar.gz.sha256sum + args: + executable: /bin/bash + changed_when: false + + - name: Extract Cilium CLI to /usr/local/bin + ansible.builtin.unarchive: + src: /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz + dest: /usr/local/bin + remote_src: true + + - name: Remove downloaded tarball and checksum file + ansible.builtin.file: + path: "{{ item }}" + state: absent + loop: + - /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz + - /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz.sha256sum + + - name: Wait for connectivity to kube VIP + ansible.builtin.command: ping -c 1 {{ apiserver_endpoint }} + register: ping_result + until: ping_result.rc == 0 + retries: 21 + delay: 1 + ignore_errors: true + changed_when: false + + - name: Fail if kube VIP not reachable + ansible.builtin.fail: + msg: API endpoint {{ apiserver_endpoint }} is not reachable + when: ping_result.rc != 0 + + - name: Test for existing Cilium install + ansible.builtin.command: | + {{ k3s_kubectl_binary | default('k3s kubectl') }} -n kube-system get daemonsets cilium + register: cilium_installed + failed_when: false + changed_when: false + ignore_errors: true + + - name: Check existing Cilium install + when: cilium_installed.rc == 0 + block: + - name: Check Cilium version + ansible.builtin.command: cilium version + register: cilium_version + failed_when: false + changed_when: false + ignore_errors: true + + - name: Parse installed Cilium version + ansible.builtin.set_fact: + installed_cilium_version: >- + {{ + cilium_version.stdout_lines + | join(' ') + | regex_findall('cilium image.+(\d+\.\d+\.\d+)') + | first + | default('unknown') + }} + + - name: Determine if Cilium needs update + ansible.builtin.set_fact: + cilium_needs_update: >- + {{ 'v' + installed_cilium_version != cilium_tag }} 
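+        # Editorial note (assumed output format): the two regex_findall filters above expect
+        # `cilium version` to print lines roughly like
+        #   cilium-cli: v0.16.4 compiled with go1.22.2 on linux/amd64
+        #   cilium image (running): 1.15.5
+        # The CLI version is compared as-is against stable.txt, while the running image
+        # version is prefixed with 'v' before being compared to cilium_tag; the result
+        # decides between `cilium install` and `cilium upgrade` in the task below.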
+ + - name: Log result + ansible.builtin.debug: + msg: > + Installed Cilium version: {{ installed_cilium_version }}, + Target Cilium version: {{ cilium_tag }}, + Update needed: {{ cilium_needs_update }} + + - name: Install Cilium + ansible.builtin.command: >- + {% if cilium_installed.rc != 0 %} + cilium install + {% else %} + cilium upgrade + {% endif %} + --version "{{ cilium_tag }}" + --helm-set operator.replicas="1" + {{ '--helm-set devices=' + cilium_iface if cilium_iface != 'auto' else '' }} + --helm-set ipam.operator.clusterPoolIPv4PodCIDRList={{ cluster_cidr }} + {% if cilium_mode == "native" or (cilium_bgp and cilium_exportPodCIDR != 'false') %} + --helm-set ipv4NativeRoutingCIDR={{ cluster_cidr }} + {% endif %} + --helm-set k8sServiceHost="127.0.0.1" + --helm-set k8sServicePort="6444" + --helm-set routingMode={{ cilium_mode }} + --helm-set autoDirectNodeRoutes={{ "true" if cilium_mode == "native" else "false" }} + --helm-set kubeProxyReplacement={{ kube_proxy_replacement }} + --helm-set bpf.masquerade={{ enable_bpf_masquerade }} + --helm-set bgpControlPlane.enabled={{ cilium_bgp | default("false") }} + --helm-set hubble.enabled={{ "true" if cilium_hubble else "false" }} + --helm-set hubble.relay.enabled={{ "true" if cilium_hubble else "false" }} + --helm-set hubble.ui.enabled={{ "true" if cilium_hubble else "false" }} + {% if kube_proxy_replacement is not false %} + --helm-set bpf.loadBalancer.algorithm={{ bpf_lb_algorithm }} + --helm-set bpf.loadBalancer.mode={{ bpf_lb_mode }} + {% endif %} + environment: + KUBECONFIG: "{{ ansible_user_dir }}/.kube/config" + register: cilium_install_result + changed_when: cilium_install_result.rc == 0 + when: cilium_installed.rc != 0 or cilium_needs_update + + - name: Wait for Cilium resources + ansible.builtin.command: >- + {% if item.type == 'daemonset' %} + {{ k3s_kubectl_binary | default('k3s kubectl') }} wait pods + --namespace=kube-system + --selector='k8s-app=cilium' + --for=condition=Ready + {% else %} + {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name }} + --namespace=kube-system + --for=condition=Available + {% endif %} + --timeout=30s + register: cr_result + changed_when: false + until: cr_result is succeeded + retries: 30 + delay: 7 + with_items: + - { name: cilium-operator, type: deployment } + - { name: cilium, type: daemonset, selector: k8s-app=cilium } + - { name: hubble-relay, type: deployment, check_hubble: true } + - { name: hubble-ui, type: deployment, check_hubble: true } + loop_control: + label: "{{ item.type }}/{{ item.name }}" + when: >- + not item.check_hubble | default(false) or (item.check_hubble | default(false) and cilium_hubble) + + - name: Configure Cilium BGP + when: cilium_bgp + block: + - name: Set _cilium_bgp_neighbors fact + ansible.builtin.set_fact: + _cilium_bgp_neighbors: "{{ lookup('community.general.merge_variables', '^cilium_bgp_neighbors__.+$', initial_value=cilium_bgp_neighbors, groups=cilium_bgp_neighbors_groups) }}" # yamllint disable-line rule:line-length + + - name: Copy BGP manifests to first master + ansible.builtin.template: + src: cilium.crs.j2 + dest: /tmp/k3s/cilium-bgp.yaml + owner: root + group: root + mode: "0755" + + - name: Apply BGP manifests + ansible.builtin.command: + cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/cilium-bgp.yaml" + register: apply_cr + changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout" + failed_when: "'is invalid' in apply_cr.stderr" + ignore_errors: true + + - name: Print 
error message if BGP manifests application fails + ansible.builtin.debug: + msg: "{{ apply_cr.stderr }}" + when: "'is invalid' in apply_cr.stderr" + + - name: Test for BGP config resources + ansible.builtin.command: "{{ item }}" + loop: + - "{{ k3s_kubectl_binary | default('k3s kubectl') }} get CiliumBGPPeeringPolicy.cilium.io" + - "{{ k3s_kubectl_binary | default('k3s kubectl') }} get CiliumLoadBalancerIPPool.cilium.io" + changed_when: false + loop_control: + label: "{{ item }}" diff --git a/k3s-ansible-copia/roles/k3s_server_post/tasks/main.yml b/k3s-ansible-copia/roles/k3s_server_post/tasks/main.yml new file mode 100644 index 0000000..1a02d8d --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server_post/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Deploy calico + ansible.builtin.include_tasks: calico.yml + tags: calico + when: calico_iface is defined and cilium_iface is not defined + +- name: Deploy cilium + ansible.builtin.include_tasks: cilium.yml + tags: cilium + when: cilium_iface is defined + +- name: Deploy metallb pool + ansible.builtin.include_tasks: metallb.yml + tags: metallb + when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined) + +- name: Remove tmp directory used for manifests + ansible.builtin.file: + path: /tmp/k3s + state: absent diff --git a/k3s-ansible-copia/roles/k3s_server_post/tasks/metallb.yml b/k3s-ansible-copia/roles/k3s_server_post/tasks/metallb.yml new file mode 100644 index 0000000..4a3279c --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server_post/tasks/metallb.yml @@ -0,0 +1,136 @@ +--- +- name: Create manifests directory for temp configuration + ansible.builtin.file: + path: /tmp/k3s + state: directory + owner: "{{ ansible_user_id }}" + mode: "0755" + with_items: "{{ groups[group_name_master | default('master')] }}" + run_once: true + +- name: Delete outdated metallb replicas + ansible.builtin.shell: |- + set -o pipefail + + REPLICAS=$({{ k3s_kubectl_binary | default('k3s kubectl') }} --namespace='metallb-system' get replicasets \ + -l 'component=controller,app=metallb' \ + -o jsonpath='{.items[0].spec.template.spec.containers[0].image}, {.items[0].metadata.name}' 2>/dev/null || true) + REPLICAS_SETS=$(echo ${REPLICAS} | grep -v '{{ metal_lb_controller_tag_version }}' | sed -e "s/^.*\s//g") + if [ -n "${REPLICAS_SETS}" ] ; then + for REPLICAS in "${REPLICAS_SETS}" + do + {{ k3s_kubectl_binary | default('k3s kubectl') }} --namespace='metallb-system' \ + delete rs "${REPLICAS}" + done + fi + args: + executable: /bin/bash + changed_when: false + run_once: true + with_items: "{{ groups[group_name_master | default('master')] }}" + +- name: Copy metallb CRs manifest to first master + ansible.builtin.template: + src: metallb.crs.j2 + dest: /tmp/k3s/metallb-crs.yaml + owner: "{{ ansible_user_id }}" + mode: "0755" + with_items: "{{ groups[group_name_master | default('master')] }}" + run_once: true + +- name: Test metallb-system namespace + ansible.builtin.command: >- + {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system + changed_when: false + with_items: "{{ groups[group_name_master | default('master')] }}" + run_once: true + +- name: Wait for MetalLB resources + ansible.builtin.command: >- + {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.resource }} + --namespace='metallb-system' + {% if item.name | default(False) -%}{{ item.name }}{%- endif %} + {% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %} + {% if item.condition | default(False) -%}{{ item.condition 
}}{%- endif %} + --timeout='{{ metal_lb_available_timeout }}' + changed_when: false + run_once: true + with_items: + - description: controller + resource: deployment + name: controller + condition: --for condition=Available=True + - description: webhook service + resource: pod + selector: component=controller + condition: --for=jsonpath='{.status.phase}'=Running + - description: pods in replica sets + resource: pod + selector: component=controller,app=metallb + condition: --for condition=Ready + - description: ready replicas of controller + resource: replicaset + selector: component=controller,app=metallb + condition: --for=jsonpath='{.status.readyReplicas}'=1 + - description: fully labeled replicas of controller + resource: replicaset + selector: component=controller,app=metallb + condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1 + - description: available replicas of controller + resource: replicaset + selector: component=controller,app=metallb + condition: --for=jsonpath='{.status.availableReplicas}'=1 + loop_control: + label: "{{ item.description }}" + +- name: Set metallb webhook service name + ansible.builtin.set_fact: + metallb_webhook_service_name: >- + {{ + ( + (metal_lb_controller_tag_version | regex_replace('^v', '')) + is + version('0.14.4', '<', version_type='semver') + ) | ternary( + 'webhook-service', + 'metallb-webhook-service' + ) + }} + +- name: Test metallb-system webhook-service endpoint + ansible.builtin.command: >- + {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get endpoints {{ metallb_webhook_service_name }} + changed_when: false + with_items: "{{ groups[group_name_master | default('master')] }}" + run_once: true + +- name: Apply metallb CRs + ansible.builtin.command: >- + {{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/metallb-crs.yaml + --timeout='{{ metal_lb_available_timeout }}' + register: this + changed_when: false + run_once: true + until: this.rc == 0 + retries: 5 + +- name: Test metallb-system resources for Layer 2 configuration + ansible.builtin.command: >- + {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get {{ item }} + changed_when: false + run_once: true + when: metal_lb_mode == "layer2" + with_items: + - IPAddressPool + - L2Advertisement + +- name: Test metallb-system resources for BGP configuration + ansible.builtin.command: >- + {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get {{ item }} + changed_when: false + run_once: true + when: metal_lb_mode == "bgp" + with_items: + - IPAddressPool + - BGPPeer + - BGPAdvertisement diff --git a/k3s-ansible-copia/roles/k3s_server_post/templates/calico.crs.j2 b/k3s-ansible-copia/roles/k3s_server_post/templates/calico.crs.j2 new file mode 100644 index 0000000..351b648 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server_post/templates/calico.crs.j2 @@ -0,0 +1,41 @@ +# This section includes base Calico installation configuration. +# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation +apiVersion: operator.tigera.io/v1 +kind: Installation +metadata: + name: default +spec: + # Configures Calico networking. + calicoNetwork: + # Note: The ipPools section cannot be modified post-install. 
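+    # The pool below is rendered from this role's variables: calico_blockSize, cluster_cidr,
+    # calico_encapsulation, calico_natOutgoing, calico_nodeSelector and calico_iface
+    # (typically set in the inventory group_vars); linuxDataplane switches to BPF when
+    # calico_ebpf is true.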
+ ipPools: + - blockSize: {{ calico_blockSize }} + cidr: {{ cluster_cidr }} + encapsulation: {{ calico_encapsulation }} + natOutgoing: {{ calico_natOutgoing }} + nodeSelector: {{ calico_nodeSelector }} + nodeAddressAutodetectionV4: + interface: {{ calico_iface }} + linuxDataplane: {{ 'BPF' if calico_ebpf else 'Iptables' }} + +--- + +# This section configures the Calico API server. +# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer +apiVersion: operator.tigera.io/v1 +kind: APIServer +metadata: + name: default +spec: {} + +{% if calico_ebpf %} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kubernetes-services-endpoint + namespace: tigera-operator +data: + KUBERNETES_SERVICE_HOST: '{{ apiserver_endpoint }}' + KUBERNETES_SERVICE_PORT: '6443' +{% endif %} diff --git a/k3s-ansible-copia/roles/k3s_server_post/templates/cilium.crs.j2 b/k3s-ansible-copia/roles/k3s_server_post/templates/cilium.crs.j2 new file mode 100644 index 0000000..5a9e81c --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server_post/templates/cilium.crs.j2 @@ -0,0 +1,48 @@ +apiVersion: "cilium.io/v2alpha1" +kind: CiliumBGPPeeringPolicy +metadata: + name: 01-bgp-peering-policy +spec: # CiliumBGPPeeringPolicySpec + virtualRouters: # []CiliumBGPVirtualRouter + - localASN: {{ cilium_bgp_my_asn }} + exportPodCIDR: {{ cilium_exportPodCIDR | default('true') }} + neighbors: # []CiliumBGPNeighbor +{% if _cilium_bgp_neighbors | length > 0 %} +{% for item in _cilium_bgp_neighbors %} + - peerAddress: '{{ item.peer_address + "/32"}}' + peerASN: {{ item.peer_asn }} + eBGPMultihopTTL: 10 + connectRetryTimeSeconds: 120 + holdTimeSeconds: 90 + keepAliveTimeSeconds: 30 + gracefulRestart: + enabled: true + restartTimeSeconds: 120 +{% endfor %} +{% else %} + - peerAddress: '{{ cilium_bgp_peer_address + "/32"}}' + peerASN: {{ cilium_bgp_peer_asn }} + eBGPMultihopTTL: 10 + connectRetryTimeSeconds: 120 + holdTimeSeconds: 90 + keepAliveTimeSeconds: 30 + gracefulRestart: + enabled: true + restartTimeSeconds: 120 +{% endif %} + serviceSelector: + matchExpressions: + - {key: somekey, operator: NotIn, values: ['never-used-value']} +--- +apiVersion: "cilium.io/v2alpha1" +kind: CiliumLoadBalancerIPPool +metadata: + name: "01-lb-pool" +spec: + blocks: +{% if "/" in cilium_bgp_lb_cidr %} + - cidr: {{ cilium_bgp_lb_cidr }} +{% else %} + - start: {{ cilium_bgp_lb_cidr.split('-')[0] }} + stop: {{ cilium_bgp_lb_cidr.split('-')[1] }} +{% endif %} diff --git a/k3s-ansible-copia/roles/k3s_server_post/templates/metallb.crs.j2 b/k3s-ansible-copia/roles/k3s_server_post/templates/metallb.crs.j2 new file mode 100644 index 0000000..562f561 --- /dev/null +++ b/k3s-ansible-copia/roles/k3s_server_post/templates/metallb.crs.j2 @@ -0,0 +1,43 @@ +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: +{% if metal_lb_ip_range is string %} +{# metal_lb_ip_range was used in the legacy way: single string instead of a list #} +{# => transform to list with single element #} +{% set metal_lb_ip_range = [metal_lb_ip_range] %} +{% endif %} +{% for range in metal_lb_ip_range %} + - {{ range }} +{% endfor %} + +{% if metal_lb_mode == "layer2" %} +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: default + namespace: metallb-system +{% endif %} +{% if metal_lb_mode == "bgp" %} +--- +apiVersion: metallb.io/v1beta2 +kind: BGPPeer +metadata: + name: default + namespace: metallb-system +spec: + myASN: {{ 
metal_lb_bgp_my_asn }} + peerASN: {{ metal_lb_bgp_peer_asn }} + peerAddress: {{ metal_lb_bgp_peer_address }} + +--- +apiVersion: metallb.io/v1beta1 +kind: BGPAdvertisement +metadata: + name: default + namespace: metallb-system +{% endif %} diff --git a/k3s-ansible-copia/roles/lxc/handlers/main.yml b/k3s-ansible-copia/roles/lxc/handlers/main.yml new file mode 100644 index 0000000..1c0002d --- /dev/null +++ b/k3s-ansible-copia/roles/lxc/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: Reboot server + become: true + ansible.builtin.reboot: + reboot_command: "{{ custom_reboot_command | default(omit) }}" + listen: reboot server diff --git a/k3s-ansible-copia/roles/lxc/meta/main.yml b/k3s-ansible-copia/roles/lxc/meta/main.yml new file mode 100644 index 0000000..42847df --- /dev/null +++ b/k3s-ansible-copia/roles/lxc/meta/main.yml @@ -0,0 +1,8 @@ +--- +argument_specs: + main: + short_description: Configure LXC + options: + custom_reboot_command: + default: ~ + description: Command to run on reboot diff --git a/k3s-ansible-copia/roles/lxc/tasks/main.yml b/k3s-ansible-copia/roles/lxc/tasks/main.yml new file mode 100644 index 0000000..3568687 --- /dev/null +++ b/k3s-ansible-copia/roles/lxc/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- name: Check for rc.local file + ansible.builtin.stat: + path: /etc/rc.local + register: rcfile + +- name: Create rc.local if needed + ansible.builtin.lineinfile: + path: /etc/rc.local + line: "#!/bin/sh -e" + create: true + insertbefore: BOF + mode: u=rwx,g=rx,o=rx + when: not rcfile.stat.exists + +- name: Write rc.local file + ansible.builtin.blockinfile: + path: /etc/rc.local + content: "{{ lookup('template', 'templates/rc.local.j2') }}" + state: present + notify: reboot server diff --git a/k3s-ansible-copia/roles/prereq/defaults/main.yml b/k3s-ansible-copia/roles/prereq/defaults/main.yml new file mode 100644 index 0000000..850cbbf --- /dev/null +++ b/k3s-ansible-copia/roles/prereq/defaults/main.yml @@ -0,0 +1,4 @@ +--- +secure_path: + RedHat: /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin + Suse: /usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin diff --git a/k3s-ansible-copia/roles/prereq/meta/main.yml b/k3s-ansible-copia/roles/prereq/meta/main.yml new file mode 100644 index 0000000..939124b --- /dev/null +++ b/k3s-ansible-copia/roles/prereq/meta/main.yml @@ -0,0 +1,7 @@ +--- +argument_specs: + main: + short_description: Prerequisites + options: + system_timezone: + description: Timezone to be set on all nodes diff --git a/k3s-ansible-copia/roles/prereq/tasks/main.yml b/k3s-ansible-copia/roles/prereq/tasks/main.yml new file mode 100644 index 0000000..08c4e19 --- /dev/null +++ b/k3s-ansible-copia/roles/prereq/tasks/main.yml @@ -0,0 +1,70 @@ +--- +- name: Set same timezone on every Server + become: yes + community.general.timezone: + name: "{{ system_timezone }}" + when: (system_timezone is defined) and (system_timezone != "Your/Timezone") + +- name: Set SELinux to disabled state + ansible.posix.selinux: + state: disabled + when: ansible_os_family == "RedHat" + +- name: Enable IPv4 forwarding + ansible.posix.sysctl: + name: net.ipv4.ip_forward + value: "1" + state: present + reload: true + tags: sysctl + +- name: Enable IPv6 forwarding + ansible.posix.sysctl: + name: net.ipv6.conf.all.forwarding + value: "1" + state: present + reload: true + tags: sysctl + +- name: Enable IPv6 router advertisements + ansible.posix.sysctl: + name: net.ipv6.conf.all.accept_ra + value: "2" + state: present + reload: true + tags: sysctl + +- name: Add br_netfilter to /etc/modules-load.d/ + 
ansible.builtin.copy: + content: br_netfilter + dest: /etc/modules-load.d/br_netfilter.conf + mode: u=rw,g=,o= + when: ansible_os_family == "RedHat" + +- name: Load br_netfilter + community.general.modprobe: + name: br_netfilter + state: present + when: ansible_os_family == "RedHat" + +- name: Set bridge-nf-call-iptables (just to be sure) + ansible.posix.sysctl: + name: "{{ item }}" + value: "1" + state: present + reload: true + when: ansible_os_family == "RedHat" + loop: + - net.bridge.bridge-nf-call-iptables + - net.bridge.bridge-nf-call-ip6tables + tags: sysctl + +- name: Add /usr/local/bin to sudo secure_path + ansible.builtin.lineinfile: + line: Defaults secure_path = {{ secure_path[ansible_os_family] }} + regexp: Defaults(\s)*secure_path(\s)*= + state: present + insertafter: EOF + path: /etc/sudoers + validate: visudo -cf %s + when: ansible_os_family in [ "RedHat", "Suse" ] diff --git a/k3s-ansible-copia/roles/proxmox_lxc/handlers/main.yml b/k3s-ansible-copia/roles/proxmox_lxc/handlers/main.yml new file mode 100755 index 0000000..89a61e0 --- /dev/null +++ b/k3s-ansible-copia/roles/proxmox_lxc/handlers/main.yml @@ -0,0 +1,13 @@ +--- +- name: Reboot containers + block: + - name: Get container ids from filtered files + ansible.builtin.set_fact: + proxmox_lxc_filtered_ids: >- + {{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }} + listen: reboot containers + - name: Reboot container + ansible.builtin.command: pct reboot {{ item }} + loop: "{{ proxmox_lxc_filtered_ids }}" + changed_when: true + listen: reboot containers diff --git a/k3s-ansible-copia/roles/proxmox_lxc/meta/main.yml b/k3s-ansible-copia/roles/proxmox_lxc/meta/main.yml new file mode 100644 index 0000000..827c956 --- /dev/null +++ b/k3s-ansible-copia/roles/proxmox_lxc/meta/main.yml @@ -0,0 +1,9 @@ +--- +argument_specs: + main: + short_description: Proxmox LXC settings + options: + proxmox_lxc_ct_ids: + description: Proxmox container ID list + type: list + required: true diff --git a/k3s-ansible-copia/roles/proxmox_lxc/tasks/main.yml b/k3s-ansible-copia/roles/proxmox_lxc/tasks/main.yml new file mode 100644 index 0000000..5418cec --- /dev/null +++ b/k3s-ansible-copia/roles/proxmox_lxc/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- name: Check for container files that exist on this host + ansible.builtin.stat: + path: /etc/pve/lxc/{{ item }}.conf + loop: "{{ proxmox_lxc_ct_ids }}" + register: stat_results + +- name: Filter out files that do not exist + ansible.builtin.set_fact: + proxmox_lxc_filtered_files: '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}' # noqa yaml[line-length] + +# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 +- name: Ensure lxc config has the right apparmor profile + ansible.builtin.lineinfile: + dest: "{{ item }}" + regexp: ^lxc.apparmor.profile + line: "lxc.apparmor.profile: unconfined" + loop: "{{ proxmox_lxc_filtered_files }}" + notify: reboot containers + +- name: Ensure lxc config has the right cgroup + ansible.builtin.lineinfile: + dest: "{{ item }}" + regexp: ^lxc.cgroup.devices.allow + line: "lxc.cgroup.devices.allow: a" + loop: "{{ proxmox_lxc_filtered_files }}" + notify: reboot containers + +- name: Ensure lxc config has the right cap drop + ansible.builtin.lineinfile: + dest: "{{ item }}" + regexp: ^lxc.cap.drop + line: "lxc.cap.drop: " + loop: "{{ proxmox_lxc_filtered_files }}" + notify: reboot containers + +- name: Ensure lxc config has the right mounts + ansible.builtin.lineinfile: 
+ dest: "{{ item }}" + regexp: ^lxc.mount.auto + line: 'lxc.mount.auto: "proc:rw sys:rw"' + loop: "{{ proxmox_lxc_filtered_files }}" + notify: reboot containers diff --git a/k3s-ansible-copia/roles/raspberrypi/defaults/main.yml b/k3s-ansible-copia/roles/raspberrypi/defaults/main.yml new file mode 100644 index 0000000..124fb90 --- /dev/null +++ b/k3s-ansible-copia/roles/raspberrypi/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# Indicates whether the k3s prerequisites for Raspberry Pi should be set up +# Possible values: +# - present +# - absent +state: present diff --git a/k3s-ansible-copia/roles/raspberrypi/handlers/main.yml b/k3s-ansible-copia/roles/raspberrypi/handlers/main.yml new file mode 100644 index 0000000..c060793 --- /dev/null +++ b/k3s-ansible-copia/roles/raspberrypi/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Reboot + ansible.builtin.reboot: + reboot_command: "{{ custom_reboot_command | default(omit) }}" + listen: reboot diff --git a/k3s-ansible-copia/roles/raspberrypi/meta/main.yml b/k3s-ansible-copia/roles/raspberrypi/meta/main.yml new file mode 100644 index 0000000..e2b9bad --- /dev/null +++ b/k3s-ansible-copia/roles/raspberrypi/meta/main.yml @@ -0,0 +1,10 @@ +--- +argument_specs: + main: + short_description: Adjust some Raspberry Pi specific requisites + options: + state: + default: present + description: + - Indicates whether the k3s prerequisites for Raspberry Pi should be + - set up (possible values are `present` and `absent`) diff --git a/k3s-ansible-copia/roles/raspberrypi/tasks/main.yml b/k3s-ansible-copia/roles/raspberrypi/tasks/main.yml new file mode 100644 index 0000000..eb21c9a --- /dev/null +++ b/k3s-ansible-copia/roles/raspberrypi/tasks/main.yml @@ -0,0 +1,59 @@ +--- +- name: Test for raspberry pi /proc/cpuinfo + ansible.builtin.command: grep -E "Raspberry Pi|BCM2708|BCM2709|BCM2835|BCM2836" /proc/cpuinfo + register: grep_cpuinfo_raspberrypi + failed_when: false + changed_when: false + +- name: Test for raspberry pi /proc/device-tree/model + ansible.builtin.command: grep -E "Raspberry Pi" /proc/device-tree/model + register: grep_device_tree_model_raspberrypi + failed_when: false + changed_when: false + +- name: Set raspberry_pi fact to true + ansible.builtin.set_fact: + raspberry_pi: true + when: grep_cpuinfo_raspberrypi.rc == 0 or grep_device_tree_model_raspberrypi.rc == 0 + +- name: Set detected_distribution to Raspbian (ARM64 on Raspbian, Debian Buster/Bullseye/Bookworm) + ansible.builtin.set_fact: + detected_distribution: Raspbian + vars: + allowed_descriptions: + - "[Rr]aspbian.*" + - Debian.*buster + - Debian.*bullseye + - Debian.*bookworm + when: + - ansible_facts.architecture is search("aarch64") + - raspberry_pi|default(false) + - ansible_facts.lsb.description|default("") is match(allowed_descriptions | join('|')) + +- name: Set detected_distribution to Raspbian (ARM64 on Debian Bookworm) + ansible.builtin.set_fact: + detected_distribution: Raspbian + when: + - ansible_facts.architecture is search("aarch64") + - raspberry_pi|default(false) + - ansible_facts.lsb.description|default("") is match("Debian.*bookworm") + +- name: Set detected_distribution_major_version + ansible.builtin.set_fact: + detected_distribution_major_version: "{{ ansible_facts.lsb.major_release }}" + when: + - detected_distribution | default("") == "Raspbian" + +- name: Execute OS related tasks on the Raspberry Pi - {{ action_ }} + ansible.builtin.include_tasks: "{{ item }}" + with_first_found: + - "{{ action_ }}/{{ detected_distribution }}-{{ detected_distribution_major_version 
}}.yml" + - "{{ action_ }}/{{ detected_distribution }}.yml" + - "{{ action_ }}/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml" + - "{{ action_ }}/{{ ansible_distribution }}.yml" + - "{{ action_ }}/default.yml" + vars: + action_: >- + {% if state == "present" %}setup{% else %}teardown{% endif %} + when: + - raspberry_pi|default(false) diff --git a/k3s-ansible-copia/roles/raspberrypi/tasks/setup/Raspbian.yml b/k3s-ansible-copia/roles/raspberrypi/tasks/setup/Raspbian.yml new file mode 100644 index 0000000..1d0a8cd --- /dev/null +++ b/k3s-ansible-copia/roles/raspberrypi/tasks/setup/Raspbian.yml @@ -0,0 +1,49 @@ +--- +- name: Test for cmdline path + ansible.builtin.stat: + path: /boot/firmware/cmdline.txt + register: boot_cmdline_path + failed_when: false + changed_when: false + +- name: Set cmdline path based on Debian version and command result + ansible.builtin.set_fact: + cmdline_path: >- + {{ + ( + boot_cmdline_path.stat.exists and + ansible_facts.lsb.description | default('') is match('Debian.*(?!(bookworm|sid))') + ) | ternary( + '/boot/firmware/cmdline.txt', + '/boot/cmdline.txt' + ) + }} + +- name: Activating cgroup support + ansible.builtin.lineinfile: + path: "{{ cmdline_path }}" + regexp: ^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$ + line: \1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory + backrefs: true + notify: reboot + +- name: Install iptables + ansible.builtin.apt: + name: iptables + state: present + +- name: Flush iptables before changing to iptables-legacy + ansible.builtin.iptables: + flush: true + +- name: Changing to iptables-legacy + community.general.alternatives: + path: /usr/sbin/iptables-legacy + name: iptables + register: ip4_legacy + +- name: Changing to ip6tables-legacy + community.general.alternatives: + path: /usr/sbin/ip6tables-legacy + name: ip6tables + register: ip6_legacy diff --git a/k3s-ansible-copia/roles/raspberrypi/tasks/setup/Rocky.yml b/k3s-ansible-copia/roles/raspberrypi/tasks/setup/Rocky.yml new file mode 100644 index 0000000..2f756cb --- /dev/null +++ b/k3s-ansible-copia/roles/raspberrypi/tasks/setup/Rocky.yml @@ -0,0 +1,9 @@ +--- +- name: Enable cgroup via boot commandline if not already enabled for Rocky + ansible.builtin.lineinfile: + path: /boot/cmdline.txt + backrefs: true + regexp: ^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$ + line: \1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory + notify: reboot + when: not ansible_check_mode diff --git a/k3s-ansible-copia/roles/raspberrypi/tasks/setup/Ubuntu.yml b/k3s-ansible-copia/roles/raspberrypi/tasks/setup/Ubuntu.yml new file mode 100644 index 0000000..07f20a8 --- /dev/null +++ b/k3s-ansible-copia/roles/raspberrypi/tasks/setup/Ubuntu.yml @@ -0,0 +1,14 @@ +--- +- name: Enable cgroup via boot commandline if not already enabled for Ubuntu on a Raspberry Pi + ansible.builtin.lineinfile: + path: /boot/firmware/cmdline.txt + backrefs: true + regexp: ^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$ + line: \1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory + notify: reboot + +- name: Install linux-modules-extra-raspi + ansible.builtin.apt: + name: linux-modules-extra-raspi + state: present + when: ansible_distribution_version is version('24.04', '<') diff --git a/k3s-ansible-copia/roles/raspberrypi/tasks/setup/default.yml b/k3s-ansible-copia/roles/raspberrypi/tasks/setup/default.yml new file mode 100644 index 0000000..ed97d53 --- /dev/null +++ 
b/k3s-ansible-copia/roles/raspberrypi/tasks/setup/default.yml @@ -0,0 +1 @@ +--- diff --git a/k3s-ansible-copia/roles/raspberrypi/tasks/teardown/Raspbian.yml b/k3s-ansible-copia/roles/raspberrypi/tasks/teardown/Raspbian.yml new file mode 100644 index 0000000..ed97d53 --- /dev/null +++ b/k3s-ansible-copia/roles/raspberrypi/tasks/teardown/Raspbian.yml @@ -0,0 +1 @@ +--- diff --git a/k3s-ansible-copia/roles/raspberrypi/tasks/teardown/Rocky.yml b/k3s-ansible-copia/roles/raspberrypi/tasks/teardown/Rocky.yml new file mode 100644 index 0000000..ed97d53 --- /dev/null +++ b/k3s-ansible-copia/roles/raspberrypi/tasks/teardown/Rocky.yml @@ -0,0 +1 @@ +--- diff --git a/k3s-ansible-copia/roles/raspberrypi/tasks/teardown/Ubuntu.yml b/k3s-ansible-copia/roles/raspberrypi/tasks/teardown/Ubuntu.yml new file mode 100644 index 0000000..681068a --- /dev/null +++ b/k3s-ansible-copia/roles/raspberrypi/tasks/teardown/Ubuntu.yml @@ -0,0 +1,6 @@ +--- +- name: Remove linux-modules-extra-raspi + ansible.builtin.apt: + name: linux-modules-extra-raspi + state: absent + when: ansible_distribution_version is version('24.04', '<') diff --git a/k3s-ansible-copia/roles/raspberrypi/tasks/teardown/default.yml b/k3s-ansible-copia/roles/raspberrypi/tasks/teardown/default.yml new file mode 100644 index 0000000..ed97d53 --- /dev/null +++ b/k3s-ansible-copia/roles/raspberrypi/tasks/teardown/default.yml @@ -0,0 +1 @@ +--- diff --git a/k3s-ansible-copia/roles/reset/defaults/main.yml b/k3s-ansible-copia/roles/reset/defaults/main.yml new file mode 100644 index 0000000..0b45925 --- /dev/null +++ b/k3s-ansible-copia/roles/reset/defaults/main.yml @@ -0,0 +1,2 @@ +--- +systemd_dir: /etc/systemd/system diff --git a/k3s-ansible-copia/roles/reset/meta/main.yml b/k3s-ansible-copia/roles/reset/meta/main.yml new file mode 100644 index 0000000..830e019 --- /dev/null +++ b/k3s-ansible-copia/roles/reset/meta/main.yml @@ -0,0 +1,8 @@ +--- +argument_specs: + main: + short_description: Reset all nodes + options: + systemd_dir: + description: Path to systemd services + default: /etc/systemd/system diff --git a/k3s-ansible-copia/roles/reset/tasks/main.yml b/k3s-ansible-copia/roles/reset/tasks/main.yml new file mode 100644 index 0000000..6fba44b --- /dev/null +++ b/k3s-ansible-copia/roles/reset/tasks/main.yml @@ -0,0 +1,96 @@ +--- +- name: Disable services + ansible.builtin.systemd: + name: "{{ item }}" + state: stopped + enabled: false + failed_when: false + with_items: + - k3s + - k3s-node + - k3s-init + +- name: RUN pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc" + register: pkill_containerd_shim_runc + ansible.builtin.command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc" + changed_when: pkill_containerd_shim_runc.rc == 0 + failed_when: false + +- name: Umount k3s filesystems + ansible.builtin.include_tasks: umount_with_children.yml + with_items: + - /run/k3s + - /var/lib/kubelet + - /run/netns + - /var/lib/rancher/k3s + - /var/lib/kubelet/pods + - /var/lib/kubelet/plugins + - /run/netns/cni- + loop_control: + loop_var: mounted_fs + +- name: Remove service files, binaries and data + ansible.builtin.file: + name: "{{ item }}" + state: absent + with_items: + - /usr/local/bin/k3s + - "{{ systemd_dir }}/k3s.service" + - "{{ systemd_dir }}/k3s-node.service" + - /etc/rancher/k3s + - /run/k3s + - /run/flannel + - /etc/rancher/ + - /var/lib/kubelet + - /var/lib/rancher/k3s + - /var/lib/rancher/ + - /var/lib/cni/ + - /etc/cni/net.d + +- name: Remove K3s http_proxy files + ansible.builtin.file: + name: "{{ item }}" + state: absent + with_items: + 
- "{{ systemd_dir }}/k3s.service.d/http_proxy.conf" + - "{{ systemd_dir }}/k3s.service.d" + - "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf" + - "{{ systemd_dir }}/k3s-node.service.d" + when: proxy_env is defined + +- name: Reload daemon_reload + ansible.builtin.systemd: + daemon_reload: true + +- name: Remove tmp directory used for manifests + ansible.builtin.file: + path: /tmp/k3s + state: absent + +- name: Check if rc.local exists + ansible.builtin.stat: + path: /etc/rc.local + register: rcfile + +- name: Remove rc.local modifications for proxmox lxc containers + become: true + ansible.builtin.blockinfile: + path: /etc/rc.local + content: "{{ lookup('template', 'templates/rc.local.j2') }}" + create: false + state: absent + when: proxmox_lxc_configure and rcfile.stat.exists + +- name: Check rc.local for cleanup + become: true + ansible.builtin.slurp: + src: /etc/rc.local + register: rcslurp + when: proxmox_lxc_configure and rcfile.stat.exists + +- name: Cleanup rc.local if we only have a Shebang line + become: true + ansible.builtin.file: + path: /etc/rc.local + state: absent + when: proxmox_lxc_configure and rcfile.stat.exists and ((rcslurp.content | b64decode).splitlines() | length) <= 1 diff --git a/k3s-ansible-copia/roles/reset/tasks/umount_with_children.yml b/k3s-ansible-copia/roles/reset/tasks/umount_with_children.yml new file mode 100644 index 0000000..e540820 --- /dev/null +++ b/k3s-ansible-copia/roles/reset/tasks/umount_with_children.yml @@ -0,0 +1,15 @@ +--- +- name: Get the list of mounted filesystems + ansible.builtin.shell: set -o pipefail && cat /proc/mounts | awk '{ print $2}' | grep -E "^{{ mounted_fs }}" + register: get_mounted_filesystems + args: + executable: /bin/bash + failed_when: false + changed_when: get_mounted_filesystems.stdout | length > 0 + check_mode: false + +- name: Umount filesystem + ansible.posix.mount: + path: "{{ item }}" + state: unmounted + with_items: "{{ get_mounted_filesystems.stdout_lines | reverse | list }}" diff --git a/k3s-ansible-copia/roles/reset_proxmox_lxc/handlers/main.yml b/k3s-ansible-copia/roles/reset_proxmox_lxc/handlers/main.yml new file mode 120000 index 0000000..7f79c4b --- /dev/null +++ b/k3s-ansible-copia/roles/reset_proxmox_lxc/handlers/main.yml @@ -0,0 +1 @@ +../../proxmox_lxc/handlers/main.yml \ No newline at end of file diff --git a/k3s-ansible-copia/roles/reset_proxmox_lxc/meta/main.yml b/k3s-ansible-copia/roles/reset_proxmox_lxc/meta/main.yml new file mode 100644 index 0000000..827c956 --- /dev/null +++ b/k3s-ansible-copia/roles/reset_proxmox_lxc/meta/main.yml @@ -0,0 +1,9 @@ +--- +argument_specs: + main: + short_description: Proxmox LXC settings + options: + proxmox_lxc_ct_ids: + description: Proxmox container ID list + type: list + required: true diff --git a/k3s-ansible-copia/roles/reset_proxmox_lxc/tasks/main.yml b/k3s-ansible-copia/roles/reset_proxmox_lxc/tasks/main.yml new file mode 100644 index 0000000..78faf5f --- /dev/null +++ b/k3s-ansible-copia/roles/reset_proxmox_lxc/tasks/main.yml @@ -0,0 +1,46 @@ +--- +- name: Check for container files that exist on this host + ansible.builtin.stat: + path: /etc/pve/lxc/{{ item }}.conf + loop: "{{ proxmox_lxc_ct_ids }}" + register: stat_results + +- name: Filter out files that do not exist + ansible.builtin.set_fact: + proxmox_lxc_filtered_files: '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}' # noqa yaml[line-length] + +- name: Remove LXC apparmor profile + ansible.builtin.lineinfile: + dest: "{{ item }}" + regexp: 
^lxc.apparmor.profile + line: "lxc.apparmor.profile: unconfined" + state: absent + loop: "{{ proxmox_lxc_filtered_files }}" + notify: reboot containers + +- name: Remove lxc cgroups + ansible.builtin.lineinfile: + dest: "{{ item }}" + regexp: ^lxc.cgroup.devices.allow + line: "lxc.cgroup.devices.allow: a" + state: absent + loop: "{{ proxmox_lxc_filtered_files }}" + notify: reboot containers + +- name: Remove lxc cap drop + ansible.builtin.lineinfile: + dest: "{{ item }}" + regexp: ^lxc.cap.drop + line: "lxc.cap.drop: " + state: absent + loop: "{{ proxmox_lxc_filtered_files }}" + notify: reboot containers + +- name: Remove lxc mounts + ansible.builtin.lineinfile: + dest: "{{ item }}" + regexp: ^lxc.mount.auto + line: 'lxc.mount.auto: "proc:rw sys:rw"' + state: absent + loop: "{{ proxmox_lxc_filtered_files }}" + notify: reboot containers diff --git a/k3s-ansible-copia/site.yml b/k3s-ansible-copia/site.yml new file mode 100644 index 0000000..b656e56 --- /dev/null +++ b/k3s-ansible-copia/site.yml @@ -0,0 +1,68 @@ +--- +- name: Pre tasks + hosts: all + pre_tasks: + - name: Verify Ansible is version 2.11 or above. (If this fails you may need to update Ansible) + ansible.builtin.assert: + that: ansible_version.full is version_compare('2.11', '>=') + msg: > + "Ansible is out of date. See here for more info: https://docs.technotim.live/posts/ansible-automation/" + +- name: Prepare Proxmox cluster + hosts: proxmox + gather_facts: true + become: true + environment: "{{ proxy_env | default({}) }}" + roles: + - role: proxmox_lxc + when: proxmox_lxc_configure + +- name: Prepare k3s nodes + hosts: k3s_cluster + gather_facts: true + environment: "{{ proxy_env | default({}) }}" + roles: + - role: lxc + become: true + when: proxmox_lxc_configure + - role: prereq + become: true + - role: download + become: true + - role: raspberrypi + become: true + - role: k3s_custom_registries + become: true + when: custom_registries + +- name: Setup k3s servers + hosts: master + environment: "{{ proxy_env | default({}) }}" + roles: + - role: k3s_server + become: true + +- name: Setup k3s agents + hosts: node + environment: "{{ proxy_env | default({}) }}" + roles: + - role: k3s_agent + become: true + +- name: Configure k3s cluster + hosts: master + environment: "{{ proxy_env | default({}) }}" + roles: + - role: k3s_server_post + become: true + +- name: Storing kubeconfig in the playbook directory + hosts: master + environment: "{{ proxy_env | default({}) }}" + tasks: + - name: Copying kubeconfig from {{ hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] }} + ansible.builtin.fetch: + src: "{{ ansible_user_dir }}/.kube/config" + dest: ./kubeconfig + flat: true + when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] diff --git a/k3s-ansible-copia/templates/rc.local.j2 b/k3s-ansible-copia/templates/rc.local.j2 new file mode 100644 index 0000000..16ca666 --- /dev/null +++ b/k3s-ansible-copia/templates/rc.local.j2 @@ -0,0 +1,8 @@ +# Kubeadm 1.15 needs /dev/kmsg to be there, but it's not in lxc, but we can just use /dev/console instead +# see: https://github.com/kubernetes-sigs/kind/issues/662 +if [ ! 
-e /dev/kmsg ]; then + ln -s /dev/console /dev/kmsg +fi + +# https://medium.com/@kvaps/run-kubernetes-in-lxc-container-f04aa94b6c9c +mount --make-rshared / diff --git a/k3s-ansible-copia/xclip b/k3s-ansible-copia/xclip new file mode 100644 index 0000000..065a9f9 --- /dev/null +++ b/k3s-ansible-copia/xclip @@ -0,0 +1,18 @@ +I0312 17:14:11.285448 1 main.go:211] CLI flags config: {etcdEndpoints:http://127.0.0.1:4001,http://127.0.0.1:2379 etcdPrefix:/coreos.com/network etcdKeyfile: etcdCertfile: etcdCAFile: etcdUsername: etcdPassword: version:false kubeSubnetMgr:true kubeApiUrl: kubeAnnotationPrefix:flannel.alpha.coreos.com kubeConfigFile: iface:[] ifaceRegex:[] ipMasq:true ifaceCanReach: subnetFile:/run/flannel/subnet.env publicIP: publicIPv6: subnetLeaseRenewMargin:60 healthzIP:0.0.0.0 healthzPort:0 iptablesResyncSeconds:5 iptablesForwardRules:true netConfPath:/etc/kube-flannel/net-conf.json setNodeNetworkUnavailable:true} +W0312 17:14:11.285516 1 client_config.go:618] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. +I0312 17:14:11.291436 1 kube.go:139] Waiting 10m0s for node controller to sync +I0312 17:14:11.291467 1 kube.go:469] Starting kube subnet manager +I0312 17:14:11.292837 1 kube.go:490] Creating the node lease for IPv4. This is the n.Spec.PodCIDRs: [10.42.0.0/24] +I0312 17:14:12.291660 1 kube.go:146] Node controller sync successful +I0312 17:14:12.291709 1 main.go:231] Created subnet manager: Kubernetes Subnet Manager - casca +I0312 17:14:12.291718 1 main.go:234] Installing signal handlers +I0312 17:14:12.291835 1 main.go:468] Found network config - Backend type: vxlan +I0312 17:14:12.296646 1 kube.go:669] List of node(casca) annotations: map[string]string{"alpha.kubernetes.io/provided-node-ip":"192.168.1.133", "flannel.alpha.coreos.com/backend-data":"{\"VNI\":1,\"VtepMAC\":\"8e:1b:3c:71:96:01\"}", "flannel.alpha.coreos.com/backend-type":"vxlan", "flannel.alpha.coreos.com/kube-subnet-manager":"true", "flannel.alpha.coreos.com/public-ip":"192.168.1.133", "k3s.io/hostname":"CASCA", "k3s.io/internal-ip":"192.168.1.133", "k3s.io/node-args":"[\"server\",\"--flannel-backend\",\"none\",\"--token\",\"********\"]", "k3s.io/node-config-hash":"EC72RJBT2ODREIIW72ZM7V5VCX6HHTLU3MR635DGCNCGIXLUK2RQ====", "k3s.io/node-env":"{}", "node.alpha.kubernetes.io/ttl":"0", "volumes.kubernetes.io/controller-managed-attach-detach":"true"} +I0312 17:14:12.296736 1 match.go:211] Determining IP address of default interface +I0312 17:14:12.297417 1 match.go:264] Using interface with name enp2s0 and address 192.168.1.133 +I0312 17:14:12.297462 1 match.go:286] Defaulting external address to interface address (192.168.1.133) +I0312 17:14:12.297533 1 vxlan.go:141] VXLAN config: VNI=1 Port=0 GBP=false Learning=false DirectRouting=false +I0312 17:14:12.301192 1 kube.go:636] List of node(casca) annotations: map[string]string{"alpha.kubernetes.io/provided-node-ip":"192.168.1.133", "flannel.alpha.coreos.com/backend-data":"{\"VNI\":1,\"VtepMAC\":\"8e:1b:3c:71:96:01\"}", "flannel.alpha.coreos.com/backend-type":"vxlan", "flannel.alpha.coreos.com/kube-subnet-manager":"true", "flannel.alpha.coreos.com/public-ip":"192.168.1.133", "k3s.io/hostname":"CASCA", "k3s.io/internal-ip":"192.168.1.133", "k3s.io/node-args":"[\"server\",\"--flannel-backend\",\"none\",\"--token\",\"********\"]", "k3s.io/node-config-hash":"EC72RJBT2ODREIIW72ZM7V5VCX6HHTLU3MR635DGCNCGIXLUK2RQ====", "k3s.io/node-env":"{}", "node.alpha.kubernetes.io/ttl":"0", 
"volumes.kubernetes.io/controller-managed-attach-detach":"true"} +I0312 17:14:12.301246 1 vxlan.go:155] Interface flannel.1 mac address set to: 8e:1b:3c:71:96:01 +E0312 17:14:12.301685 1 main.go:359] Error registering network: failed to acquire lease: subnet "10.244.0.0/16" specified in the flannel net config doesn't contain "10.42.0.0/24" PodCIDR of the "casca" node +I0312 17:14:12.301771 1 main.go:448] Stopping shutdownHandler... diff --git a/k3s-ansible/roles/proxmox_lxc/handlers/main.yml b/k3s-ansible/roles/proxmox_lxc/handlers/main.yml old mode 100644 new mode 100755