torre

This commit is contained in:
parent ecb65e5f29
commit beb0d4dbff

21  k3s-ansible-copia/.ansible-lint  Normal file
@@ -0,0 +1,21 @@
---
profile: production
exclude_paths:
  # default paths
  - .cache/
  - .github/
  - test/fixtures/formatting-before/
  - test/fixtures/formatting-prettier/

  # The "converge" and "reset" playbooks use import_playbook in
  # conjunction with the "env" lookup plugin, which causes the
  # syntax check of ansible-lint to fail.
  - molecule/**/converge.yml
  - molecule/**/prepare.yml
  - molecule/**/reset.yml

  # The file was generated by ansible-galaxy - don't mess with it.
  - galaxy.yml

skip_list:
  - var-naming[no-role-prefix]

13  k3s-ansible-copia/.editorconfig  Normal file
@@ -0,0 +1,13 @@
root = true
[*]
indent_style = space
indent_size = 2
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
end_of_line = lf
max_line_length = off
[Makefile]
indent_style = tab
[*.go]
indent_style = tab

35  k3s-ansible-copia/.pre-commit-config.yaml  Normal file
@@ -0,0 +1,35 @@
---
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: requirements-txt-fixer
      - id: sort-simple-yaml
      - id: detect-private-key
      - id: check-merge-conflict
      - id: end-of-file-fixer
      - id: mixed-line-ending
      - id: trailing-whitespace
        args: [--markdown-linebreak-ext=md]
  - repo: https://github.com/adrienverge/yamllint.git
    rev: v1.33.0
    hooks:
      - id: yamllint
        args: [-c=.yamllint]
  - repo: https://github.com/ansible-community/ansible-lint.git
    rev: v6.22.2
    hooks:
      - id: ansible-lint
  - repo: https://github.com/shellcheck-py/shellcheck-py
    rev: v0.9.0.6
    hooks:
      - id: shellcheck
  - repo: https://github.com/Lucas-C/pre-commit-hooks
    rev: v1.5.4
    hooks:
      - id: remove-crlf
      - id: remove-tabs
  - repo: https://github.com/sirosen/texthooks
    rev: 0.6.4
    hooks:
      - id: fix-smartquotes

20  k3s-ansible-copia/.yamllint  Normal file
@@ -0,0 +1,20 @@
---
extends: default

rules:
  comments:
    min-spaces-from-content: 1
  comments-indentation: false
  braces:
    max-spaces-inside: 1
  octal-values:
    forbid-implicit-octal: true
    forbid-explicit-octal: true
  line-length:
    max: 120
    level: warning
  truthy:
    allowed-values: ["true", "false"]

ignore:
  - galaxy.yml

177  k3s-ansible-copia/LICENSE  Normal file
@@ -0,0 +1,177 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

235  k3s-ansible-copia/README.md  Normal file
@@ -0,0 +1,235 @@
# Automated build of HA k3s Cluster with `kube-vip` and MetalLB

![Fully automated K3S etcd High Availability Install]()

This playbook will build an HA Kubernetes cluster with `k3s`, `kube-vip` and MetalLB via `ansible`.

This is based on the work from [this fork](https://github.com/212850a/k3s-ansible), which is in turn based on [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible). It uses [kube-vip](https://kube-vip.io/) to create a load balancer for the control plane, and [MetalLB](https://metallb.universe.tf/installation/) for its service `LoadBalancer`.

If you want more context on how this works, see:

📄 [Documentation](https://technotim.live/posts/k3s-etcd-ansible/) (including example commands)

📺 [Watch the Video](https://www.youtube.com/watch?v=CbkEWcUZ7zM)

## 📖 k3s Ansible Playbook

Build a Kubernetes cluster using Ansible with k3s. The goal is to easily install an HA Kubernetes cluster on machines running:

- [x] Debian (tested on version 11)
- [x] Ubuntu (tested on version 22.04)
- [x] Rocky (tested on version 9)

on processor architecture:

- [x] x64
- [x] arm64
- [x] armhf

## ✅ System requirements

- The Control Node (the machine you run `ansible` commands on) must have Ansible 2.11+. If you need a quick primer on Ansible, [you can check out my docs on setting up Ansible](https://technotim.live/posts/ansible-automation/).

- You will also need to install the collections that this playbook uses by running `ansible-galaxy collection install -r ./collections/requirements.yml` (important❗)

- The [`netaddr` package](https://pypi.org/project/netaddr/) must be available to Ansible. If you have installed Ansible via apt, this is already taken care of. If you have installed Ansible via `pip`, make sure to install `netaddr` into the respective virtual environment (see the sketch after this list).

- `server` and `agent` nodes should have passwordless SSH access; if not, you can supply the arguments `--ask-pass --ask-become-pass` to each command to provide credentials.
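
If you installed Ansible via `pip`, a minimal sketch of preparing such an environment (the virtualenv path is only an example, not something this repo prescribes):

```bash
# Create and activate a virtual environment for Ansible
python3 -m venv ~/.venvs/ansible
source ~/.venvs/ansible/bin/activate

# Install Ansible together with netaddr so both live in the same environment
python3 -m pip install ansible netaddr

# Install the collections this playbook depends on
ansible-galaxy collection install -r ./collections/requirements.yml
```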

## 🚀 Getting Started

### 🍴 Preparation

First, create a new directory based on the `sample` directory within the `inventory` directory:

```bash
cp -R inventory/sample inventory/my-cluster
```

Second, edit `inventory/my-cluster/hosts.ini` to match your hosts.

For example:

```ini
[master]
192.168.30.38
192.168.30.39
192.168.30.40

[node]
192.168.30.41
192.168.30.42

[k3s_cluster:children]
master
node
```

If multiple hosts are in the master group, the playbook will automatically set up k3s in [HA mode with etcd](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/).

Finally, copy `ansible.example.cfg` to `ansible.cfg` and adapt the inventory path to match the files that you just created.
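
Assuming the `inventory/my-cluster` directory from the previous step, this boils down to:

```bash
cp ansible.example.cfg ansible.cfg
# ansible.cfg then contains: inventory = inventory/my-cluster/hosts.ini
```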

This requires at least k3s version `1.19.1`; however, the version is configurable via the `k3s_version` variable.

If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match your environment.

### ☸️ Create Cluster

Start provisioning of the cluster using the following command:

```bash
ansible-playbook site.yml -i inventory/my-cluster/hosts.ini
```

After deployment, the control plane will be accessible via the virtual IP address that is defined in `inventory/my-cluster/group_vars/all.yml` as `apiserver_endpoint`.

### 🔥 Remove k3s cluster

```bash
ansible-playbook reset.yml -i inventory/my-cluster/hosts.ini
```

> You should also reboot these nodes afterwards, since the reset does not destroy the VIP.
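
One way to reboot all of them in a single step, sketched here with Ansible's built-in `reboot` module against the `k3s_cluster` group from the sample inventory:

```bash
ansible k3s_cluster -i inventory/my-cluster/hosts.ini -m reboot --become
```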

## ⚙️ Kube Config

To copy your `kube config` locally so that you can access your **Kubernetes** cluster, run:

```bash
scp debian@master_ip:/etc/rancher/k3s/k3s.yaml ~/.kube/config
```

If you get a `Permission denied` error, go into the node and temporarily run:

```bash
sudo chmod 777 /etc/rancher/k3s/k3s.yaml
```

Then copy the file with the `scp` command above and reset the permissions with:

```bash
sudo chmod 600 /etc/rancher/k3s/k3s.yaml
```

You'll then want to modify the config to point to the master IP by running:

```bash
sudo nano ~/.kube/config
```

Then change `server: https://127.0.0.1:6443` to match your master IP: `server: https://192.168.1.222:6443`
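
If you prefer a one-liner over editing the file by hand, a small `sed` sketch (substitute your own `apiserver_endpoint` for the example IP):

```bash
sed -i 's#https://127.0.0.1:6443#https://192.168.1.222:6443#' ~/.kube/config
```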

### 🔨 Testing your cluster

See the commands [here](https://technotim.live/posts/k3s-etcd-ansible/#testing-your-cluster).

### Variables

| Role(s) | Variable | Type | Default | Required | Description |
|---|---|---|---|---|---|
| `download` | `k3s_version` | string | ❌ | Required | K3s binaries version |
| `k3s_agent`, `k3s_server`, `k3s_server_post` | `apiserver_endpoint` | string | ❌ | Required | Virtual IP address configured on each master |
| `k3s_agent` | `extra_agent_args` | string | `null` | Not required | Extra arguments for agent nodes |
| `k3s_agent`, `k3s_server` | `group_name_master` | string | `null` | Not required | Name of the master group |
| `k3s_agent` | `k3s_token` | string | `null` | Not required | Token used to communicate between masters |
| `k3s_agent`, `k3s_server` | `proxy_env` | dict | `null` | Not required | Internet proxy configuration |
| `k3s_agent`, `k3s_server` | `proxy_env.HTTP_PROXY` | string | ❌ | Required | HTTP internet proxy |
| `k3s_agent`, `k3s_server` | `proxy_env.HTTPS_PROXY` | string | ❌ | Required | HTTPS internet proxy |
| `k3s_agent`, `k3s_server` | `proxy_env.NO_PROXY` | string | ❌ | Required | Addresses that will not use the proxies |
| `k3s_agent`, `k3s_server`, `reset` | `systemd_dir` | string | `/etc/systemd/system` | Not required | Path to systemd services |
| `k3s_custom_registries` | `custom_registries_yaml` | string | ❌ | Required | YAML block defining custom registries; see the commented example in `inventory/sample/group_vars/all.yml`, which pulls all images used in this playbook through your private registries and also lets you pull your own images from a private registry without using imagePullSecrets in your deployments |
| `k3s_server`, `k3s_server_post` | `cilium_bgp` | bool | `~` | Not required | Enable the Cilium BGP control plane for LB services and pod CIDRs. Disables the use of MetalLB. |
| `k3s_server`, `k3s_server_post` | `cilium_iface` | string | ❌ | Not required | The network interface used when Cilium is enabled |
| `k3s_server` | `extra_server_args` | string | `""` | Not required | Extra arguments for server nodes |
| `k3s_server` | `k3s_create_kubectl_symlink` | bool | `false` | Not required | Create the kubectl -> k3s symlink |
| `k3s_server` | `k3s_create_crictl_symlink` | bool | `true` | Not required | Create the crictl -> k3s symlink |
| `k3s_server` | `kube_vip_arp` | bool | `true` | Not required | Enables kube-vip ARP broadcasts |
| `k3s_server` | `kube_vip_bgp` | bool | `false` | Not required | Enables kube-vip BGP peering |
| `k3s_server` | `kube_vip_bgp_routerid` | string | `"127.0.0.1"` | Not required | Defines the router ID for the kube-vip BGP server |
| `k3s_server` | `kube_vip_bgp_as` | string | `"64513"` | Not required | Defines the AS for the kube-vip BGP server |
| `k3s_server` | `kube_vip_bgp_peeraddress` | string | `"192.168.30.1"` | Not required | Defines the address for the kube-vip BGP peer |
| `k3s_server` | `kube_vip_bgp_peeras` | string | `"64512"` | Not required | Defines the AS for the kube-vip BGP peer |
| `k3s_server` | `kube_vip_bgp_peers` | list | `[]` | Not required | List of BGP peer ASN & address pairs |
| `k3s_server` | `kube_vip_bgp_peers_groups` | list | `['k3s_master']` | Not required | Inventory group in which to search for additional `kube_vip_bgp_peers` parameters to merge |
| `k3s_server` | `kube_vip_iface` | string | `~` | Not required | Explicitly defines an interface that ALL control nodes should use to propagate the VIP. If unset, kube-vip determines the right interface automatically at runtime. |
| `k3s_server` | `kube_vip_tag_version` | string | `v0.7.2` | Not required | Image tag for kube-vip |
| `k3s_server` | `kube_vip_cloud_provider_tag_version` | string | `main` | Not required | Tag for the kube-vip-cloud-provider manifest when enabled |
| `k3s_server`, `k3s_server_post` | `kube_vip_lb_ip_range` | string | `~` | Not required | IP range for the kube-vip load balancer |
| `k3s_server`, `k3s_server_post` | `metal_lb_controller_tag_version` | string | `v0.14.3` | Not required | Image tag for MetalLB |
| `k3s_server` | `metal_lb_speaker_tag_version` | string | `v0.14.3` | Not required | Image tag for MetalLB |
| `k3s_server` | `metal_lb_type` | string | `native` | Not required | Use FRR mode or native. Valid values are `frr` and `native` |
| `k3s_server` | `retry_count` | int | `20` | Not required | Number of retries when verifying that nodes have joined |
| `k3s_server` | `server_init_args` | string | ❌ | Not required | Arguments for server nodes |
| `k3s_server_post` | `bpf_lb_algorithm` | string | `maglev` | Not required | BPF load balancing algorithm |
| `k3s_server_post` | `bpf_lb_mode` | string | `hybrid` | Not required | BPF load balancing mode |
| `k3s_server_post` | `calico_blocksize` | int | `26` | Not required | IP pool block size |
| `k3s_server_post` | `calico_ebpf` | bool | `false` | Not required | Use the eBPF dataplane instead of iptables |
| `k3s_server_post` | `calico_encapsulation` | string | `VXLANCrossSubnet` | Not required | IP pool encapsulation |
| `k3s_server_post` | `calico_natOutgoing` | string | `Enabled` | Not required | IP pool NAT outgoing |
| `k3s_server_post` | `calico_nodeSelector` | string | `all()` | Not required | IP pool node selector |
| `k3s_server_post` | `calico_iface` | string | `~` | Not required | The network interface used when Calico is enabled |
| `k3s_server_post` | `calico_tag` | string | `v3.27.2` | Not required | Calico version tag |
| `k3s_server_post` | `cilium_bgp_my_asn` | int | `64513` | Not required | Local ASN for the BGP peering |
| `k3s_server_post` | `cilium_bgp_peer_asn` | int | `64512` | Not required | BGP peer ASN |
| `k3s_server_post` | `cilium_bgp_peer_address` | string | `~` | Not required | BGP peer address |
| `k3s_server_post` | `cilium_bgp_neighbors` | list | `[]` | Not required | List of BGP peer ASN & address pairs |
| `k3s_server_post` | `cilium_bgp_neighbors_groups` | list | `['k3s_all']` | Not required | Inventory group in which to search for additional `cilium_bgp_neighbors` parameters to merge |
| `k3s_server_post` | `cilium_bgp_lb_cidr` | string | `192.168.31.0/24` | Not required | BGP load balancer IP range |
| `k3s_server_post` | `cilium_exportPodCIDR` | bool | `true` | Not required | Export pod CIDR |
| `k3s_server_post` | `cilium_hubble` | bool | `true` | Not required | Enable Cilium Hubble |
| `k3s_server_post` | `cilium_mode` | string | `native` | Not required | Inter-node communication mode (choices are `native` and `routed`) |
| `k3s_server_post` | `cluster_cidr` | string | `10.52.0.0/16` | Not required | Cluster pod CIDR range |
| `k3s_server_post` | `enable_bpf_masquerade` | bool | `true` | Not required | Use IP masquerading |
| `k3s_server_post` | `kube_proxy_replacement` | bool | `true` | Not required | Replace the native kube-proxy with Cilium |
| `k3s_server_post` | `metal_lb_available_timeout` | string | `240s` | Not required | How long to wait for MetalLB resources |
| `k3s_server_post` | `metal_lb_ip_range` | string | `192.168.30.80-192.168.30.90` | Not required | MetalLB IP range for the load balancer |
| `k3s_server_post` | `metal_lb_mode` | string | `layer2` | Not required | MetalLB mode (choices are `bgp` and `layer2`) |
| `k3s_server_post` | `metal_lb_bgp_my_asn` | string | `~` | Not required | BGP ASN configuration |
| `k3s_server_post` | `metal_lb_bgp_peer_asn` | string | `~` | Not required | BGP peer ASN configuration |
| `k3s_server_post` | `metal_lb_bgp_peer_address` | string | `~` | Not required | BGP peer address |
| `lxc` | `custom_reboot_command` | string | `~` | Not required | Command to run on reboot |
| `prereq` | `system_timezone` | string | `null` | Not required | Timezone to be set on all nodes |
| `proxmox_lxc`, `reset_proxmox_lxc` | `proxmox_lxc_ct_ids` | list | ❌ | Required | Proxmox container ID list |
| `raspberrypi` | `state` | string | `present` | Not required | Indicates whether the k3s prerequisites for Raspberry Pi should be set up (possible values are `present` and `absent`) |

### Troubleshooting

Be sure to see [this post](https://github.com/techno-tim/k3s-ansible/discussions/20) on how to troubleshoot common problems.

### Testing the playbook using molecule

This playbook includes a [molecule](https://molecule.rtfd.io/)-based test setup.
It is run automatically in CI, but you can also run the tests locally.
This might be helpful for quick feedback in a few cases.
You can find more information about it [here](molecule/README.md).

### Pre-commit Hooks

This repo uses `pre-commit` and `pre-commit-hooks` to lint and fix common style and syntax errors. Be sure to install the Python packages and then run `pre-commit install`. For more information, see [pre-commit](https://pre-commit.com/).
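
A minimal sketch of getting the hooks running locally (assuming Python 3 and pip are available):

```bash
# Install the pre-commit tool itself
python3 -m pip install pre-commit

# Register the hooks from .pre-commit-config.yaml with your local clone
pre-commit install

# Optionally run every hook against all files once
pre-commit run --all-files
```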

## 🌌 Ansible Galaxy

This collection can now be used in larger Ansible projects.

Instructions:

- create or modify a file `collections/requirements.yml` in your project:

```yml
collections:
  - name: ansible.utils
  - name: community.general
  - name: ansible.posix
  - name: kubernetes.core
  - name: https://github.com/techno-tim/k3s-ansible.git
    type: git
    version: master
```

- install via `ansible-galaxy collection install -r ./collections/requirements.yml`
- every role is now available via the prefix `techno_tim.k3s_ansible.`, e.g. `techno_tim.k3s_ansible.lxc` (see the sketch below)
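
For example, a playbook in your own project could pull in one of the collection's roles like this (a sketch only; the play name and host group are placeholders for your inventory):

```yml
---
- name: Run a role from the k3s-ansible collection
  hosts: k3s_cluster
  become: true
  roles:
    - role: techno_tim.k3s_ansible.prereq
```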

## Thanks 🤝

This repo is really standing on the shoulders of giants. Thank you to all those who have contributed, and thanks to these repos for code and ideas:

- [k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible)
- [geerlingguy/turing-pi-cluster](https://github.com/geerlingguy/turing-pi-cluster)
- [212850a/k3s-ansible](https://github.com/212850a/k3s-ansible)

2  k3s-ansible-copia/ansible.cfg  Normal file
@@ -0,0 +1,2 @@
[defaults]
inventory = inventory/my-cluster/hosts.ini ; Adapt this to the path to your inventory file

2  k3s-ansible-copia/ansible.example.cfg  Normal file
@@ -0,0 +1,2 @@
[defaults]
inventory = inventory/my-cluster/hosts.ini ; Adapt this to the path to your inventory file

6  k3s-ansible-copia/collections/requirements.yml  Normal file
@@ -0,0 +1,6 @@
---
collections:
  - name: ansible.utils
  - name: community.general
  - name: ansible.posix
  - name: kubernetes.core

3  k3s-ansible-copia/deploy.sh  Executable file
@@ -0,0 +1,3 @@
#!/bin/bash

ansible-playbook site.yml

2339  k3s-ansible-copia/error  Normal file
File diff suppressed because it is too large

20  k3s-ansible-copia/example/deployment.yml  Normal file
@@ -0,0 +1,20 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:alpine
          ports:
            - containerPort: 80

13  k3s-ansible-copia/example/service.yml  Normal file
@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  ipFamilyPolicy: PreferDualStack
  selector:
    app: nginx
  ports:
    - port: 80
      targetPort: 80
  type: LoadBalancer

1  k3s-ansible-copia/fk  Normal file
@@ -0,0 +1 @@
cont

81  k3s-ansible-copia/galaxy.yml  Normal file
@@ -0,0 +1,81 @@
### REQUIRED
# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
# underscores or numbers and cannot contain consecutive underscores
namespace: techno_tim

# The name of the collection. Has the same character restrictions as 'namespace'
name: k3s_ansible

# The version of the collection. Must be compatible with semantic versioning
version: 1.0.0

# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
readme: README.md

# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
# @nicks:irc/im.site#channel'
authors:
  - your name <example@domain.com>


### OPTIONAL but strongly recommended
# A short summary description of the collection
description: >
  The easiest way to bootstrap a self-hosted High Availability Kubernetes
  cluster. A fully automated HA k3s etcd install with kube-vip, MetalLB,
  and more.

# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
license:
  - Apache-2.0


# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
# requirements as 'namespace' and 'name'
tags:
  - etcd
  - high-availability
  - k8s
  - k3s
  - k3s-cluster
  - kube-vip
  - kubernetes
  - metallb
  - rancher

# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
# collection label 'namespace.name'. The value is a version range
# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
# range specifiers can be set and are separated by ','
dependencies:
  ansible.utils: '*'
  ansible.posix: '*'
  community.general: '*'
  kubernetes.core: '*'

# The URL of the originating SCM repository
repository: https://github.com/techno-tim/k3s-ansible

# The URL to any online docs
documentation: https://github.com/techno-tim/k3s-ansible

# The URL to the homepage of the collection/project
homepage: https://www.youtube.com/watch?v=CbkEWcUZ7zM

# The URL to the collection issue tracker
issues: https://github.com/techno-tim/k3s-ansible/issues

# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
# and '.git' are always filtered. Mutually exclusive with 'manifest'
build_ignore: []

# A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a
# list of MANIFEST.in style
# L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). The key
# 'omit_default_directives' is a boolean that controls whether the default directives are used. Mutually exclusive
# with 'build_ignore'
# manifest: null

3  k3s-ansible-copia/inventory/.gitignore  vendored  Normal file
@@ -0,0 +1,3 @@
/*
!.gitignore
!sample/

171  k3s-ansible-copia/inventory/sample/group_vars/all.yml  Normal file
@@ -0,0 +1,171 @@
---
k3s_version: v1.30.2+k3s2
# this is the user that has ssh access to these machines
ansible_user: ansibleuser
systemd_dir: /etc/systemd/system

# Set your timezone
system_timezone: "Your/Timezone"

# interface which will be used for flannel
flannel_iface: "eth0"

# uncomment calico_iface to use tigera operator/calico cni instead of flannel https://docs.tigera.io/calico/latest/about
# calico_iface: "eth0"
calico_ebpf: false  # use eBPF dataplane instead of iptables
calico_tag: "v3.28.0"  # calico version tag

# uncomment cilium_iface to use cilium cni instead of flannel or calico
# ensure v4.19.57, v5.1.16, v5.2.0 or more recent kernel
# cilium_iface: "eth0"
cilium_mode: "native"  # native when nodes on same subnet or using bgp, else set routed
cilium_tag: "v1.16.0"  # cilium version tag
cilium_hubble: true  # enable hubble observability relay and ui

# if using calico or cilium, you may specify the cluster pod cidr pool
cluster_cidr: "10.52.0.0/16"

# enable cilium bgp control plane for lb services and pod cidrs. disables metallb.
cilium_bgp: false

# bgp parameters for cilium cni. only active when cilium_iface is defined and cilium_bgp is true.
cilium_bgp_my_asn: "64513"
cilium_bgp_peer_asn: "64512"
cilium_bgp_peer_address: "192.168.30.1"
cilium_bgp_lb_cidr: "192.168.31.0/24"  # cidr for cilium loadbalancer ipam

# apiserver_endpoint is the virtual ip-address which will be configured on each master
apiserver_endpoint: "192.168.30.222"

# k3s_token is required so that masters can talk together securely
# this token should be alpha numeric only
k3s_token: "some-SUPER-DEDEUPER-secret-password"

# The IP on which the node is reachable in the cluster.
# Here, a sensible default is provided, you can still override
# it for each of your hosts, though.
k3s_node_ip: "{{ ansible_facts[(cilium_iface | default(calico_iface | default(flannel_iface)))]['ipv4']['address'] }}"

# Disable the taint manually by setting: k3s_master_taint = false
k3s_master_taint: "{{ true if groups['node'] | default([]) | length >= 1 else false }}"

# these arguments are recommended for servers as well as agents:
extra_args: >-
  {{ '--flannel-iface=' + flannel_iface if calico_iface is not defined and cilium_iface is not defined else '' }}
  --node-ip={{ k3s_node_ip }}

# change these to your liking; the only required ones are: --disable servicelb, --tls-san {{ apiserver_endpoint }}
# the contents of the if block are also required if using calico or cilium
extra_server_args: >-
  {{ extra_args }}
  {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
  {% if calico_iface is defined or cilium_iface is defined %}
  --flannel-backend=none
  --disable-network-policy
  --cluster-cidr={{ cluster_cidr | default('10.52.0.0/16') }}
  {% endif %}
  --tls-san {{ apiserver_endpoint }}
  --disable servicelb
  --disable traefik

extra_agent_args: >-
  {{ extra_args }}

# image tag for kube-vip
kube_vip_tag_version: "v0.8.2"

# tag for kube-vip-cloud-provider manifest
# kube_vip_cloud_provider_tag_version: "main"

# kube-vip ip range for load balancer
# (uncomment to use kube-vip for services instead of MetalLB)
# kube_vip_lb_ip_range: "192.168.30.80-192.168.30.90"

# metallb type frr or native
metal_lb_type: "native"

# metallb mode layer2 or bgp
metal_lb_mode: "layer2"

# bgp options
# metal_lb_bgp_my_asn: "64513"
# metal_lb_bgp_peer_asn: "64512"
# metal_lb_bgp_peer_address: "192.168.30.1"

# image tag for metal lb
metal_lb_speaker_tag_version: "v0.14.8"
metal_lb_controller_tag_version: "v0.14.8"

# metallb ip range for load balancer
metal_lb_ip_range: "192.168.30.80-192.168.30.90"

# Only enable if your nodes are proxmox LXC nodes, make sure to configure your proxmox nodes
# in your hosts.ini file.
# Please read https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185 before using this.
# Most notably, your containers must be privileged, and must not have nesting set to true.
# Please note this script disables most of the security of lxc containers, with the trade off being that lxc
# containers are significantly more resource efficient compared to full VMs.
# Mixing and matching VMs and lxc containers is not supported, ymmv if you want to do this.
# I would only really recommend using this if you have particularly low powered proxmox nodes where the overhead of
# VMs would use a significant portion of your available resources.
proxmox_lxc_configure: false
# the user that you would use to ssh into the host, for example if you run ssh some-user@my-proxmox-host,
# set this value to some-user
proxmox_lxc_ssh_user: root
# the unique proxmox ids for all of the containers in the cluster, both worker and master nodes
proxmox_lxc_ct_ids:
  - 200
  - 201
  - 202
  - 203
  - 204

# Only enable this if you have set up your own container registry to act as a mirror / pull-through cache
# (harbor / nexus / docker's official registry / etc).
# Can be beneficial for larger dev/test environments (for example if you're getting rate limited by docker hub),
# or air-gapped environments where your nodes don't have internet access after the initial setup
# (which is still needed for downloading the k3s binary and such).
# k3s's documentation about private registries here: https://docs.k3s.io/installation/private-registry
custom_registries: false
# The registries can be authenticated or anonymous, depending on your registry server configuration.
# If they allow anonymous access, simply remove the following bit from custom_registries_yaml
#   configs:
#     "registry.domain.com":
#       auth:
#         username: yourusername
#         password: yourpassword
# The following is an example that pulls all images used in this playbook through your private registries.
# It also allows you to pull your own images from your private registry, without having to use imagePullSecrets
# in your deployments.
# If all you need is your own images and you don't care about caching the docker/quay/ghcr.io images,
# you can just remove those from the mirrors: section.
custom_registries_yaml: |
  mirrors:
    docker.io:
      endpoint:
        - "https://registry.domain.com/v2/dockerhub"
    quay.io:
      endpoint:
        - "https://registry.domain.com/v2/quayio"
    ghcr.io:
      endpoint:
        - "https://registry.domain.com/v2/ghcrio"
    registry.domain.com:
      endpoint:
        - "https://registry.domain.com"

  configs:
    "registry.domain.com":
      auth:
        username: yourusername
        password: yourpassword

# On some distros like Diet Pi, there is no dbus installed. dbus is required by the default reboot command.
# Uncomment if you need a custom reboot command
# custom_reboot_command: /usr/sbin/shutdown -r now

# Only enable and configure these if you access the internet through a proxy
# proxy_env:
#   HTTP_PROXY: "http://proxy.domain.local:3128"
#   HTTPS_PROXY: "http://proxy.domain.local:3128"
#   NO_PROXY: "*.domain.local,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"

@@ -0,0 +1,2 @@
---
ansible_user: '{{ proxmox_lxc_ssh_user }}'

17  k3s-ansible-copia/inventory/sample/hosts.ini  Normal file
@@ -0,0 +1,17 @@
[master]
192.168.30.38
192.168.30.39
192.168.30.40

[node]
192.168.30.41
192.168.30.42

# only required if proxmox_lxc_configure: true
# must contain all proxmox instances that have a master or worker node
# [proxmox]
# 192.168.30.43

[k3s_cluster:children]
master
node

0  k3s-ansible-copia/k3s.crt  Normal file

0  k3s-ansible-copia/k3s_ca.crt  Normal file

19  k3s-ansible-copia/kubeconfig  Normal file
@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUzTkRJek1UZ3hNalF3SGhjTk1qVXdNekU0TVRjeE5USTBXaGNOTXpVd016RTJNVGN4TlRJMApXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUzTkRJek1UZ3hNalF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRdXgzUzZOdUJ0bXExbzhIaFFkL0pYK3BLdm1UMEpMSkNWdFBqNjNkWFkKR3lmSnlDM3dLazdIZzNGMS90eExnSFRUUHRmUm56b0ZEdGNPZU5xWEpUejFvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWJUTnRFL0JUUmpIZ1ljbEJkRm9QCkVhT3JsT2N3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUloQU9ObWx5QUxXeklhTkFoZ1BRMlVtb0tmdmF3V3IrNlAKaG5rQkhVTVV2TTcrQWlCLzJsSWJyZzV3TjJwMC9RY0duWVllcEppbzF2ZHRjTHNmYmhVMm5FbndFZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    server: https://192.168.1.222:6443
  name: default
contexts:
- context:
    cluster: default
    user: default
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: default
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJZmlmRjE3UDRVRFV3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOelF5TXpFNE1USTBNQjRYRFRJMU1ETXhPREUzTVRVeU5Gb1hEVEkyTURNeApPREUzTVRVeU5Gb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJDK3AwNFhSeWNWMzZQZVQKWWJvVU44OFhXemZHVkZGenFBRzlsdi90cGVVNlNFZEI4YzNBamU3STA2UitnY2FNTjlvekVFS096cFVYcktmVgpMWFJEUlRpalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUjZOM3l6Yyt4OFFIcHo2U3F1UkhBdjBlY0lBREFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQXBoRlloN3FERVJSSmlDcWtYS0hDbXMvTDRDMDVMZVhxT0ZoWUZRNGVBN1lDSUU0KzJKZHFwSHhEV1hkQworU2M4VFBmODFwZTU5Q0t4MnBETllDZjdUcFNjCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkakNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUzTkRJek1UZ3hNalF3SGhjTk1qVXdNekU0TVRjeE5USTBXaGNOTXpVd016RTJNVGN4TlRJMApXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUzTkRJek1UZ3hNalF3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRSi9ndlFjbXphVG5XcHd3VlRYaUdNUGVqeWFnaWhtSUl5SU5iUHNtR0MKWWIxTWRqQ1RYZ3V4OUJrUUhJRWVQMEhvY1FuSEhpeUhGY1orb09iWGVPWlFvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWVqZDhzM1BzZkVCNmMra3Fya1J3Ckw5SG5DQUF3Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnZjJhekc0VEo5c084NXlPWE12NVNrcWczRTdsMFNTM3kKN2g3QzExcVlmSWdDSUJuTnBrR1d6QjFycVBzdHI0dGlSWGdmVE8vc3lnbXM2cm5WZjcwNzlpRncKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUc4NmJjVlJZYTVTQ2NUZ08zK0xQRHRDb1VRVS9VNm1DUEh3akhTN1BYMWtvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFTDZuVGhkSEp4WGZvOTVOaHVoUTN6eGRiTjhaVVVYT29BYjJXLysybDVUcElSMEh4emNDTgo3c2pUcEg2QnhvdzMyak1RUW83T2xSZXNwOVV0ZEVORk9BPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=

79  k3s-ansible-copia/molecule/README.md  Normal file
@@ -0,0 +1,79 @@
# Test suites for `k3s-ansible`

This folder contains the [molecule](https://molecule.rtfd.io/)-based test setup for this playbook.

## Scenarios

We have these scenarios:

- **default**:
  A 3 control + 2 worker node cluster based very closely on the [sample inventory](../inventory/sample/).
- **ipv6**:
  A cluster that is externally accessible via IPv6 ([more information](ipv6/README.md)).
  To save a bit of test time, this cluster is _not_ highly available; it consists of only one control and one worker node.
- **single_node**:
  Very similar to the default scenario, but uses only a single node for all cluster functionality.
- **calico**:
  The same as single_node, but uses the calico cni instead of flannel.
- **cilium**:
  The same as single_node, but uses the cilium cni instead of flannel.
- **kube-vip**:
  The same as single_node, but uses kube-vip as the service load balancer instead of MetalLB.

## How to execute

To test on your local machine, follow these steps:

### System requirements

Make sure that the following software packages are available on your system:

- [Python 3](https://www.python.org/downloads)
- [Vagrant](https://www.vagrantup.com/downloads)
- [VirtualBox](https://www.virtualbox.org/wiki/Downloads)

### Set up VirtualBox networking on Linux and macOS

_You can safely skip this if you are working on Windows._

The test cluster uses the `192.168.30.0/24` subnet, which is [not set up by VirtualBox automatically](https://www.virtualbox.org/manual/ch06.html#network_hostonly).
To set the subnet up for use with VirtualBox, please make sure that `/etc/vbox/networks.conf` exists and that it contains these lines:

```
* 192.168.30.0/24
* fdad:bad:ba55::/64
```

### Install Python dependencies

You will get [Molecule, Ansible and a few extra dependencies](../requirements.txt) via [pip](https://pip.pypa.io/).
Usually, it is advisable to work in a [virtual environment](https://docs.python.org/3/tutorial/venv.html) for this:

```bash
cd /path/to/k3s-ansible

# Create a virtualenv at ".env". You only need to do this once.
python3 -m venv .env

# Activate the virtualenv for your current shell session.
# If you start a new session, you will have to repeat this.
source .env/bin/activate

# Install the required packages into the virtualenv.
# These remain installed across shell sessions.
python3 -m pip install -r requirements.txt
```

### Run molecule

With the virtual environment from the previous step active in your shell session, you can now use molecule to test the playbook.
Interesting commands are:

- `molecule create`: Create virtual machines for the test cluster nodes.
- `molecule destroy`: Delete the virtual machines for the test cluster nodes.
- `molecule converge`: Run the `site` playbook on the nodes of the test cluster.
- `molecule side_effect`: Run the `reset` playbook on the nodes of the test cluster.
- `molecule verify`: Verify that the cluster works correctly.
- `molecule test`: The "all-in-one" sequence of steps that is executed in CI.
  This includes the `create`, `converge`, `verify`, `side_effect` and `destroy` steps.
  See [`molecule.yml`](default/molecule.yml) for more details, and the example below.
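
By default molecule runs the `default` scenario; the others can be selected by name. A quick sketch, using the scenario names listed above:

```bash
# Full CI-style test run for the default scenario
molecule test

# Target a specific scenario instead, e.g. the single node cluster
molecule test --scenario-name single_node
```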

49  k3s-ansible-copia/molecule/calico/molecule.yml  Normal file
@@ -0,0 +1,49 @@
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 4096
    cpus: 4
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: vagrant
      ssh.password: vagrant
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.62
provisioner:
  name: ansible
  env:
    ANSIBLE_VERBOSITY: 1
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy

16  k3s-ansible-copia/molecule/calico/overrides.yml  Normal file
@@ -0,0 +1,16 @@
---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See:
        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
        calico_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45

        # Make sure that our IP ranges do not collide with those of the other scenarios
        apiserver_endpoint: 192.168.30.224
        metal_lb_ip_range: 192.168.30.100-192.168.30.109

49  k3s-ansible-copia/molecule/cilium/molecule.yml  Normal file
@@ -0,0 +1,49 @@
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 4096
    cpus: 4
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: vagrant
      ssh.password: vagrant
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.63
provisioner:
  name: ansible
  env:
    ANSIBLE_VERBOSITY: 1
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy

16  k3s-ansible-copia/molecule/cilium/overrides.yml  Normal file
@@ -0,0 +1,16 @@
---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See:
        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
        cilium_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45

        # Make sure that our IP ranges do not collide with those of the other scenarios
        apiserver_endpoint: 192.168.30.225
        metal_lb_ip_range: 192.168.30.110-192.168.30.119

99  k3s-ansible-copia/molecule/default/molecule.yml  Normal file
@@ -0,0 +1,99 @@
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 1024
    cpus: 2
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.38
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: vagrant
      ssh.password: vagrant

  - name: control2
    box: generic/debian12
    memory: 1024
    cpus: 2
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.39

  - name: control3
    box: generic/rocky9
    memory: 1024
    cpus: 2
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.40

  - name: node1
    box: generic/ubuntu2204
    memory: 1024
    cpus: 2
    groups:
      - k3s_cluster
      - node
    interfaces:
      - network_name: private_network
        ip: 192.168.30.41
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: vagrant
      ssh.password: vagrant

  - name: node2
    box: generic/rocky9
    memory: 1024
    cpus: 2
    groups:
      - k3s_cluster
      - node
    interfaces:
      - network_name: private_network
        ip: 192.168.30.42

provisioner:
  name: ansible
  env:
    ANSIBLE_VERBOSITY: 1
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
12
k3s-ansible-copia/molecule/default/overrides.yml
Normal file
@ -0,0 +1,12 @@
---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See:
        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
        flannel_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45
22
k3s-ansible-copia/molecule/default/prepare.yml
Normal file
@ -0,0 +1,22 @@
---
- name: Apply overrides
  ansible.builtin.import_playbook: >-
    {{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml

- name: Network setup
  hosts: all
  tasks:
    - name: Disable firewalld
      when: ansible_distribution == "Rocky"
      # Rocky Linux comes with firewalld enabled. It blocks some of the network
      # connections needed for our k3s cluster. For our test setup, we just disable
      # it since the VM host's firewall is still active for connections to and from
      # the Internet.
      # When building your own cluster, please DO NOT blindly copy this. Instead,
      # please create a custom firewall configuration that fits your network design
      # and security needs.
      ansible.builtin.systemd:
        name: firewalld
        enabled: false
        state: stopped
      become: true
51
k3s-ansible-copia/molecule/ipv6/README.md
Normal file
@ -0,0 +1,51 @@
# Sample IPv6 configuration for `k3s-ansible`

This scenario contains a cluster configuration which is _IPv6 first_, but still supports dual-stack networking with IPv4 for most things.
This means:

- The API server VIP is an IPv6 address.
- The MetalLB pool consists of both IPv4 and IPv6 addresses.
- Nodes as well as cluster-internal resources (pods and services) are accessible via IPv4 as well as IPv6.

## Network design

All IPv6 addresses used in this scenario share a single `/48` prefix: `fdad:bad:ba55`.
The following subnets are used:

- `fdad:bad:ba55:`**`0`**`::/64` is the subnet which contains the cluster components meant for external access.
  That includes:

  - The VIP for the Kubernetes API server: `fdad:bad:ba55::333`
  - Services load-balanced by MetalLB: `fdad:bad:ba55::1b:0/112`
  - Cluster nodes: `fdad:bad:ba55::de:0/112`
  - The host executing Vagrant: `fdad:bad:ba55::1`

  In a home lab setup, this might be your LAN.

- `fdad:bad:ba55:`**`4200`**`::/56` is used internally by the cluster for pods.

- `fdad:bad:ba55:`**`4300`**`::/108` is used internally by the cluster for services.

IPv4 networking is also available:

- The nodes have addresses inside `192.168.123.0/24`.
  MetalLB also has a bit of address space in this range: `192.168.123.80-192.168.123.90`
- For pods and services, the k3s defaults (`10.42.0.0/16` and `10.43.0.0/16`) are used.

Note that the host running Vagrant is not part of any of these IPv4 networks.
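
As a minimal sketch (the authoritative values live in this scenario's `overrides.yml`), the address plan above translates into the following host variables:

```yaml
# IPv6 VIP for the Kubernetes API server
apiserver_endpoint: fdad:bad:ba55::333

# MetalLB gets both an IPv6 and an IPv4 pool
metal_lb_ip_range:
  - fdad:bad:ba55::1b:0/112
  - 192.168.123.80-192.168.123.90

# Pod and service subnets are handed to k3s as dual-stack CIDRs:
#   --cluster-cidr=10.42.0.0/16,fdad:bad:ba55:4200::/56
#   --service-cidr=10.43.0.0/16,fdad:bad:ba55:4300::/108
```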
3
k3s-ansible-copia/molecule/ipv6/host_vars/control1.yml
Normal file
@ -0,0 +1,3 @@
---
node_ipv4: 192.168.123.11
node_ipv6: fdad:bad:ba55::de:11
3
k3s-ansible-copia/molecule/ipv6/host_vars/control2.yml
Normal file
@ -0,0 +1,3 @@
---
node_ipv4: 192.168.123.12
node_ipv6: fdad:bad:ba55::de:12
3
k3s-ansible-copia/molecule/ipv6/host_vars/node1.yml
Normal file
@ -0,0 +1,3 @@
---
node_ipv4: 192.168.123.21
node_ipv6: fdad:bad:ba55::de:21
81
k3s-ansible-copia/molecule/ipv6/molecule.yml
Normal file
@ -0,0 +1,81 @@
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 1024
    cpus: 2
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: fdad:bad:ba55::de:11
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: vagrant
      ssh.password: vagrant

  - name: control2
    box: generic/ubuntu2204
    memory: 1024
    cpus: 2
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: fdad:bad:ba55::de:12
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: vagrant
      ssh.password: vagrant

  - name: node1
    box: generic/ubuntu2204
    memory: 1024
    cpus: 2
    groups:
      - k3s_cluster
      - node
    interfaces:
      - network_name: private_network
        ip: fdad:bad:ba55::de:21
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: vagrant
      ssh.password: vagrant
provisioner:
  name: ansible
  env:
    ANSIBLE_VERBOSITY: 1
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
51
k3s-ansible-copia/molecule/ipv6/overrides.yml
Normal file
@ -0,0 +1,51 @@
---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables (1/2)
      ansible.builtin.set_fact:
        # See:
        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
        flannel_iface: eth1

        # In this scenario, we have multiple interfaces that the VIP could be
        # broadcasted on. Since we have assigned a dedicated private network
        # here, let's make sure that it is used.
        kube_vip_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45

        # IPv6 configuration
        # ######################################################################

        # The API server will be reachable on IPv6 only
        apiserver_endpoint: fdad:bad:ba55::333

        # We give MetalLB address space for both IPv4 and IPv6
        metal_lb_ip_range:
          - fdad:bad:ba55::1b:0/112
          - 192.168.123.80-192.168.123.90

        # k3s_node_ip is by default set to the IPv4 address of flannel_iface.
        # We want IPv6 addresses here of course, so we just specify them
        # manually below.
        k3s_node_ip: "{{ node_ipv4 }},{{ node_ipv6 }}"

    - name: Override host variables (2/2)
      # Since "extra_args" depends on "k3s_node_ip" and "flannel_iface" we have
      # to set this AFTER overriding both of them.
      ansible.builtin.set_fact:
        # A few extra server args are necessary:
        # - the network policy needs to be disabled.
        # - we need to manually specify the subnets for services and pods, as
        #   the default has IPv4 ranges only.
        extra_server_args: >-
          {{ extra_args }}
          --tls-san {{ apiserver_endpoint }}
          {{ '--node-taint node-role.kubernetes.io/master=true:NoSchedule' if k3s_master_taint else '' }}
          --disable servicelb
          --disable traefik
          --disable-network-policy
          --cluster-cidr=10.42.0.0/16,fdad:bad:ba55:4200::/56
          --service-cidr=10.43.0.0/16,fdad:bad:ba55:4300::/108
51
k3s-ansible-copia/molecule/ipv6/prepare.yml
Normal file
@ -0,0 +1,51 @@
---
- name: Apply overrides
  ansible.builtin.import_playbook: >-
    {{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml

- name: Configure dual-stack networking
  hosts: all
  become: true

  # Unfortunately, as of 2022-09, Vagrant does not support the configuration
  # of both IPv4 and IPv6 addresses for a single network adapter. So we have
  # to configure that ourselves.
  # Moreover, we have to explicitly enable IPv6 for the loopback interface.

  tasks:
    - name: Enable IPv6 for network interfaces
      ansible.posix.sysctl:
        name: net.ipv6.conf.{{ item }}.disable_ipv6
        value: "0"
      with_items:
        - all
        - default
        - lo

    - name: Disable duplicate address detection
      # Duplicate address detection did repeatedly fail within the virtual
      # network. But since this setup does not use SLAAC anyway, we can safely
      # disable it.
      ansible.posix.sysctl:
        name: net.ipv6.conf.{{ item }}.accept_dad
        value: "0"
      with_items:
        - "{{ flannel_iface }}"

    - name: Write IPv4 configuration
      ansible.builtin.template:
        src: 55-flannel-ipv4.yaml.j2
        dest: /etc/netplan/55-flannel-ipv4.yaml
        owner: root
        group: root
        mode: "0644"
      register: netplan_template

    - name: Apply netplan configuration
      # Conceptually, this should be a handler rather than a task.
      # However, we are currently not in a role context - creating
      # one just for this seemed overkill.
      when: netplan_template.changed
      ansible.builtin.command:
        cmd: netplan apply
      changed_when: true
@ -0,0 +1,8 @@
---
network:
  version: 2
  renderer: networkd
  ethernets:
    {{ flannel_iface }}:
      addresses:
        - {{ node_ipv4 }}/24
49
k3s-ansible-copia/molecule/kube-vip/molecule.yml
Normal file
@ -0,0 +1,49 @@
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 4096
    cpus: 4
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: vagrant
      ssh.password: vagrant
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.62
provisioner:
  name: ansible
  env:
    ANSIBLE_VERBOSITY: 1
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
17
k3s-ansible-copia/molecule/kube-vip/overrides.yml
Normal file
@ -0,0 +1,17 @@
---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See:
        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
        flannel_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45

        # Make sure that our IP ranges do not collide with those of the other scenarios
        apiserver_endpoint: 192.168.30.225
        # Use kube-vip instead of MetalLB
        kube_vip_lb_ip_range: 192.168.30.110-192.168.30.119
7
k3s-ansible-copia/molecule/resources/converge.yml
Normal file
@ -0,0 +1,7 @@
---
- name: Apply overrides
  ansible.builtin.import_playbook: >-
    {{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml

- name: Converge
  ansible.builtin.import_playbook: ../../site.yml
7
k3s-ansible-copia/molecule/resources/reset.yml
Normal file
@ -0,0 +1,7 @@
---
- name: Apply overrides
  ansible.builtin.import_playbook: >-
    {{ lookup("ansible.builtin.env", "MOLECULE_SCENARIO_DIRECTORY") }}/overrides.yml

- name: Reset
  ansible.builtin.import_playbook: ../../reset.yml
5
k3s-ansible-copia/molecule/resources/verify.yml
Normal file
@ -0,0 +1,5 @@
---
- name: Verify
  hosts: all
  roles:
    - verify_from_outside
@ -0,0 +1,9 @@
---
# A host outside of the cluster from which the checks shall be performed
outside_host: localhost

# This kubernetes namespace will be used for testing
testing_namespace: molecule-verify-from-outside

# The directory in which the example manifests reside
example_manifests_path: ../../../example
@ -0,0 +1,5 @@
---
- name: Clean up kubecfg
  ansible.builtin.file:
    path: "{{ kubecfg.path }}"
    state: absent
@ -0,0 +1,19 @@
---
- name: Create temporary directory for kubecfg
  ansible.builtin.tempfile:
    state: directory
    suffix: kubecfg
  register: kubecfg
- name: Gathering facts
  delegate_to: "{{ groups['master'][0] }}"
  ansible.builtin.gather_facts:
- name: Download kubecfg
  ansible.builtin.fetch:
    src: "{{ ansible_env.HOME }}/.kube/config"
    dest: "{{ kubecfg.path }}/"
    flat: true
  delegate_to: "{{ groups['master'][0] }}"
  delegate_facts: true
- name: Store path to kubecfg
  ansible.builtin.set_fact:
    kubecfg_path: "{{ kubecfg.path }}/config"
@ -0,0 +1,14 @@
---
- name: Verify
  run_once: true
  delegate_to: "{{ outside_host }}"
  block:
    - name: "TEST CASE: Get kube config"
      ansible.builtin.import_tasks: kubecfg-fetch.yml
    - name: "TEST CASE: Get nodes"
      ansible.builtin.include_tasks: test/get-nodes.yml
    - name: "TEST CASE: Deploy example"
      ansible.builtin.include_tasks: test/deploy-example.yml
  always:
    - name: "TEST CASE: Cleanup"
      ansible.builtin.import_tasks: kubecfg-cleanup.yml
@ -0,0 +1,58 @@
---
- name: Deploy example
  block:
    - name: "Create namespace: {{ testing_namespace }}"
      kubernetes.core.k8s:
        api_version: v1
        kind: Namespace
        name: "{{ testing_namespace }}"
        state: present
        wait: true
        kubeconfig: "{{ kubecfg_path }}"

    - name: Apply example manifests
      kubernetes.core.k8s:
        src: "{{ example_manifests_path }}/{{ item }}"
        namespace: "{{ testing_namespace }}"
        state: present
        wait: true
        kubeconfig: "{{ kubecfg_path }}"
      with_items:
        - deployment.yml
        - service.yml

    - name: Get info about nginx service
      kubernetes.core.k8s_info:
        kind: service
        name: nginx
        namespace: "{{ testing_namespace }}"
        kubeconfig: "{{ kubecfg_path }}"
      vars:
        metallb_ip: status.loadBalancer.ingress[0].ip
        metallb_port: spec.ports[0].port
      register: nginx_services

    - name: Assert that the nginx welcome page is available
      ansible.builtin.uri:
        url: http://{{ ip | ansible.utils.ipwrap }}:{{ port_ }}/
        return_content: true
      register: result
      failed_when: "'Welcome to nginx!' not in result.content"
      vars:
        ip: >-
          {{ nginx_services.resources[0].status.loadBalancer.ingress[0].ip }}
        port_: >-
          {{ nginx_services.resources[0].spec.ports[0].port }}
      # Deactivated linter rules:
      #   - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
      #     would be undefined. This will not be the case during playbook execution.
      # noqa jinja[invalid]

  always:
    - name: "Remove namespace: {{ testing_namespace }}"
      kubernetes.core.k8s:
        api_version: v1
        kind: Namespace
        name: "{{ testing_namespace }}"
        state: absent
        kubeconfig: "{{ kubecfg_path }}"
@ -0,0 +1,28 @@
---
- name: Get all nodes in cluster
  kubernetes.core.k8s_info:
    kind: node
    kubeconfig: "{{ kubecfg_path }}"
  register: cluster_nodes

- name: Assert that the cluster contains exactly the expected nodes
  ansible.builtin.assert:
    that: found_nodes == expected_nodes
    success_msg: "Found nodes as expected: {{ found_nodes }}"
    fail_msg: Expected nodes {{ expected_nodes }}, but found nodes {{ found_nodes }}
  vars:
    found_nodes: >-
      {{ cluster_nodes | json_query('resources[*].metadata.name') | unique | sort }}
    expected_nodes: |-
      {{
        (
          ( groups['master'] | default([]) ) +
          ( groups['node'] | default([]) )
        )
        | unique
        | sort
      }}
  # Deactivated linter rules:
  #   - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
  #     would be undefined. This will not be the case during playbook execution.
  # noqa jinja[invalid]
49
k3s-ansible-copia/molecule/single_node/molecule.yml
Normal file
@ -0,0 +1,49 @@
---
dependency:
  name: galaxy
driver:
  name: vagrant
platforms:
  - name: control1
    box: generic/ubuntu2204
    memory: 4096
    cpus: 4
    config_options:
      # We currently can not use public-key based authentication on Ubuntu 22.04,
      # see: https://github.com/chef/bento/issues/1405
      ssh.username: vagrant
      ssh.password: vagrant
    groups:
      - k3s_cluster
      - master
    interfaces:
      - network_name: private_network
        ip: 192.168.30.50
provisioner:
  name: ansible
  env:
    ANSIBLE_VERBOSITY: 1
  playbooks:
    converge: ../resources/converge.yml
    side_effect: ../resources/reset.yml
    verify: ../resources/verify.yml
  inventory:
    links:
      group_vars: ../../inventory/sample/group_vars
scenario:
  test_sequence:
    - dependency
    - cleanup
    - destroy
    - syntax
    - create
    - prepare
    - converge
    # idempotence is not possible with the playbook in its current form.
    - verify
    # We are repurposing side_effect here to test the reset playbook.
    # This is why we do not run it before verify (which tests the cluster),
    # but after the verify step.
    - side_effect
    - cleanup
    - destroy
16
k3s-ansible-copia/molecule/single_node/overrides.yml
Normal file
@ -0,0 +1,16 @@
---
- name: Apply overrides
  hosts: all
  tasks:
    - name: Override host variables
      ansible.builtin.set_fact:
        # See:
        # https://github.com/flannel-io/flannel/blob/67d603aaf45ef80f5dd39f43714fc5e6f8a637eb/Documentation/troubleshooting.md#Vagrant
        flannel_iface: eth1

        # The test VMs might be a bit slow, so we give them more time to join the cluster:
        retry_count: 45

        # Make sure that our IP ranges do not collide with those of the default scenario
        apiserver_endpoint: 192.168.30.223
        metal_lb_ip_range: 192.168.30.91-192.168.30.99
3
k3s-ansible-copia/reboot.sh
Executable file
@ -0,0 +1,3 @@
#!/bin/bash

ansible-playbook reboot.yml
10
k3s-ansible-copia/reboot.yml
Normal file
@ -0,0 +1,10 @@
---
- name: Reboot k3s_cluster
  hosts: k3s_cluster
  gather_facts: true
  tasks:
    - name: Reboot the nodes (and wait up to 5 mins max)
      become: true
      ansible.builtin.reboot:
        reboot_command: "{{ custom_reboot_command | default(omit) }}"
        reboot_timeout: 300
10
k3s-ansible-copia/requirements.in
Normal file
@ -0,0 +1,10 @@
ansible-core>=2.16.2
jmespath>=1.0.1
jsonpatch>=1.33
kubernetes>=29.0.0
molecule-plugins[vagrant]
molecule>=6.0.3
netaddr>=0.10.1
pre-commit>=3.6.0
pre-commit-hooks>=4.5.0
pyyaml>=6.0.1
169
k3s-ansible-copia/requirements.txt
Normal file
@ -0,0 +1,169 @@
#
# This file is autogenerated by pip-compile with Python 3.11
# by the following command:
#
#    pip-compile requirements.in
#
ansible-compat==4.1.11
    # via molecule
ansible-core==2.18.0
    # via
    #   -r requirements.in
    #   ansible-compat
    #   molecule
attrs==23.2.0
    # via
    #   jsonschema
    #   referencing
bracex==2.4
    # via wcmatch
cachetools==5.3.2
    # via google-auth
certifi==2023.11.17
    # via
    #   kubernetes
    #   requests
cffi==1.16.0
    # via cryptography
cfgv==3.4.0
    # via pre-commit
charset-normalizer==3.3.2
    # via requests
click==8.1.7
    # via
    #   click-help-colors
    #   molecule
click-help-colors==0.9.4
    # via molecule
cryptography==41.0.7
    # via ansible-core
distlib==0.3.8
    # via virtualenv
enrich==1.2.7
    # via molecule
filelock==3.13.1
    # via virtualenv
google-auth==2.26.2
    # via kubernetes
identify==2.5.33
    # via pre-commit
idna==3.6
    # via requests
jinja2==3.1.3
    # via
    #   ansible-core
    #   molecule
jmespath==1.0.1
    # via -r requirements.in
jsonpatch==1.33
    # via -r requirements.in
jsonpointer==2.4
    # via jsonpatch
jsonschema==4.21.1
    # via
    #   ansible-compat
    #   molecule
jsonschema-specifications==2023.12.1
    # via jsonschema
kubernetes==29.0.0
    # via -r requirements.in
markdown-it-py==3.0.0
    # via rich
markupsafe==2.1.4
    # via jinja2
mdurl==0.1.2
    # via markdown-it-py
molecule==6.0.3
    # via
    #   -r requirements.in
    #   molecule-plugins
molecule-plugins[vagrant]==23.5.3
    # via -r requirements.in
netaddr==0.10.1
    # via -r requirements.in
nodeenv==1.8.0
    # via pre-commit
oauthlib==3.2.2
    # via
    #   kubernetes
    #   requests-oauthlib
packaging==23.2
    # via
    #   ansible-compat
    #   ansible-core
    #   molecule
platformdirs==4.1.0
    # via virtualenv
pluggy==1.3.0
    # via molecule
pre-commit==3.8.0
    # via -r requirements.in
pre-commit-hooks==4.6.0
    # via -r requirements.in
pyasn1==0.5.1
    # via
    #   pyasn1-modules
    #   rsa
pyasn1-modules==0.3.0
    # via google-auth
pycparser==2.21
    # via cffi
pygments==2.17.2
    # via rich
python-dateutil==2.8.2
    # via kubernetes
python-vagrant==1.0.0
    # via molecule-plugins
pyyaml==6.0.2
    # via
    #   -r requirements.in
    #   ansible-compat
    #   ansible-core
    #   kubernetes
    #   molecule
    #   pre-commit
referencing==0.32.1
    # via
    #   jsonschema
    #   jsonschema-specifications
requests==2.31.0
    # via
    #   kubernetes
    #   requests-oauthlib
requests-oauthlib==1.3.1
    # via kubernetes
resolvelib==1.0.1
    # via ansible-core
rich==13.7.0
    # via
    #   enrich
    #   molecule
rpds-py==0.17.1
    # via
    #   jsonschema
    #   referencing
rsa==4.9
    # via google-auth
ruamel-yaml==0.18.5
    # via pre-commit-hooks
ruamel-yaml-clib==0.2.8
    # via ruamel-yaml
six==1.16.0
    # via
    #   kubernetes
    #   python-dateutil
subprocess-tee==0.4.1
    # via ansible-compat
urllib3==2.1.0
    # via
    #   kubernetes
    #   requests
virtualenv==20.25.0
    # via pre-commit
wcmatch==8.5
    # via molecule
websocket-client==1.7.0
    # via kubernetes

# The following packages are considered to be unsafe in a requirements file:
# setuptools
3
k3s-ansible-copia/reset.sh
Executable file
@ -0,0 +1,3 @@
#!/bin/bash

ansible-playbook reset.yml
25
k3s-ansible-copia/reset.yml
Normal file
@ -0,0 +1,25 @@
---
- name: Reset k3s cluster
  hosts: k3s_cluster
  gather_facts: true
  roles:
    - role: reset
      become: true
    - role: raspberrypi
      become: true
      vars: { state: absent }
  post_tasks:
    - name: Reboot and wait for node to come back up
      become: true
      ansible.builtin.reboot:
        reboot_command: "{{ custom_reboot_command | default(omit) }}"
        reboot_timeout: 3600

- name: Revert changes to Proxmox cluster
  hosts: proxmox
  gather_facts: true
  become: true
  remote_user: "{{ proxmox_lxc_ssh_user }}"
  roles:
    - role: reset_proxmox_lxc
      when: proxmox_lxc_configure
8
k3s-ansible-copia/roles/download/meta/main.yml
Normal file
@ -0,0 +1,8 @@
---
argument_specs:
  main:
    short_description: Manage the downloading of K3S binaries
    options:
      k3s_version:
        description: The desired version of K3S
        required: true
34
k3s-ansible-copia/roles/download/tasks/main.yml
Normal file
@ -0,0 +1,34 @@
---
- name: Download k3s binary x64
  ansible.builtin.get_url:
    url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
    checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt
    dest: /usr/local/bin/k3s
    owner: root
    group: root
    mode: "0755"
  when: ansible_facts.architecture == "x86_64"

- name: Download k3s binary arm64
  ansible.builtin.get_url:
    url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-arm64
    checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm64.txt
    dest: /usr/local/bin/k3s
    owner: root
    group: root
    mode: "0755"
  when:
    - ( ansible_facts.architecture is search("arm") and ansible_facts.userspace_bits == "64" )
      or ansible_facts.architecture is search("aarch64")

- name: Download k3s binary armhf
  ansible.builtin.get_url:
    url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-armhf
    checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm.txt
    dest: /usr/local/bin/k3s
    owner: root
    group: root
    mode: "0755"
  when:
    - ansible_facts.architecture is search("arm")
    - ansible_facts.userspace_bits == "32"
3
k3s-ansible-copia/roles/k3s/node/defaults/main.yml
Normal file
@ -0,0 +1,3 @@
---
# Name of the master group
group_name_master: master
4
k3s-ansible-copia/roles/k3s_agent/defaults/main.yml
Normal file
@ -0,0 +1,4 @@
---
extra_agent_args: ""
group_name_master: master
systemd_dir: /etc/systemd/system
39
k3s-ansible-copia/roles/k3s_agent/meta/main.yml
Normal file
@ -0,0 +1,39 @@
---
argument_specs:
  main:
    short_description: Setup k3s agents
    options:
      apiserver_endpoint:
        description: Virtual ip-address configured on each master
        required: true

      extra_agent_args:
        description: Extra arguments for agent nodes

      group_name_master:
        description: Name of the master group
        default: master

      k3s_token:
        description: Token used to communicate between masters

      proxy_env:
        type: dict
        description:
          - Internet proxy configurations.
          - See https://docs.k3s.io/advanced#configuring-an-http-proxy for details
        default: ~
        options:
          HTTP_PROXY:
            description: HTTP internet proxy
            required: true
          HTTPS_PROXY:
            description: HTTPS internet proxy
            required: true
          NO_PROXY:
            description: Addresses that will not use the proxies
            required: true

      systemd_dir:
        description: Path to systemd services
        default: /etc/systemd/system
18
k3s-ansible-copia/roles/k3s_agent/tasks/http_proxy.yml
Normal file
@ -0,0 +1,18 @@
---
- name: Create k3s-node.service.d directory
  ansible.builtin.file:
    path: "{{ systemd_dir }}/k3s-node.service.d"
    state: directory
    owner: root
    group: root
    mode: "0755"
  when: proxy_env is defined

- name: Copy K3s http_proxy conf file
  ansible.builtin.template:
    src: http_proxy.conf.j2
    dest: "{{ systemd_dir }}/k3s-node.service.d/http_proxy.conf"
    owner: root
    group: root
    mode: "0755"
  when: proxy_env is defined
33
k3s-ansible-copia/roles/k3s_agent/tasks/main.yml
Normal file
@ -0,0 +1,33 @@
---
- name: Check for PXE-booted system
  block:
    - name: Check if system is PXE-booted
      ansible.builtin.command:
        cmd: cat /proc/cmdline
      register: boot_cmdline
      changed_when: false
      check_mode: false

    - name: Set fact for PXE-booted system
      ansible.builtin.set_fact:
        is_pxe_booted: "{{ 'root=/dev/nfs' in boot_cmdline.stdout }}"
      when: boot_cmdline.stdout is defined

- name: Deploy K3s http_proxy conf
  ansible.builtin.include_tasks: http_proxy.yml
  when: proxy_env is defined

- name: Configure the k3s service
  ansible.builtin.template:
    src: k3s.service.j2
    dest: "{{ systemd_dir }}/k3s-node.service"
    owner: root
    group: root
    mode: "0755"

- name: Manage k3s service
  ansible.builtin.systemd:
    name: k3s-node
    daemon_reload: true
    state: restarted
    enabled: true
@ -0,0 +1,4 @@
[Service]
Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }}
Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }}
Environment=NO_PROXY={{ proxy_env.NO_PROXY }}
27
k3s-ansible-copia/roles/k3s_agent/templates/k3s.service.j2
Normal file
@ -0,0 +1,27 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target

[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
# Conditional snapshotter based on PXE boot status
ExecStart=/usr/local/bin/k3s agent \
    --server https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443 \
    {% if is_pxe_booted | default(false) %}--snapshotter native \
    {% endif %}--token {{ hostvars[groups[group_name_master | default('master')][0]]['token'] | default(k3s_token) }} \
    {{ extra_agent_args }}
KillMode=process
Delegate=yes
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s

[Install]
WantedBy=multi-user.target
20
k3s-ansible-copia/roles/k3s_custom_registries/meta/main.yml
Normal file
@ -0,0 +1,20 @@
---
argument_specs:
  main:
    short_description: Configure the use of a custom container registry
    options:
      custom_registries_yaml:
        description:
          - YAML block defining custom registries.
          - >
            The following is an example that pulls all images used in
            this playbook through your private registries.
          - >
            It also allows you to pull your own images from your private
            registry, without having to use imagePullSecrets in your
            deployments.
          - >
            If all you need is your own images and you don't care about
            caching the docker/quay/ghcr.io images, you can just remove
            those from the mirrors: section.
        required: true
16
k3s-ansible-copia/roles/k3s_custom_registries/tasks/main.yml
Normal file
@ -0,0 +1,16 @@
---
- name: Create directory /etc/rancher/k3s
  ansible.builtin.file:
    path: /etc/{{ item }}
    state: directory
    mode: "0755"
  loop:
    - rancher
    - rancher/k3s

- name: Insert registries into /etc/rancher/k3s/registries.yaml
  ansible.builtin.blockinfile:
    path: /etc/rancher/k3s/registries.yaml
    block: "{{ custom_registries_yaml }}"
    mode: "0600"
    create: true
40
k3s-ansible-copia/roles/k3s_server/defaults/main.yml
Normal file
@ -0,0 +1,40 @@
---
extra_server_args: ""

k3s_kubectl_binary: k3s kubectl

group_name_master: master

kube_vip_arp: true
kube_vip_iface:
kube_vip_cloud_provider_tag_version: main
kube_vip_tag_version: v0.7.2

kube_vip_bgp: false
kube_vip_bgp_routerid: 127.0.0.1
kube_vip_bgp_as: "64513"
kube_vip_bgp_peeraddress: 192.168.30.1
kube_vip_bgp_peeras: "64512"

kube_vip_bgp_peers: []
kube_vip_bgp_peers_groups: ['k3s_master']

metal_lb_controller_tag_version: v0.14.3
metal_lb_speaker_tag_version: v0.14.3
metal_lb_type: native

retry_count: 20

# yamllint disable rule:line-length
server_init_args: >-
  {% if groups[group_name_master | default('master')] | length > 1 %}
  {% if ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname'] %}
  --cluster-init
  {% else %}
  --server https://{{ hostvars[groups[group_name_master | default('master')][0]].k3s_node_ip | split(",") | first | ansible.utils.ipwrap }}:6443
  {% endif %}
  --token {{ k3s_token }}
  {% endif %}
  {{ extra_server_args }}

systemd_dir: /etc/systemd/system
135
k3s-ansible-copia/roles/k3s_server/meta/main.yml
Normal file
@ -0,0 +1,135 @@
---
argument_specs:
  main:
    short_description: Setup k3s servers
    options:
      apiserver_endpoint:
        description: Virtual ip-address configured on each master
        required: true

      cilium_bgp:
        description:
          - Enable cilium BGP control plane for LB services and pod cidrs.
          - Disables the use of MetalLB.
        type: bool
        default: ~

      cilium_iface:
        description: The network interface used when Cilium is enabled
        default: ~

      extra_server_args:
        description: Extra arguments for server nodes
        default: ""

      group_name_master:
        description: Name of the master group
        default: master

      k3s_create_kubectl_symlink:
        description: Create the kubectl -> k3s symlink
        default: false
        type: bool

      k3s_create_crictl_symlink:
        description: Create the crictl -> k3s symlink
        default: false
        type: bool

      kube_vip_arp:
        description: Enables kube-vip ARP broadcasts
        default: true
        type: bool

      kube_vip_bgp:
        description: Enables kube-vip BGP peering
        default: false
        type: bool

      kube_vip_bgp_routerid:
        description: Defines the router ID for the kube-vip BGP server
        default: "127.0.0.1"

      kube_vip_bgp_as:
        description: Defines the AS for the kube-vip BGP server
        default: "64513"

      kube_vip_bgp_peeraddress:
        description: Defines the address for the kube-vip BGP peer
        default: "192.168.30.1"

      kube_vip_bgp_peeras:
        description: Defines the AS for the kube-vip BGP peer
        default: "64512"

      kube_vip_bgp_peers:
        description: List of BGP peer ASN & address pairs
        default: []

      kube_vip_bgp_peers_groups:
        description: Inventory groups in which to search for additional kube_vip_bgp_peers parameters to merge.
        default: ['k3s_master']

      kube_vip_iface:
        description:
          - Explicitly define an interface that ALL control nodes
          - should use to propagate the VIP.
          - Otherwise, kube-vip will determine the right interface
          - automatically at runtime.
        default: ~

      kube_vip_tag_version:
        description: Image tag for kube-vip
        default: v0.7.2

      kube_vip_cloud_provider_tag_version:
        description: Tag for kube-vip-cloud-provider manifest when enabled
        default: main

      kube_vip_lb_ip_range:
        description: IP range for kube-vip load balancer
        default: ~

      metal_lb_controller_tag_version:
        description: Image tag for MetalLB
        default: v0.14.3

      metal_lb_speaker_tag_version:
        description: Image tag for MetalLB
        default: v0.14.3

      metal_lb_type:
        choices:
          - frr
          - native
        default: native
        description: Use FRR mode or native. Valid values are `frr` and `native`.

      proxy_env:
        type: dict
        description:
          - Internet proxy configurations.
          - See https://docs.k3s.io/advanced#configuring-an-http-proxy for details
        default: ~
        options:
          HTTP_PROXY:
            description: HTTP internet proxy
            required: true
          HTTPS_PROXY:
            description: HTTPS internet proxy
            required: true
          NO_PROXY:
            description: Addresses that will not use the proxies
            required: true

      retry_count:
        description: Amount of retries when verifying that nodes joined
        type: int
        default: 20

      server_init_args:
        description: Arguments for server nodes

      systemd_dir:
        description: Path to systemd services
        default: /etc/systemd/system
@ -0,0 +1,28 @@
---
# Download logs of k3s-init.service from the nodes to localhost.
# Note that log_destination must be set.

- name: Fetch k3s-init.service logs
  ansible.builtin.command:
    cmd: journalctl --all --unit=k3s-init.service
  changed_when: false
  register: k3s_init_log

- name: Create {{ log_destination }}
  delegate_to: localhost
  run_once: true
  become: false
  ansible.builtin.file:
    path: "{{ log_destination }}"
    state: directory
    mode: "0755"

- name: Store logs to {{ log_destination }}
  delegate_to: localhost
  become: false
  ansible.builtin.template:
    src: content.j2
    dest: "{{ log_destination }}/k3s-init@{{ ansible_hostname }}.log"
    mode: "0644"
  vars:
    content: "{{ k3s_init_log.stdout }}"
16
k3s-ansible-copia/roles/k3s_server/tasks/http_proxy.yml
Normal file
@ -0,0 +1,16 @@
---
- name: Create k3s.service.d directory
  ansible.builtin.file:
    path: "{{ systemd_dir }}/k3s.service.d"
    state: directory
    owner: root
    group: root
    mode: "0755"

- name: Copy K3s http_proxy conf file
  ansible.builtin.template:
    src: http_proxy.conf.j2
    dest: "{{ systemd_dir }}/k3s.service.d/http_proxy.conf"
    owner: root
    group: root
    mode: "0755"
27
k3s-ansible-copia/roles/k3s_server/tasks/kube-vip.yml
Normal file
@ -0,0 +1,27 @@
---
- name: Create manifests directory on first master
  ansible.builtin.file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Download vip cloud provider manifest to first master
  ansible.builtin.get_url:
    url: https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/{{ kube_vip_cloud_provider_tag_version | default('main') }}/manifest/kube-vip-cloud-controller.yaml # noqa yaml[line-length]
    dest: /var/lib/rancher/k3s/server/manifests/kube-vip-cloud-controller.yaml
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Copy kubevip configMap manifest to first master
  ansible.builtin.template:
    src: kubevip.yaml.j2
    dest: /var/lib/rancher/k3s/server/manifests/kubevip.yaml
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
173
k3s-ansible-copia/roles/k3s_server/tasks/main.yml
Normal file
@ -0,0 +1,173 @@
---
- name: Stop k3s-init
  ansible.builtin.systemd:
    name: k3s-init
    state: stopped
  failed_when: false

# k3s-init won't work if the port is already in use
- name: Stop k3s
  ansible.builtin.systemd:
    name: k3s
    state: stopped
  failed_when: false

- name: Clean previous runs of k3s-init # noqa command-instead-of-module
  # The systemd module does not support "reset-failed", so we need to resort to command.
  ansible.builtin.command: systemctl reset-failed k3s-init
  failed_when: false
  changed_when: false

- name: Deploy K3s http_proxy conf
  ansible.builtin.include_tasks: http_proxy.yml
  when: proxy_env is defined

- name: Deploy vip manifest
  ansible.builtin.include_tasks: vip.yml
- name: Deploy metallb manifest
  ansible.builtin.include_tasks: metallb.yml
  tags: metallb
  when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)

- name: Deploy kube-vip manifest
  ansible.builtin.include_tasks: kube-vip.yml
  tags: kubevip
  when: kube_vip_lb_ip_range is defined

- name: Init cluster inside the transient k3s-init service
  ansible.builtin.command:
    cmd: systemd-run -p RestartSec=2 -p Restart=on-failure --unit=k3s-init k3s server {{ server_init_args }}
    creates: "{{ systemd_dir }}/k3s-init.service"

- name: Verification
  when: not ansible_check_mode
  block:
    - name: Verify that all nodes actually joined (check k3s-init.service if this fails)
      ansible.builtin.command:
        cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} get nodes -l 'node-role.kubernetes.io/master=true' -o=jsonpath='{.items[*].metadata.name}'" # yamllint disable-line rule:line-length
      register: nodes
      until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups[group_name_master | default('master')] | length) # yamllint disable-line rule:line-length
      retries: "{{ retry_count | default(20) }}"
      delay: 10
      changed_when: false
  always:
    - name: Save logs of k3s-init.service
      ansible.builtin.include_tasks: fetch_k3s_init_logs.yml
      when: log_destination
      vars:
        log_destination: >-
          {{ lookup('ansible.builtin.env', 'ANSIBLE_K3S_LOG_DIR', default=False) }}
    - name: Kill the temporary service used for initialization
      ansible.builtin.systemd:
        name: k3s-init
        state: stopped
      failed_when: false

- name: Copy K3s service file
  register: k3s_service
  ansible.builtin.template:
    src: k3s.service.j2
    dest: "{{ systemd_dir }}/k3s.service"
    owner: root
    group: root
    mode: "0644"

- name: Enable and check K3s service
  ansible.builtin.systemd:
    name: k3s
    daemon_reload: true
    state: restarted
    enabled: true

- name: Wait for node-token
  ansible.builtin.wait_for:
    path: /var/lib/rancher/k3s/server/node-token

- name: Register node-token file access mode
  ansible.builtin.stat:
    path: /var/lib/rancher/k3s/server
  register: p

- name: Change file access node-token
  ansible.builtin.file:
    path: /var/lib/rancher/k3s/server
    mode: g+rx,o+rx

- name: Read node-token from master
  ansible.builtin.slurp:
    src: /var/lib/rancher/k3s/server/node-token
  register: node_token

- name: Store Master node-token
  ansible.builtin.set_fact:
    token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"

- name: Restore node-token file access
  ansible.builtin.file:
    path: /var/lib/rancher/k3s/server
    mode: "{{ p.stat.mode }}"

- name: Create directory .kube
  ansible.builtin.file:
    path: "{{ ansible_user_dir }}/.kube"
    state: directory
    owner: "{{ ansible_user_id }}"
    mode: u=rwx,g=rx,o=

- name: Copy config file to user home directory
  ansible.builtin.copy:
    src: /etc/rancher/k3s/k3s.yaml
    dest: "{{ ansible_user_dir }}/.kube/config"
    remote_src: true
    owner: "{{ ansible_user_id }}"
    mode: u=rw,g=,o=

- name: Configure kubectl cluster to {{ endpoint_url }}
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} config set-cluster default
      --server={{ endpoint_url }}
      --kubeconfig {{ ansible_user_dir }}/.kube/config
  changed_when: true
  vars:
    endpoint_url: >-
      https://{{ apiserver_endpoint | ansible.utils.ipwrap }}:6443
  # Deactivated linter rules:
  #   - jinja[invalid]: As of version 6.6.0, ansible-lint complains that the input to ipwrap
  #     would be undefined. This will not be the case during playbook execution.
  # noqa jinja[invalid]

- name: Create kubectl symlink
  ansible.builtin.file:
    src: /usr/local/bin/k3s
    dest: /usr/local/bin/kubectl
    state: link
  when: k3s_create_kubectl_symlink | default(true) | bool

- name: Create crictl symlink
  ansible.builtin.file:
    src: /usr/local/bin/k3s
    dest: /usr/local/bin/crictl
    state: link
  when: k3s_create_crictl_symlink | default(true) | bool

- name: Get contents of manifests folder
  ansible.builtin.find:
    paths: /var/lib/rancher/k3s/server/manifests
    file_type: file
  register: k3s_server_manifests

- name: Get sub dirs of manifests folder
  ansible.builtin.find:
    paths: /var/lib/rancher/k3s/server/manifests
    file_type: directory
  register: k3s_server_manifests_directories

- name: Remove manifests and folders that are only needed for bootstrapping cluster so k3s doesn't auto apply on start
  ansible.builtin.file:
    path: "{{ item.path }}"
    state: absent
  with_items:
    - "{{ k3s_server_manifests.files }}"
    - "{{ k3s_server_manifests_directories.files }}"
  loop_control:
    label: "{{ item.path }}"
30
k3s-ansible-copia/roles/k3s_server/tasks/metallb.yml
Normal file
@ -0,0 +1,30 @@
---
- name: Create manifests directory on first master
  ansible.builtin.file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: "Download to first master: manifest for metallb-{{ metal_lb_type }}"
  ansible.builtin.get_url:
    url: https://raw.githubusercontent.com/metallb/metallb/{{ metal_lb_controller_tag_version }}/config/manifests/metallb-{{ metal_lb_type }}.yaml # noqa yaml[line-length]
    dest: /var/lib/rancher/k3s/server/manifests/metallb-crds.yaml
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Set image versions in manifest for metallb-{{ metal_lb_type }}
  ansible.builtin.replace:
    path: /var/lib/rancher/k3s/server/manifests/metallb-crds.yaml
    regexp: "{{ item.change | ansible.builtin.regex_escape }}"
    replace: "{{ item.to }}"
  with_items:
    - change: metallb/speaker:{{ metal_lb_controller_tag_version }}
      to: metallb/speaker:{{ metal_lb_speaker_tag_version }}
  loop_control:
    label: "{{ item.change }} => {{ item.to }}"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
31
k3s-ansible-copia/roles/k3s_server/tasks/vip.yml
Normal file
@ -0,0 +1,31 @@
---
- name: Set _kube_vip_bgp_peers fact
  ansible.builtin.set_fact:
    _kube_vip_bgp_peers: "{{ lookup('community.general.merge_variables', '^kube_vip_bgp_peers__.+$', initial_value=kube_vip_bgp_peers, groups=kube_vip_bgp_peers_groups) }}" # yamllint disable-line rule:line-length

- name: Create manifests directory on first master
  ansible.builtin.file:
    path: /var/lib/rancher/k3s/server/manifests
    state: directory
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Download vip rbac manifest to first master
  ansible.builtin.get_url:
    url: https://kube-vip.io/manifests/rbac.yaml
    dest: /var/lib/rancher/k3s/server/manifests/vip-rbac.yaml
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']

- name: Copy vip manifest to first master
  ansible.builtin.template:
    src: vip.yaml.j2
    dest: /var/lib/rancher/k3s/server/manifests/vip.yaml
    owner: root
    group: root
    mode: "0644"
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
5
k3s-ansible-copia/roles/k3s_server/templates/content.j2
Normal file
@ -0,0 +1,5 @@
{#
This is a really simple template that just outputs the
value of the "content" variable.
#}
{{ content }}
@ -0,0 +1,4 @@
[Service]
Environment=HTTP_PROXY={{ proxy_env.HTTP_PROXY }}
Environment=HTTPS_PROXY={{ proxy_env.HTTPS_PROXY }}
Environment=NO_PROXY={{ proxy_env.NO_PROXY }}
24
k3s-ansible-copia/roles/k3s_server/templates/k3s.service.j2
Normal file
@ -0,0 +1,24 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target

[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s server {{ extra_server_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s

[Install]
WantedBy=multi-user.target
13
k3s-ansible-copia/roles/k3s_server/templates/kubevip.yaml.j2
Normal file
@ -0,0 +1,13 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubevip
  namespace: kube-system
data:
{% if kube_vip_lb_ip_range is string %}
{# kube_vip_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set kube_vip_lb_ip_range = [kube_vip_lb_ip_range] %}
{% endif %}
  range-global: {{ kube_vip_lb_ip_range | join(',') }}
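
For illustration, given a hypothetical legacy value such as kube_vip_lb_ip_range: 192.168.30.80-192.168.30.90 (a single string), the template above first wraps it in a one-element list and then joins it, so the rendered ConfigMap would look roughly like:

apiVersion: v1
kind: ConfigMap
metadata:
  name: kubevip
  namespace: kube-system
data:
  range-global: 192.168.30.80-192.168.30.90

With a list value, the entries are joined with commas in range-global.
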
104
k3s-ansible-copia/roles/k3s_server/templates/vip.yaml.j2
Normal file
104
k3s-ansible-copia/roles/k3s_server/templates/vip.yaml.j2
Normal file
@ -0,0 +1,104 @@
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: kube-vip-ds
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
name: kube-vip-ds
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: kube-vip-ds
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: Exists
|
||||
- matchExpressions:
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
operator: Exists
|
||||
containers:
|
||||
- args:
|
||||
- manager
|
||||
env:
|
||||
- name: vip_arp
|
||||
value: "{{ 'true' if kube_vip_arp | default(true) | bool else 'false' }}"
|
||||
- name: bgp_enable
|
||||
value: "{{ 'true' if kube_vip_bgp | default(false) | bool else 'false' }}"
|
||||
- name: port
|
||||
value: "6443"
|
||||
{% if kube_vip_iface %}
|
||||
- name: vip_interface
|
||||
value: {{ kube_vip_iface }}
|
||||
{% endif %}
|
||||
- name: vip_cidr
|
||||
value: "{{ apiserver_endpoint | ansible.utils.ipsubnet | ansible.utils.ipaddr('prefix') }}"
|
||||
- name: cp_enable
|
||||
value: "true"
|
||||
- name: cp_namespace
|
||||
value: kube-system
|
||||
- name: vip_ddns
|
||||
value: "false"
|
||||
- name: svc_enable
|
||||
value: "{{ 'true' if kube_vip_lb_ip_range is defined else 'false' }}"
|
||||
- name: vip_leaderelection
|
||||
value: "true"
|
||||
- name: vip_leaseduration
|
||||
value: "15"
|
||||
- name: vip_renewdeadline
|
||||
value: "10"
|
||||
- name: vip_retryperiod
|
||||
value: "2"
|
||||
- name: address
|
||||
value: {{ apiserver_endpoint }}
|
||||
{% if kube_vip_bgp | default(false) | bool %}
|
||||
{% if kube_vip_bgp_routerid is defined %}
|
||||
- name: bgp_routerid
|
||||
value: "{{ kube_vip_bgp_routerid }}"
|
||||
{% endif %}
|
||||
{% if _kube_vip_bgp_peers | length > 0 %}
|
||||
- name: bgppeers
|
||||
value: "{{ _kube_vip_bgp_peers | map(attribute='peer_address') | zip(_kube_vip_bgp_peers| map(attribute='peer_asn')) | map('join', ',') | join(':') }}" # yamllint disable-line rule:line-length
|
||||
{% else %}
|
||||
{% if kube_vip_bgp_as is defined %}
|
||||
- name: bgp_as
|
||||
value: "{{ kube_vip_bgp_as }}"
|
||||
{% endif %}
|
||||
{% if kube_vip_bgp_peeraddress is defined %}
|
||||
- name: bgp_peeraddress
|
||||
value: "{{ kube_vip_bgp_peeraddress }}"
|
||||
{% endif %}
|
||||
{% if kube_vip_bgp_peeras is defined %}
|
||||
- name: bgp_peeras
|
||||
value: "{{ kube_vip_bgp_peeras }}"
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
image: ghcr.io/kube-vip/kube-vip:{{ kube_vip_tag_version }}
|
||||
imagePullPolicy: Always
|
||||
name: kube-vip
|
||||
resources: {}
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- NET_ADMIN
|
||||
- NET_RAW
|
||||
- SYS_TIME
|
||||
hostNetwork: true
|
||||
serviceAccountName: kube-vip
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
- effect: NoExecute
|
||||
operator: Exists
|
||||
updateStrategy: {}
|
||||
status:
|
||||
currentNumberScheduled: 0
|
||||
desiredNumberScheduled: 0
|
||||
numberMisscheduled: 0
|
||||
numberReady: 0
|
||||
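
As a sketch of the variable shape that the vip.yml tasks and this template expect — the keys peer_address and peer_asn come from the template itself, while the concrete addresses and ASNs below are invented — BGP peers could be supplied in group_vars and extended per group via the kube_vip_bgp_peers__* merge pattern:

kube_vip_bgp_peers:
  - peer_address: 192.168.30.1
    peer_asn: 64512
kube_vip_bgp_peers_groups: ['k3s_all']

# optional per-group additions, picked up by the merge_variables lookup above
kube_vip_bgp_peers__rack1:
  - peer_address: 192.168.31.1
    peer_asn: 64512

With kube_vip_bgp enabled, those two peers would render as the bgppeers value 192.168.30.1,64512:192.168.31.1,64512.
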
32
k3s-ansible-copia/roles/k3s_server_post/defaults/main.yml
Normal file
@ -0,0 +1,32 @@
---
k3s_kubectl_binary: k3s kubectl

bpf_lb_algorithm: maglev
bpf_lb_mode: hybrid

calico_blockSize: 26 # noqa var-naming
calico_ebpf: false
calico_encapsulation: VXLANCrossSubnet
calico_natOutgoing: Enabled # noqa var-naming
calico_nodeSelector: all() # noqa var-naming
calico_tag: v3.27.2

cilium_bgp: false
cilium_exportPodCIDR: true # noqa var-naming
cilium_bgp_my_asn: 64513
cilium_bgp_peer_asn: 64512
cilium_bgp_neighbors: []
cilium_bgp_neighbors_groups: ['k3s_all']
cilium_bgp_lb_cidr: 192.168.31.0/24
cilium_hubble: true
cilium_mode: native

cluster_cidr: 10.52.0.0/16
enable_bpf_masquerade: true
kube_proxy_replacement: true
group_name_master: master

metal_lb_mode: layer2
metal_lb_available_timeout: 240s
metal_lb_controller_tag_version: v0.14.3
metal_lb_ip_range: 192.168.30.80-192.168.30.90
153
k3s-ansible-copia/roles/k3s_server_post/meta/main.yml
Normal file
@ -0,0 +1,153 @@
---
argument_specs:
  main:
    short_description: Configure k3s cluster
    options:
      apiserver_endpoint:
        description: Virtual IP address configured on each master
        required: true

      bpf_lb_algorithm:
        description: BPF load-balancer algorithm
        default: maglev

      bpf_lb_mode:
        description: BPF load-balancer mode
        default: hybrid

      calico_blockSize:
        description: IP pool block size
        type: int
        default: 26

      calico_ebpf:
        description: Use eBPF dataplane instead of iptables
        type: bool
        default: false

      calico_encapsulation:
        description: IP pool encapsulation
        default: VXLANCrossSubnet

      calico_natOutgoing:
        description: IP pool NAT outgoing
        default: Enabled

      calico_nodeSelector:
        description: IP pool node selector
        default: all()

      calico_iface:
        description: The network interface used when Calico is enabled
        default: ~

      calico_tag:
        description: Calico version tag
        default: v3.27.2

      cilium_bgp:
        description:
          - Enable cilium BGP control plane for LB services and pod cidrs.
          - Disables the use of MetalLB.
        type: bool
        default: false

      cilium_bgp_my_asn:
        description: Local ASN for BGP peer
        type: int
        default: 64513

      cilium_bgp_peer_asn:
        description: BGP peer ASN
        type: int
        default: 64512

      cilium_bgp_peer_address:
        description: BGP peer address
        default: ~

      cilium_bgp_neighbors:
        description: List of BGP peer ASN & address pairs
        default: []

      cilium_bgp_neighbors_groups:
        description: Inventory groups in which to search for additional cilium_bgp_neighbors parameters to merge.
        default: ['k3s_all']

      cilium_bgp_lb_cidr:
        description: BGP load balancer IP range
        default: 192.168.31.0/24

      cilium_exportPodCIDR:
        description: Export pod CIDR
        type: bool
        default: true

      cilium_hubble:
        description: Enable Cilium Hubble
        type: bool
        default: true

      cilium_iface:
        description: The network interface used when Cilium is enabled
        default: ~

      cilium_mode:
        description: Inter-node communication mode
        default: native
        choices:
          - native
          - routed

      cluster_cidr:
        description: Internal cluster IP range
        default: 10.52.0.0/16

      enable_bpf_masquerade:
        description: Use IP masquerading
        type: bool
        default: true

      group_name_master:
        description: Name of the master group
        default: master

      kube_proxy_replacement:
        description: Replace the native kube-proxy with Cilium
        type: bool
        default: true

      kube_vip_lb_ip_range:
        description: IP range for kube-vip load balancer
        default: ~

      metal_lb_available_timeout:
        description: Timeout waiting for MetalLB resources
        default: 240s

      metal_lb_ip_range:
        description: MetalLB IP range for the load balancer
        default: 192.168.30.80-192.168.30.90

      metal_lb_controller_tag_version:
        description: Image tag for MetalLB
        default: v0.14.3

      metal_lb_mode:
        description: MetalLB mode
        default: layer2
        choices:
          - bgp
          - layer2

      metal_lb_bgp_my_asn:
        description: Local BGP ASN
        default: ~

      metal_lb_bgp_peer_asn:
        description: BGP peer ASN
        default: ~

      metal_lb_bgp_peer_address:
        description: BGP peer address
        default: ~
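
A minimal group_vars sketch that satisfies this spec — only apiserver_endpoint is required, everything else falls back to the role defaults above; the address and interface values are made-up examples:

apiserver_endpoint: 192.168.30.222
cilium_iface: eth0
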
120
k3s-ansible-copia/roles/k3s_server_post/tasks/calico.yml
Normal file
@ -0,0 +1,120 @@
---
- name: Deploy Calico to cluster
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
  run_once: true
  block:
    - name: Create manifests directory on first master
      ansible.builtin.file:
        path: /tmp/k3s
        state: directory
        owner: root
        group: root
        mode: "0755"

    - name: "Download to first master: manifest for Tigera Operator and Calico CRDs"
      ansible.builtin.get_url:
        url: https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml
        dest: /tmp/k3s/tigera-operator.yaml
        owner: root
        group: root
        mode: "0755"

    - name: Copy Calico custom resources manifest to first master
      ansible.builtin.template:
        src: calico.crs.j2
        dest: /tmp/k3s/custom-resources.yaml
        owner: root
        group: root
        mode: "0755"

    - name: Deploy or replace Tigera Operator
      block:
        - name: Deploy Tigera Operator
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} create -f /tmp/k3s/tigera-operator.yaml"
          register: create_operator
          changed_when: "'created' in create_operator.stdout"
          failed_when: "'Error' in create_operator.stderr and 'already exists' not in create_operator.stderr"
      rescue:
        - name: Replace existing Tigera Operator
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} replace -f /tmp/k3s/tigera-operator.yaml"
          register: replace_operator
          changed_when: "'replaced' in replace_operator.stdout"
          failed_when: "'Error' in replace_operator.stderr"

    - name: Wait for Tigera Operator resources
      ansible.builtin.command: >-
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name }}
        --namespace='tigera-operator'
        --for=condition=Available=True
        --timeout=30s
      register: tigera_result
      changed_when: false
      until: tigera_result is succeeded
      retries: 7
      delay: 7
      with_items:
        - { name: tigera-operator, type: deployment }
      loop_control:
        label: "{{ item.type }}/{{ item.name }}"

    - name: Deploy Calico custom resources
      block:
        - name: Deploy custom resources for Calico
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} create -f /tmp/k3s/custom-resources.yaml"
          register: create_cr
          changed_when: "'created' in create_cr.stdout"
          failed_when: "'Error' in create_cr.stderr and 'already exists' not in create_cr.stderr"
      rescue:
        - name: Apply new Calico custom resource manifest
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/custom-resources.yaml"
          register: apply_cr
          changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
          failed_when: "'Error' in apply_cr.stderr"

    - name: Wait for Calico system resources to be available
      ansible.builtin.command: >-
        {% if item.type == 'daemonset' %}
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait pods
        --namespace='{{ item.namespace }}'
        --selector={{ item.selector }}
        --for=condition=Ready
        {% else %}
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name }}
        --namespace='{{ item.namespace }}'
        --for=condition=Available
        {% endif %}
        --timeout=30s
      register: cr_result
      changed_when: false
      until: cr_result is succeeded
      retries: 30
      delay: 7
      with_items:
        - { name: calico-typha, type: deployment, namespace: calico-system }
        - { name: calico-kube-controllers, type: deployment, namespace: calico-system }
        - name: csi-node-driver
          type: daemonset
          selector: k8s-app=csi-node-driver
          namespace: calico-system
        - name: calico-node
          type: daemonset
          selector: k8s-app=calico-node
          namespace: calico-system
        - { name: calico-apiserver, type: deployment, namespace: calico-apiserver }
      loop_control:
        label: "{{ item.type }}/{{ item.name }}"

    - name: Patch Felix configuration for eBPF mode
      ansible.builtin.command:
        cmd: >
          {{ k3s_kubectl_binary | default('k3s kubectl') }} patch felixconfiguration default
          --type='merge'
          --patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
      register: patch_result
      changed_when: "'felixconfiguration.projectcalico.org/default patched' in patch_result.stdout"
      failed_when: "'Error' in patch_result.stderr"
      when: calico_ebpf
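
As a usage sketch, opting into the eBPF path of this task file would hinge on two variables (the interface name is a placeholder):

calico_iface: eth0
calico_ebpf: true

With calico_ebpf set to true, the final task patches the default FelixConfiguration so that Calico's eBPF dataplane does not clean up existing kube-proxy iptables rules.
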
256
k3s-ansible-copia/roles/k3s_server_post/tasks/cilium.yml
Normal file
@ -0,0 +1,256 @@
---
- name: Prepare Cilium CLI on first master and deploy CNI
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
  run_once: true
  block:
    - name: Create tmp directory on first master
      ansible.builtin.file:
        path: /tmp/k3s
        state: directory
        owner: root
        group: root
        mode: "0755"

    - name: Check if Cilium CLI is installed
      ansible.builtin.command: cilium version
      register: cilium_cli_installed
      failed_when: false
      changed_when: false
      ignore_errors: true

    - name: Check for Cilium CLI version in command output
      ansible.builtin.set_fact:
        installed_cli_version: >-
          {{
            cilium_cli_installed.stdout_lines
            | join(' ')
            | regex_findall('cilium-cli: (v\d+\.\d+\.\d+)')
            | first
            | default('unknown')
          }}
      when: cilium_cli_installed.rc == 0

    - name: Get latest stable Cilium CLI version file
      ansible.builtin.get_url:
        url: https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt
        dest: /tmp/k3s/cilium-cli-stable.txt
        owner: root
        group: root
        mode: "0755"

    - name: Read Cilium CLI stable version from file
      ansible.builtin.command: cat /tmp/k3s/cilium-cli-stable.txt
      register: cli_ver
      changed_when: false

    - name: Log installed Cilium CLI version
      ansible.builtin.debug:
        msg: "Installed Cilium CLI version: {{ installed_cli_version | default('Not installed') }}"

    - name: Log latest stable Cilium CLI version
      ansible.builtin.debug:
        msg: "Latest Cilium CLI version: {{ cli_ver.stdout }}"

    - name: Determine if Cilium CLI needs installation or update
      ansible.builtin.set_fact:
        cilium_cli_needs_update: >-
          {{
            cilium_cli_installed.rc != 0 or
            (cilium_cli_installed.rc == 0 and
             installed_cli_version != cli_ver.stdout)
          }}

    - name: Install or update Cilium CLI
      when: cilium_cli_needs_update
      block:
        - name: Set architecture variable
          ansible.builtin.set_fact:
            cli_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"

        - name: Download Cilium CLI and checksum
          ansible.builtin.get_url:
            url: "{{ cilium_base_url }}/cilium-linux-{{ cli_arch }}{{ item }}"
            dest: /tmp/k3s/cilium-linux-{{ cli_arch }}{{ item }}
            owner: root
            group: root
            mode: "0755"
          loop:
            - .tar.gz
            - .tar.gz.sha256sum
          vars:
            cilium_base_url: https://github.com/cilium/cilium-cli/releases/download/{{ cli_ver.stdout }}

        - name: Verify the downloaded tarball
          ansible.builtin.shell: |
            cd /tmp/k3s && sha256sum --check cilium-linux-{{ cli_arch }}.tar.gz.sha256sum
          args:
            executable: /bin/bash
          changed_when: false

        - name: Extract Cilium CLI to /usr/local/bin
          ansible.builtin.unarchive:
            src: /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz
            dest: /usr/local/bin
            remote_src: true

        - name: Remove downloaded tarball and checksum file
          ansible.builtin.file:
            path: "{{ item }}"
            state: absent
          loop:
            - /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz
            - /tmp/k3s/cilium-linux-{{ cli_arch }}.tar.gz.sha256sum

    - name: Wait for connectivity to kube VIP
      ansible.builtin.command: ping -c 1 {{ apiserver_endpoint }}
      register: ping_result
      until: ping_result.rc == 0
      retries: 21
      delay: 1
      ignore_errors: true
      changed_when: false

    - name: Fail if kube VIP not reachable
      ansible.builtin.fail:
        msg: API endpoint {{ apiserver_endpoint }} is not reachable
      when: ping_result.rc != 0

    - name: Test for existing Cilium install
      ansible.builtin.command: |
        {{ k3s_kubectl_binary | default('k3s kubectl') }} -n kube-system get daemonsets cilium
      register: cilium_installed
      failed_when: false
      changed_when: false
      ignore_errors: true

    - name: Check existing Cilium install
      when: cilium_installed.rc == 0
      block:
        - name: Check Cilium version
          ansible.builtin.command: cilium version
          register: cilium_version
          failed_when: false
          changed_when: false
          ignore_errors: true

        - name: Parse installed Cilium version
          ansible.builtin.set_fact:
            installed_cilium_version: >-
              {{
                cilium_version.stdout_lines
                | join(' ')
                | regex_findall('cilium image.+(\d+\.\d+\.\d+)')
                | first
                | default('unknown')
              }}

        - name: Determine if Cilium needs update
          ansible.builtin.set_fact:
            cilium_needs_update: >-
              {{ 'v' + installed_cilium_version != cilium_tag }}

        - name: Log result
          ansible.builtin.debug:
            msg: >
              Installed Cilium version: {{ installed_cilium_version }},
              Target Cilium version: {{ cilium_tag }},
              Update needed: {{ cilium_needs_update }}

    - name: Install Cilium
      ansible.builtin.command: >-
        {% if cilium_installed.rc != 0 %}
        cilium install
        {% else %}
        cilium upgrade
        {% endif %}
        --version "{{ cilium_tag }}"
        --helm-set operator.replicas="1"
        {{ '--helm-set devices=' + cilium_iface if cilium_iface != 'auto' else '' }}
        --helm-set ipam.operator.clusterPoolIPv4PodCIDRList={{ cluster_cidr }}
        {% if cilium_mode == "native" or (cilium_bgp and cilium_exportPodCIDR != 'false') %}
        --helm-set ipv4NativeRoutingCIDR={{ cluster_cidr }}
        {% endif %}
        --helm-set k8sServiceHost="127.0.0.1"
        --helm-set k8sServicePort="6444"
        --helm-set routingMode={{ cilium_mode }}
        --helm-set autoDirectNodeRoutes={{ "true" if cilium_mode == "native" else "false" }}
        --helm-set kubeProxyReplacement={{ kube_proxy_replacement }}
        --helm-set bpf.masquerade={{ enable_bpf_masquerade }}
        --helm-set bgpControlPlane.enabled={{ cilium_bgp | default("false") }}
        --helm-set hubble.enabled={{ "true" if cilium_hubble else "false" }}
        --helm-set hubble.relay.enabled={{ "true" if cilium_hubble else "false" }}
        --helm-set hubble.ui.enabled={{ "true" if cilium_hubble else "false" }}
        {% if kube_proxy_replacement is not false %}
        --helm-set bpf.loadBalancer.algorithm={{ bpf_lb_algorithm }}
        --helm-set bpf.loadBalancer.mode={{ bpf_lb_mode }}
        {% endif %}
      environment:
        KUBECONFIG: "{{ ansible_user_dir }}/.kube/config"
      register: cilium_install_result
      changed_when: cilium_install_result.rc == 0
      when: cilium_installed.rc != 0 or cilium_needs_update

    - name: Wait for Cilium resources
      ansible.builtin.command: >-
        {% if item.type == 'daemonset' %}
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait pods
        --namespace=kube-system
        --selector='k8s-app=cilium'
        --for=condition=Ready
        {% else %}
        {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.type }}/{{ item.name }}
        --namespace=kube-system
        --for=condition=Available
        {% endif %}
        --timeout=30s
      register: cr_result
      changed_when: false
      until: cr_result is succeeded
      retries: 30
      delay: 7
      with_items:
        - { name: cilium-operator, type: deployment }
        - { name: cilium, type: daemonset, selector: k8s-app=cilium }
        - { name: hubble-relay, type: deployment, check_hubble: true }
        - { name: hubble-ui, type: deployment, check_hubble: true }
      loop_control:
        label: "{{ item.type }}/{{ item.name }}"
      when: >-
        not item.check_hubble | default(false) or (item.check_hubble | default(false) and cilium_hubble)

    - name: Configure Cilium BGP
      when: cilium_bgp
      block:
        - name: Set _cilium_bgp_neighbors fact
          ansible.builtin.set_fact:
            _cilium_bgp_neighbors: "{{ lookup('community.general.merge_variables', '^cilium_bgp_neighbors__.+$', initial_value=cilium_bgp_neighbors, groups=cilium_bgp_neighbors_groups) }}" # yamllint disable-line rule:line-length

        - name: Copy BGP manifests to first master
          ansible.builtin.template:
            src: cilium.crs.j2
            dest: /tmp/k3s/cilium-bgp.yaml
            owner: root
            group: root
            mode: "0755"

        - name: Apply BGP manifests
          ansible.builtin.command:
            cmd: "{{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/cilium-bgp.yaml"
          register: apply_cr
          changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
          failed_when: "'is invalid' in apply_cr.stderr"
          ignore_errors: true

        - name: Print error message if BGP manifests application fails
          ansible.builtin.debug:
            msg: "{{ apply_cr.stderr }}"
          when: "'is invalid' in apply_cr.stderr"

        - name: Test for BGP config resources
          ansible.builtin.command: "{{ item }}"
          loop:
            - "{{ k3s_kubectl_binary | default('k3s kubectl') }} get CiliumBGPPeeringPolicy.cilium.io"
            - "{{ k3s_kubectl_binary | default('k3s kubectl') }} get CiliumLoadBalancerIPPool.cilium.io"
          changed_when: false
          loop_control:
            label: "{{ item }}"
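
A sketch of the neighbor list the BGP block above merges and then templates into cilium.crs.j2 — the key names come from that template, the addresses and ASNs below are invented:

cilium_bgp: true
cilium_bgp_neighbors:
  - peer_address: 192.168.30.1
    peer_asn: 64512
# optional per-group extension picked up by the merge_variables lookup
cilium_bgp_neighbors__k3s_all:
  - peer_address: 192.168.30.2
    peer_asn: 64512
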
20
k3s-ansible-copia/roles/k3s_server_post/tasks/main.yml
Normal file
@ -0,0 +1,20 @@
---
- name: Deploy calico
  ansible.builtin.include_tasks: calico.yml
  tags: calico
  when: calico_iface is defined and cilium_iface is not defined

- name: Deploy cilium
  ansible.builtin.include_tasks: cilium.yml
  tags: cilium
  when: cilium_iface is defined

- name: Deploy metallb pool
  ansible.builtin.include_tasks: metallb.yml
  tags: metallb
  when: kube_vip_lb_ip_range is not defined and (not cilium_bgp or cilium_iface is not defined)

- name: Remove tmp directory used for manifests
  ansible.builtin.file:
    path: /tmp/k3s
    state: absent
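
Taken together, the conditions above make the CNI choice depend purely on which interface variable is defined; for example (interface names are placeholders):

calico_iface: eth0   # deploys Calico; Cilium is skipped
# or
cilium_iface: eth0   # deploys Cilium; MetalLB is skipped when cilium_bgp is true

MetalLB is also skipped entirely when kube_vip_lb_ip_range is defined, since kube-vip then handles LoadBalancer services.
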
136
k3s-ansible-copia/roles/k3s_server_post/tasks/metallb.yml
Normal file
@ -0,0 +1,136 @@
---
- name: Create manifests directory for temp configuration
  ansible.builtin.file:
    path: /tmp/k3s
    state: directory
    owner: "{{ ansible_user_id }}"
    mode: "0755"
  with_items: "{{ groups[group_name_master | default('master')] }}"
  run_once: true

- name: Delete outdated metallb replicas
  ansible.builtin.shell: |-
    set -o pipefail

    REPLICAS=$({{ k3s_kubectl_binary | default('k3s kubectl') }} --namespace='metallb-system' get replicasets \
      -l 'component=controller,app=metallb' \
      -o jsonpath='{.items[0].spec.template.spec.containers[0].image}, {.items[0].metadata.name}' 2>/dev/null || true)
    REPLICAS_SETS=$(echo ${REPLICAS} | grep -v '{{ metal_lb_controller_tag_version }}' | sed -e "s/^.*\s//g")
    if [ -n "${REPLICAS_SETS}" ] ; then
      # unquoted on purpose so the loop iterates over each replica set name
      for REPLICAS in ${REPLICAS_SETS}
      do
        {{ k3s_kubectl_binary | default('k3s kubectl') }} --namespace='metallb-system' \
          delete rs "${REPLICAS}"
      done
    fi
  args:
    executable: /bin/bash
  changed_when: false
  run_once: true
  with_items: "{{ groups[group_name_master | default('master')] }}"

- name: Copy metallb CRs manifest to first master
  ansible.builtin.template:
    src: metallb.crs.j2
    dest: /tmp/k3s/metallb-crs.yaml
    owner: "{{ ansible_user_id }}"
    mode: "0755"
  with_items: "{{ groups[group_name_master | default('master')] }}"
  run_once: true

- name: Test metallb-system namespace
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system
  changed_when: false
  with_items: "{{ groups[group_name_master | default('master')] }}"
  run_once: true

- name: Wait for MetalLB resources
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} wait {{ item.resource }}
    --namespace='metallb-system'
    {% if item.name | default(False) -%}{{ item.name }}{%- endif %}
    {% if item.selector | default(False) -%}--selector='{{ item.selector }}'{%- endif %}
    {% if item.condition | default(False) -%}{{ item.condition }}{%- endif %}
    --timeout='{{ metal_lb_available_timeout }}'
  changed_when: false
  run_once: true
  with_items:
    - description: controller
      resource: deployment
      name: controller
      condition: --for condition=Available=True
    - description: webhook service
      resource: pod
      selector: component=controller
      condition: --for=jsonpath='{.status.phase}'=Running
    - description: pods in replica sets
      resource: pod
      selector: component=controller,app=metallb
      condition: --for condition=Ready
    - description: ready replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.readyReplicas}'=1
    - description: fully labeled replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.fullyLabeledReplicas}'=1
    - description: available replicas of controller
      resource: replicaset
      selector: component=controller,app=metallb
      condition: --for=jsonpath='{.status.availableReplicas}'=1
  loop_control:
    label: "{{ item.description }}"

- name: Set metallb webhook service name
  ansible.builtin.set_fact:
    metallb_webhook_service_name: >-
      {{
        (
          (metal_lb_controller_tag_version | regex_replace('^v', ''))
          is
          version('0.14.4', '<', version_type='semver')
        ) | ternary(
          'webhook-service',
          'metallb-webhook-service'
        )
      }}

- name: Test metallb-system webhook-service endpoint
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get endpoints {{ metallb_webhook_service_name }}
  changed_when: false
  with_items: "{{ groups[group_name_master | default('master')] }}"
  run_once: true

- name: Apply metallb CRs
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} apply -f /tmp/k3s/metallb-crs.yaml
    --timeout='{{ metal_lb_available_timeout }}'
  register: this
  changed_when: false
  run_once: true
  until: this.rc == 0
  retries: 5

- name: Test metallb-system resources for Layer 2 configuration
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get {{ item }}
  changed_when: false
  run_once: true
  when: metal_lb_mode == "layer2"
  with_items:
    - IPAddressPool
    - L2Advertisement

- name: Test metallb-system resources for BGP configuration
  ansible.builtin.command: >-
    {{ k3s_kubectl_binary | default('k3s kubectl') }} -n metallb-system get {{ item }}
  changed_when: false
  run_once: true
  when: metal_lb_mode == "bgp"
  with_items:
    - IPAddressPool
    - BGPPeer
    - BGPAdvertisement
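
As a worked example of the version gate above: metal_lb_controller_tag_version v0.14.3 strips to 0.14.3, which compares < 0.14.4, so metallb_webhook_service_name becomes webhook-service; v0.14.4 or later yields metallb-webhook-service. The exact boundary reflects this playbook's assumption about when the service was renamed upstream.
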
@ -0,0 +1,41 @@
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
      - blockSize: {{ calico_blockSize }}
        cidr: {{ cluster_cidr }}
        encapsulation: {{ calico_encapsulation }}
        natOutgoing: {{ calico_natOutgoing }}
        nodeSelector: {{ calico_nodeSelector }}
    nodeAddressAutodetectionV4:
      interface: {{ calico_iface }}
    linuxDataplane: {{ 'BPF' if calico_ebpf else 'Iptables' }}

---

# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}

{% if calico_ebpf %}
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubernetes-services-endpoint
  namespace: tigera-operator
data:
  KUBERNETES_SERVICE_HOST: '{{ apiserver_endpoint }}'
  KUBERNETES_SERVICE_PORT: '6443'
{% endif %}
@ -0,0 +1,48 @@
apiVersion: "cilium.io/v2alpha1"
kind: CiliumBGPPeeringPolicy
metadata:
  name: 01-bgp-peering-policy
spec: # CiliumBGPPeeringPolicySpec
  virtualRouters: # []CiliumBGPVirtualRouter
    - localASN: {{ cilium_bgp_my_asn }}
      exportPodCIDR: {{ cilium_exportPodCIDR | default('true') }}
      neighbors: # []CiliumBGPNeighbor
{% if _cilium_bgp_neighbors | length > 0 %}
{% for item in _cilium_bgp_neighbors %}
        - peerAddress: '{{ item.peer_address + "/32" }}'
          peerASN: {{ item.peer_asn }}
          eBGPMultihopTTL: 10
          connectRetryTimeSeconds: 120
          holdTimeSeconds: 90
          keepAliveTimeSeconds: 30
          gracefulRestart:
            enabled: true
            restartTimeSeconds: 120
{% endfor %}
{% else %}
        - peerAddress: '{{ cilium_bgp_peer_address + "/32" }}'
          peerASN: {{ cilium_bgp_peer_asn }}
          eBGPMultihopTTL: 10
          connectRetryTimeSeconds: 120
          holdTimeSeconds: 90
          keepAliveTimeSeconds: 30
          gracefulRestart:
            enabled: true
            restartTimeSeconds: 120
{% endif %}
      serviceSelector:
        matchExpressions:
          - {key: somekey, operator: NotIn, values: ['never-used-value']}
---
apiVersion: "cilium.io/v2alpha1"
kind: CiliumLoadBalancerIPPool
metadata:
  name: "01-lb-pool"
spec:
  blocks:
{% if "/" in cilium_bgp_lb_cidr %}
    - cidr: {{ cilium_bgp_lb_cidr }}
{% else %}
    - start: {{ cilium_bgp_lb_cidr.split('-')[0] }}
      stop: {{ cilium_bgp_lb_cidr.split('-')[1] }}
{% endif %}
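
For reference, with a hypothetical range value cilium_bgp_lb_cidr: 192.168.31.10-192.168.31.20 (no '/'), the pool block above renders as:

spec:
  blocks:
    - start: 192.168.31.10
      stop: 192.168.31.20

whereas the default 192.168.31.0/24 takes the cidr branch.
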
@ -0,0 +1,43 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
{% if metal_lb_ip_range is string %}
{# metal_lb_ip_range was used in the legacy way: single string instead of a list #}
{# => transform to list with single element #}
{% set metal_lb_ip_range = [metal_lb_ip_range] %}
{% endif %}
{% for range in metal_lb_ip_range %}
    - {{ range }}
{% endfor %}

{% if metal_lb_mode == "layer2" %}
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
{% endif %}
{% if metal_lb_mode == "bgp" %}
---
apiVersion: metallb.io/v1beta2
kind: BGPPeer
metadata:
  name: default
  namespace: metallb-system
spec:
  myASN: {{ metal_lb_bgp_my_asn }}
  peerASN: {{ metal_lb_bgp_peer_asn }}
  peerAddress: {{ metal_lb_bgp_peer_address }}

---
apiVersion: metallb.io/v1beta1
kind: BGPAdvertisement
metadata:
  name: default
  namespace: metallb-system
{% endif %}
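
The same legacy-string normalization as in kubevip.yaml.j2 applies here; a list value (made-up ranges) renders one pool entry per element, and MetalLB accepts both start-end ranges and CIDRs:

metal_lb_ip_range:
  - 192.168.30.80-192.168.30.90
  - 192.168.32.0/28
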
6
k3s-ansible-copia/roles/lxc/handlers/main.yml
Normal file
@ -0,0 +1,6 @@
---
- name: Reboot server
  become: true
  ansible.builtin.reboot:
    reboot_command: "{{ custom_reboot_command | default(omit) }}"
  listen: reboot server
8
k3s-ansible-copia/roles/lxc/meta/main.yml
Normal file
@ -0,0 +1,8 @@
---
argument_specs:
  main:
    short_description: Configure LXC
    options:
      custom_reboot_command:
        default: ~
        description: Command to run on reboot
21
k3s-ansible-copia/roles/lxc/tasks/main.yml
Normal file
@ -0,0 +1,21 @@
---
- name: Check for rc.local file
  ansible.builtin.stat:
    path: /etc/rc.local
  register: rcfile

- name: Create rc.local if needed
  ansible.builtin.lineinfile:
    path: /etc/rc.local
    line: "#!/bin/sh -e"
    create: true
    insertbefore: BOF
    mode: u=rwx,g=rx,o=rx
  when: not rcfile.stat.exists

- name: Write rc.local file
  ansible.builtin.blockinfile:
    path: /etc/rc.local
    content: "{{ lookup('template', 'templates/rc.local.j2') }}"
    state: present
  notify: reboot server
4
k3s-ansible-copia/roles/prereq/defaults/main.yml
Normal file
@ -0,0 +1,4 @@
---
secure_path:
  RedHat: /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin
  Suse: /usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin
7
k3s-ansible-copia/roles/prereq/meta/main.yml
Normal file
@ -0,0 +1,7 @@
---
argument_specs:
  main:
    short_description: Prerequisites
    options:
      system_timezone:
        description: Timezone to be set on all nodes
70
k3s-ansible-copia/roles/prereq/tasks/main.yml
Normal file
@ -0,0 +1,70 @@
---
- name: Set same timezone on every Server
  become: true
  community.general.timezone:
    name: "{{ system_timezone }}"
  when: (system_timezone is defined) and (system_timezone != "Your/Timezone")

- name: Set SELinux to disabled state
  ansible.posix.selinux:
    state: disabled
  when: ansible_os_family == "RedHat"

- name: Enable IPv4 forwarding
  ansible.posix.sysctl:
    name: net.ipv4.ip_forward
    value: "1"
    state: present
    reload: true
  tags: sysctl

- name: Enable IPv6 forwarding
  ansible.posix.sysctl:
    name: net.ipv6.conf.all.forwarding
    value: "1"
    state: present
    reload: true
  tags: sysctl

- name: Enable IPv6 router advertisements
  ansible.posix.sysctl:
    name: net.ipv6.conf.all.accept_ra
    value: "2"
    state: present
    reload: true
  tags: sysctl

- name: Add br_netfilter to /etc/modules-load.d/
  ansible.builtin.copy:
    content: br_netfilter
    dest: /etc/modules-load.d/br_netfilter.conf
    mode: u=rw,g=,o=
  when: ansible_os_family == "RedHat"

- name: Load br_netfilter
  community.general.modprobe:
    name: br_netfilter
    state: present
  when: ansible_os_family == "RedHat"

- name: Set bridge-nf-call-iptables (just to be sure)
  ansible.posix.sysctl:
    name: "{{ item }}"
    value: "1"
    state: present
    reload: true
  when: ansible_os_family == "RedHat"
  loop:
    - net.bridge.bridge-nf-call-iptables
    - net.bridge.bridge-nf-call-ip6tables
  tags: sysctl

- name: Add /usr/local/bin to sudo secure_path
  ansible.builtin.lineinfile:
    line: Defaults secure_path = {{ secure_path[ansible_os_family] }}
    regexp: Defaults(\s)*secure_path(\s)*=
    state: present
    insertafter: EOF
    path: /etc/sudoers
    validate: visudo -cf %s
  when: ansible_os_family in [ "RedHat", "Suse" ]
13
k3s-ansible-copia/roles/proxmox_lxc/handlers/main.yml
Executable file
@ -0,0 +1,13 @@
---
- name: Reboot containers
  block:
    - name: Get container ids from filtered files
      ansible.builtin.set_fact:
        proxmox_lxc_filtered_ids: >-
          {{ proxmox_lxc_filtered_files | map("split", "/") | map("last") | map("split", ".") | map("first") }}
      listen: reboot containers
    - name: Reboot container
      ansible.builtin.command: pct reboot {{ item }}
      loop: "{{ proxmox_lxc_filtered_ids }}"
      changed_when: true
      listen: reboot containers
9
k3s-ansible-copia/roles/proxmox_lxc/meta/main.yml
Normal file
@ -0,0 +1,9 @@
---
argument_specs:
  main:
    short_description: Proxmox LXC settings
    options:
      proxmox_lxc_ct_ids:
        description: Proxmox container ID list
        type: list
        required: true
43
k3s-ansible-copia/roles/proxmox_lxc/tasks/main.yml
Normal file
@ -0,0 +1,43 @@
---
- name: Check for container files that exist on this host
  ansible.builtin.stat:
    path: /etc/pve/lxc/{{ item }}.conf
  loop: "{{ proxmox_lxc_ct_ids }}"
  register: stat_results

- name: Filter out files that do not exist
  ansible.builtin.set_fact:
    proxmox_lxc_filtered_files: '{{ stat_results.results | rejectattr("stat.exists", "false") | map(attribute="stat.path") }}' # noqa yaml[line-length]

# https://gist.github.com/triangletodd/02f595cd4c0dc9aac5f7763ca2264185
- name: Ensure lxc config has the right apparmor profile
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: ^lxc.apparmor.profile
    line: "lxc.apparmor.profile: unconfined"
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Ensure lxc config has the right cgroup
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: ^lxc.cgroup.devices.allow
    line: "lxc.cgroup.devices.allow: a"
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Ensure lxc config has the right cap drop
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: ^lxc.cap.drop
    line: "lxc.cap.drop: "
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers

- name: Ensure lxc config has the right mounts
  ansible.builtin.lineinfile:
    dest: "{{ item }}"
    regexp: ^lxc.mount.auto
    line: 'lxc.mount.auto: "proc:rw sys:rw"'
  loop: "{{ proxmox_lxc_filtered_files }}"
  notify: reboot containers
k3s-ansible-copia/roles/raspberrypi/defaults/main.yml
Normal file
6
k3s-ansible-copia/roles/raspberrypi/defaults/main.yml
Normal file
@ -0,0 +1,6 @@
|
||||
---
|
||||
# Indicates whether the k3s prerequisites for Raspberry Pi should be set up
|
||||
# Possible values:
|
||||
# - present
|
||||
# - absent
|
||||
state: present
|
||||
Some files were not shown because too many files have changed in this diff