---
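# Installs the Calico CNI through the Tigera Operator, running once on the first
# control-plane (master) node. `calico_tag` (Calico release tag) and `calico_ebpf`
# (eBPF dataplane toggle) are assumed to be defined elsewhere in the role/group
# vars; `calico.crs.j2` is the template that renders the Calico custom resources.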
- name: Deploy Calico to cluster
  when: ansible_hostname == hostvars[groups[group_name_master | default('master')][0]]['ansible_hostname']
  run_once: true
  block:
    - name: Create manifests directory on first master
      file:
        path: /tmp/k3s
        state: directory
        owner: root
        group: root
        mode: 0755

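    # tigera-operator.yaml bundles the operator Deployment together with the
    # Calico CRDs, pinned to the version given by calico_tag.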
- name: "Download to first master: manifest for Tigera Operator and Calico CRDs"
|
|
ansible.builtin.get_url:
|
|
url: "https://raw.githubusercontent.com/projectcalico/calico/{{ calico_tag }}/manifests/tigera-operator.yaml"
|
|
dest: "/tmp/k3s/tigera-operator.yaml"
|
|
owner: root
|
|
group: root
|
|
mode: 0755
|
|
|
|
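    # calico.crs.j2 renders the cluster-specific custom resources (pod CIDR,
    # blockSize, and the eBPF dataplane setting, per this role's template).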
    - name: Copy Calico custom resources manifest to first master
      ansible.builtin.template:
        src: "calico.crs.j2"
        dest: /tmp/k3s/custom-resources.yaml
        owner: root
        group: root
        mode: 0755

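    # kubectl create (rather than apply) is used below, likely because the bundled
    # CRDs are too large for apply's last-applied-configuration annotation; if the
    # operator already exists, the rescue path replaces it instead.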
    - name: Deploy or replace Tigera Operator
      block:
        - name: Deploy Tigera Operator
          ansible.builtin.command:
            cmd: kubectl create -f /tmp/k3s/tigera-operator.yaml
          register: create_operator
          changed_when: "'created' in create_operator.stdout"
          failed_when: "'Error' in create_operator.stderr and 'already exists' not in create_operator.stderr"
      rescue:
        - name: Replace existing Tigera Operator
          ansible.builtin.command:
            cmd: kubectl replace -f /tmp/k3s/tigera-operator.yaml
          register: replace_operator
          changed_when: "'replaced' in replace_operator.stdout"
          failed_when: "'Error' in replace_operator.stderr"

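    # Give the operator Deployment up to 7 retries of 7s to report Available
    # before the Calico custom resources are created.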
    - name: Wait for Tigera Operator resources
      command: >-
        k3s kubectl wait {{ item.type }}/{{ item.name }}
        --namespace='tigera-operator'
        --for=condition=Available=True
        --timeout=7s
      register: tigera_result
      changed_when: false
      until: tigera_result is succeeded
      retries: 7
      delay: 7
      with_items:
        - {name: tigera-operator, type: deployment}
      loop_control:
        label: "{{ item.type }}/{{ item.name }}"

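    # Same create/rescue pattern as the operator; the rescue path can use
    # kubectl apply here, presumably because the custom resources are small
    # enough for it.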
    - name: Deploy Calico custom resources
      block:
        - name: Deploy custom resources for Calico
          ansible.builtin.command:
            cmd: kubectl create -f /tmp/k3s/custom-resources.yaml
          register: create_cr
          changed_when: "'created' in create_cr.stdout"
          failed_when: "'Error' in create_cr.stderr and 'already exists' not in create_cr.stderr"
      rescue:
        - name: Apply new Calico custom resource manifest
          ansible.builtin.command:
            cmd: kubectl apply -f /tmp/k3s/custom-resources.yaml
          register: apply_cr
          changed_when: "'configured' in apply_cr.stdout or 'created' in apply_cr.stdout"
          failed_when: "'Error' in apply_cr.stderr"

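    # Deployments are waited on via the Available condition; daemonsets expose no
    # such condition, so their pods are selected by label and waited on for Ready
    # instead. Note that calico-apiserver runs in its own namespace.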
    - name: Wait for Calico system resources to be available
      command: >-
        {% if item.type == 'daemonset' %}
        k3s kubectl wait pods
        --namespace='{{ item.namespace }}'
        --selector={{ item.selector }}
        --for=condition=Ready
        {% else %}
        k3s kubectl wait {{ item.type }}/{{ item.name }}
        --namespace='{{ item.namespace }}'
        --for=condition=Available
        {% endif %}
        --timeout=7s
      register: cr_result
      changed_when: false
      until: cr_result is succeeded
      retries: 30
      delay: 7
      with_items:
        - {name: calico-typha, type: deployment, namespace: calico-system}
        - {name: calico-kube-controllers, type: deployment, namespace: calico-system}
        - {name: csi-node-driver, type: daemonset, selector: 'k8s-app=csi-node-driver', namespace: calico-system}
        - {name: calico-node, type: daemonset, selector: 'k8s-app=calico-node', namespace: calico-system}
        - {name: calico-apiserver, type: deployment, namespace: calico-apiserver}
      loop_control:
        label: "{{ item.type }}/{{ item.name }}"

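    # bpfKubeProxyIptablesCleanupEnabled: false stops Felix from tearing down
    # kube-proxy's iptables rules in eBPF mode, presumably because k3s's embedded
    # kube-proxy is left running in this setup.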
    - name: Patch Felix configuration for eBPF mode
      ansible.builtin.command:
        cmd: >
          kubectl patch felixconfiguration default
          --type='merge'
          --patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
      register: patch_result
      changed_when: "'felixconfiguration.projectcalico.org/default patched' in patch_result.stdout"
      failed_when: "'Error' in patch_result.stderr"
      when: calico_ebpf