Mirror of https://github.com/PyratLabs/ansible-role-k3s
Fixed data-dir configuration and draining of nodes. Added documentation.
parent 21adf94627
commit 4afc2c8a5a
30 changed files with 964 additions and 31 deletions
@@ -14,14 +14,13 @@ env:
  matrix:
    - MOLECULE_DISTRO: geerlingguy/docker-debian10-ansible:latest
    - MOLECULE_DISTRO: geerlingguy/docker-ubuntu2004-ansible:latest
    - MOLECULE_DISTRO: geerlingguy/docker-amazonlinux2-ansible:latest

    # Test installing docker
    - MOLECULE_DISTRO: geerlingguy/docker-centos7-ansible:latest
      MOLECULE_SCENARIO: docker
    - MOLECULE_DISTRO: geerlingguy/docker-ubuntu1804-ansible:latest
      MOLECULE_SCENARIO: docker
    - MOLECULE_DISTRO: geerlingguy/docker-amazonlinux2-ansible:latest
      MOLECULE_SCENARIO: docker

    # Test disabling all deployments
    - MOLECULE_DISTRO: geerlingguy/docker-fedora31-ansible:latest
27 CHANGELOG.md
@@ -7,11 +7,38 @@

### Breaking changes

### Known issues

### Contributors

---
-->

## 2020-12-05, v2.1.0

### Notable changes

- Deprecated configuration check built into validation steps.
- Removed duplicated tasks for single node cluster.
- Added documentation providing quickstart examples and common operations.
- Fixed data-dir configuration.
- Some tweaks to rootless.
- Fixed draining and removing of nodes.

### Breaking changes

- `k3s_token_location` now points to a file location, not a directory.
- `k3s_systemd_unit_directory` renamed to `k3s_systemd_unit_dir`.
- Removed `k3s_node_data_dir` as this is now configured with `data-dir` in
  `k3s_server` and/or `k3s_agent`.

### Known issues

- Rootless is still broken; this is still not supported as a method for
  running k3s using this role.

---

## 2020-11-30, v2.0.2

### Notable changes
@@ -83,7 +83,6 @@ consistency. These are generally cluster-level configuration.

| `k3s_skip_validation` | Skip all tasks that validate configuration. | `false` |
| `k3s_install_dir` | Installation directory for k3s. | `/usr/local/bin` |
| `k3s_install_hard_links` | Install using hard links rather than symbolic links. | `false` |
| `k3s_server_manifests_dir` | Path for placing the `k3s_server_manifests_templates`. | `/var/lib/rancher/k3s/server/manifests` |
| `k3s_server_manifests_templates` | A list of Auto-Deploying Manifests Templates. | [] |
| `k3s_use_experimental` | Allow the use of experimental features in k3s. | `false` |
| `k3s_use_unsupported_config` | Allow the use of unsupported configurations in k3s. | `false` |

@@ -101,6 +100,7 @@ particularly with regards to privilege escalation.

| `k3s_become_for_systemd` | Escalate user privileges for systemd tasks. | NULL |
| `k3s_become_for_install_dir` | Escalate user privileges for creating installation directories. | NULL |
| `k3s_become_for_usr_local_bin` | Escalate user privileges for writing to `/usr/local/bin`. | NULL |
| `k3s_become_for_data_dir` | Escalate user privileges for creating the data directory. | NULL |
| `k3s_become_for_package_install` | Escalate user privileges for installing k3s. | NULL |
| `k3s_become_for_kubectl` | Escalate user privileges for running `kubectl`. | NULL |
| `k3s_become_for_uninstall` | Escalate user privileges for uninstalling k3s. | NULL |

@@ -133,7 +133,8 @@ variable as per the below example:

k3s_server: "{{ lookup('file', 'path/to/k3s_server.yml') | from_yaml }}"
```

<!-- See examples: Documentation coming soon -->
Check out the [Documentation](documentation/README.md) for example
configuration.

### Agent (Worker) Configuration

@@ -160,7 +161,8 @@ variable as per the below example:

k3s_agent: "{{ lookup('file', 'path/to/k3s_agent.yml') | from_yaml }}"
```

<!-- See examples: Documentation coming soon -->
Check out the [Documentation](documentation/README.md) for example
configuration.

#### Important note about `k3s_release_version`
@@ -72,6 +72,7 @@ k3s_become_for_all: false

k3s_become_for_systemd: null
k3s_become_for_install_dir: null
k3s_become_for_usr_local_bin: null
k3s_become_for_data_dir: null
k3s_become_for_package_install: null
k3s_become_for_kubectl: null
k3s_become_for_uninstall: null
39 documentation/README.md (new file)
@@ -0,0 +1,39 @@

# ansible-role-k3s

This document describes a number of ways of consuming this Ansible role for use
in your own k3s deployments. It cannot cover every scenario, but it provides
some common example configurations.

## Requirements

Before you start you will need an Ansible controller. This can either be your
workstation or a dedicated system that you have access to. The instructions in
this documentation assume you are using the `ansible` CLI; there are no
instructions available for Ansible Tower at this time.

Follow the guide below to get Ansible installed:

https://docs.ansible.com/ansible/latest/installation_guide/index.html
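
As an illustration only (one possible setup, not the only one): on a controller
with Python 3 you could install Ansible with `pip3 install ansible` and then
pull this role from Ansible Galaxy with `ansible-galaxy install xanmanning.k3s`.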

## Quickstart

Below are quickstart examples for a single node k3s server, a k3s cluster
with a single control node, and an HA k3s cluster. These represent the bare
minimum configuration.

- [Single node k3s](quickstart-single-node.md)
- [Simple k3s cluster](quickstart-cluster.md)
- [HA k3s cluster using embedded etcd](quickstart-ha-cluster.md)

## Example configurations and operations

### Configuration

- [Setting up 2-node HA control plane with external datastore](configuration/2-node-ha-ext-datastore.md)
- [Provision multiple standalone k3s nodes](configuration/multiple-standalone-k3s-nodes.md)

### Operations

- [Stop/Start a cluster](operations/stop-start-cluster.md)
- [Extending a cluster](operations/extending-a-cluster.md)
- [Shrinking a cluster](operations/shrinking-a-cluster.md)
77 documentation/configuration/2-node-ha-ext-datastore.md (new file)
@@ -0,0 +1,77 @@

# 2 Node HA Control Plane with external database

For this configuration we are deploying a highly available control plane
composed of two control nodes. This could be achieved with embedded etcd;
however, etcd ideally has an odd number of nodes.

The example below will use an external PostgreSQL datastore to store the
cluster state information.

Main guide: https://rancher.com/docs/k3s/latest/en/installation/ha/

## Architecture

```text
(Architecture diagram: a load balancer in front of control-01 and control-02;
worker-01, worker-02 and worker-03 reach the control plane through the load
balancer; both control nodes use the external datastore nodes db-01 and db-02.)
```
### Required Components

- Load balancer
- 2 control plane nodes
- 1 or more worker nodes
- PostgreSQL database (replicated, or a Linux HA cluster).

## Configuration

For your control nodes, you will need to point the control plane at the
PostgreSQL datastore endpoint and set `k3s_control_node_address` to the
hostname or IP of your load balancer.

Below is the example for PostgreSQL; it is possible to use MySQL or an etcd
cluster as well. Consult the guide below for using alternative datastore
endpoints.

https://rancher.com/docs/k3s/latest/en/installation/datastore/#datastore-endpoint-format-and-functionality

```yaml
---

k3s_server:
  datastore-endpoint: postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable
```

Your worker nodes need to know how to connect to the control plane; this is
done by setting `k3s_control_node_address` to the hostname or IP address of
the load balancer.

```yaml
---

k3s_control_node_address: control.examplek3s.com
```
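
One way to put the two settings together (an illustrative sketch only; host
names and addresses are placeholders) is an inventory that marks the two
control nodes with `k3s_control_node: true` and applies the load balancer
address to every host:

```yaml
---

k3s_cluster:
  hosts:
    control-01:
      ansible_host: 10.10.9.2
      k3s_control_node: true
      k3s_server:
        datastore-endpoint: postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable
    control-02:
      ansible_host: 10.10.9.3
      k3s_control_node: true
      k3s_server:
        datastore-endpoint: postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable
    worker-01:
      ansible_host: 10.10.9.10
  vars:
    # Shared by all hosts: workers and control nodes both point at the LB.
    k3s_control_node_address: control.examplek3s.com
```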
71 documentation/configuration/multiple-standalone-k3s-nodes.md (new file)
@@ -0,0 +1,71 @@

# Multiple standalone K3s nodes

This is an example of when you might want to configure multiple standalone
k3s nodes simultaneously. For this we will assume a hypothetical situation
where we are configuring 25 Raspberry Pis to deploy to our shop floors.

Each Raspberry Pi will be configured as a standalone IoT device hosting an
application that will push data to head office.

## Architecture

```text
(Diagram: a stack of identical standalone nodes, Node-01 through Node-N.)
```
## Configuration

Below is our example inventory of 200 nodes (truncated):

```yaml
---

k3s_workers:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
    kube-1:
      ansible_user: ansible
      ansible_host: 10.10.9.3
      ansible_python_interpreter: /usr/bin/python3
    kube-2:
      ansible_user: ansible
      ansible_host: 10.10.9.4
      ansible_python_interpreter: /usr/bin/python3

    # ..... SNIP .....

    kube-199:
      ansible_user: ansible
      ansible_host: 10.10.9.201
      ansible_python_interpreter: /usr/bin/python3
    kube-200:
      ansible_user: ansible
      ansible_host: 10.10.9.202
      ansible_python_interpreter: /usr/bin/python3

```

In our `group_vars/` (or as `vars:` in our playbook), we will need to set the
`k3s_build_cluster` variable to `false`. This will stop the role from
attempting to cluster all 200 nodes; instead it will install k3s on each node
as a standalone server.

```yaml
---

k3s_build_cluster: false
```
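
To apply this, a playbook along the lines of the following sketch (the group
name matches the inventory above; `k3s_build_cluster` could equally live in
`group_vars/`) installs k3s on every host in `k3s_workers` as an independent
server:

```yaml
---

- name: Install k3s on standalone nodes
  hosts: k3s_workers
  vars:
    k3s_become_for_all: true
    k3s_build_cluster: false
  roles:
    - xanmanning.k3s
```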
69 documentation/operations/extending-a-cluster.md (new file)
@@ -0,0 +1,69 @@

# Extending a cluster

This document describes the method for extending a cluster with new worker
nodes.

## Assumptions

It is assumed that you have already deployed a k3s cluster using this role,
and that you have an appropriately configured inventory and playbook to create
the cluster.

Below, our example inventory and playbook are as follows:

- inventory: `inventory.yml`
- playbook: `cluster.yml`

Currently your `inventory.yml` looks like this; it has two nodes defined,
`kube-0` (control node) and `kube-1` (worker node).

```yaml
---

k3s_cluster:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
    kube-1:
      ansible_user: ansible
      ansible_host: 10.10.9.3
      ansible_python_interpreter: /usr/bin/python3
```

## Method

We have our two nodes, one control, one worker. The goal is to extend this to
add capacity by adding a new worker node, `kube-2`. To do this we will add the
new node to our inventory.

```yaml
---

k3s_cluster:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
    kube-1:
      ansible_user: ansible
      ansible_host: 10.10.9.3
      ansible_python_interpreter: /usr/bin/python3
    kube-2:
      ansible_user: ansible
      ansible_host: 10.10.9.4
      ansible_python_interpreter: /usr/bin/python3
```

Once the new node has been added, you can re-run the automation to join it to
the cluster. You should expect the majority of changes to apply to the worker
node being introduced to the cluster.
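
For example, with the files above, re-running `ansible-playbook -i inventory.yml cluster.yml`
will join `kube-2` to the existing cluster.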

```text
PLAY RECAP *******************************************************************************************************
kube-0 : ok=53 changed=1  unreachable=0 failed=0 skipped=30 rescued=0 ignored=0
kube-1 : ok=40 changed=1  unreachable=0 failed=0 skipped=35 rescued=0 ignored=0
kube-2 : ok=42 changed=10 unreachable=0 failed=0 skipped=35 rescued=0 ignored=0
```
74 documentation/operations/shrinking-a-cluster.md (new file)
@@ -0,0 +1,74 @@

# Shrinking a cluster

This document describes the method for shrinking a cluster by removing a
worker node.

## Assumptions

It is assumed that you have already deployed a k3s cluster using this role,
and that you have an appropriately configured inventory and playbook to create
the cluster.

Below, our example inventory and playbook are as follows:

- inventory: `inventory.yml`
- playbook: `cluster.yml`

Currently your `inventory.yml` looks like this; it has three nodes defined,
`kube-0` (control node) and `kube-1`, `kube-2` (worker nodes).

```yaml
---

k3s_cluster:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
    kube-1:
      ansible_user: ansible
      ansible_host: 10.10.9.3
      ansible_python_interpreter: /usr/bin/python3
    kube-2:
      ansible_user: ansible
      ansible_host: 10.10.9.4
      ansible_python_interpreter: /usr/bin/python3
```

## Method

We have our three nodes, one control, two workers. The goal is to shrink this
to remove excess capacity by offboarding the worker node `kube-2`. To do this
we will set the `kube-2` node to `k3s_state: uninstalled` in our inventory.

```yaml
---

k3s_cluster:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
    kube-1:
      ansible_user: ansible
      ansible_host: 10.10.9.3
      ansible_python_interpreter: /usr/bin/python3
    kube-2:
      ansible_user: ansible
      ansible_host: 10.10.9.4
      ansible_python_interpreter: /usr/bin/python3
      k3s_state: uninstalled
```

What you will typically see is changes to your control plane (`kube-0`) and the
node being removed (`kube-2`). The role will register the removal of the node
with the cluster by draining the node and removing it from the cluster.
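
For example, with the files above, re-running `ansible-playbook -i inventory.yml cluster.yml`
will drain `kube-2` and remove it from the cluster.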

```text
PLAY RECAP *******************************************************************************************************
kube-0 : ok=55 changed=2 unreachable=0 failed=0 skipped=28 rescued=0 ignored=0
kube-1 : ok=40 changed=0 unreachable=0 failed=0 skipped=35 rescued=0 ignored=0
kube-2 : ok=23 changed=2 unreachable=0 failed=0 skipped=17 rescued=0 ignored=1
```
93 documentation/operations/stop-start-cluster.md (new file)
@@ -0,0 +1,93 @@

# Stopping and Starting a cluster

This document describes the Ansible method for restarting a k3s cluster
deployed by this role.

## Assumptions

It is assumed that you have already deployed a k3s cluster using this role,
and that you have an appropriately configured inventory and playbook to create
the cluster.

Below, our example inventory and playbook are as follows:

- inventory: `inventory.yml`
- playbook: `cluster.yml`

## Method

### Start cluster

You can start the cluster using either of the following commands:

- Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=started'`
- Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=started' --become all`

Below is example output; remember that Ansible is idempotent, so re-running a
command may not necessarily change the state.

**Playbook method output**:

```text
PLAY RECAP *******************************************************************************************************
kube-0 : ok=6 changed=0 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0
kube-1 : ok=6 changed=0 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0
kube-2 : ok=6 changed=0 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0
```

### Stop cluster

You can stop the cluster using either of the following commands:

- Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=stopped'`
- Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=stopped' --become all`

Below is example output; remember that Ansible is idempotent, so re-running a
command may not necessarily change the state.

**Playbook method output**:

```text
PLAY RECAP *******************************************************************************************************
kube-0 : ok=6 changed=1 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0
kube-1 : ok=6 changed=1 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0
kube-2 : ok=6 changed=1 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0
```

### Restart cluster

Just like the `service` module, you can also specify `restarted` as a state.
This will do a `stop` followed by a `start`.

- Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=restarted'`
- Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=restarted' --become all`

```text
PLAY RECAP *******************************************************************************************************
kube-0 : ok=7 changed=1 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
kube-1 : ok=7 changed=1 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
kube-2 : ok=7 changed=1 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
```

## Tips

You can limit the targets by adding the `-l` flag to your `ansible-playbook`
command, or simply target your ad-hoc commands. For example, in a 3 node
cluster (called `kube-0`, `kube-1` and `kube-2`) we can limit the restart to
`kube-1` and `kube-2` with the following:

- Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=restarted' -l "kube-1,kube-2"`
- Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=restarted' --become "kube-1,kube-2"`

```text
PLAY RECAP ********************************************************************************************************
kube-1 : ok=7 changed=2 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
kube-2 : ok=7 changed=2 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0
```
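
After starting or restarting, one way to confirm the nodes have rejoined
(assuming, as in the quickstart guides, that `kube-0` is a control node) is an
ad-hoc `kubectl` call:
`ansible -i inventory.yml -m command -a 'kubectl get nodes' --become kube-0`.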

## FAQ

1. _Why might I use the `ansible-playbook` command over an ad-hoc command?_
   - The stop/start tasks will be aware of configuration. As the role
     develops, there might be some pre-tasks added to change how a cluster
     is stopped or started.
147 documentation/quickstart-cluster.md (new file)
@@ -0,0 +1,147 @@

# Quickstart: K3s cluster with a single control node

This is the quickstart guide to creating your own k3s cluster with one control
plane node. This control plane node will also be a worker.

:hand: This example requires your Ansible user to be able to connect to the
servers over SSH using key-based authentication. The user also has an entry
in a sudoers file that allows privilege escalation without requiring a
password.

To test this is the case, run the following check, replacing `<ansible_user>`
and `<server_name>`. The expected output is `Works`.

`ssh <ansible_user>@<server_name> 'sudo cat /etc/shadow >/dev/null && echo "Works"'`

For example:

```text
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"'
Works
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $
```

## Directory structure

Our working directory will have the following files:

```text
kubernetes-playground/
|_ inventory.yml
|_ cluster.yml
```

## Inventory

Here's a YAML based example inventory for our servers called `inventory.yml`:

```yaml
---

k3s_cluster:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
    kube-1:
      ansible_user: ansible
      ansible_host: 10.10.9.3
      ansible_python_interpreter: /usr/bin/python3
    kube-2:
      ansible_user: ansible
      ansible_host: 10.10.9.4
      ansible_python_interpreter: /usr/bin/python3

```

We can test this works with `ansible -i inventory.yml -m ping all`, expected
result:

```text
kube-0 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
kube-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
kube-2 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}

```

## Playbook

Here is our playbook for the k3s cluster (`cluster.yml`):

```yaml
---

- name: Build a cluster with a single control node
  hosts: k3s_cluster
  vars:
    k3s_become_for_all: true
  roles:
    - xanmanning.k3s
```

## Execution

To execute the playbook against our inventory file, we will run the following
command:

`ansible-playbook -i inventory.yml cluster.yml`

The output we can expect is similar to the below, with no failed or unreachable
nodes. The default behavior of this role is to delegate the first play host as
the control node, so kube-0 will have more changed tasks than the others:

```text
PLAY RECAP *******************************************************************************************************
kube-0 : ok=56 changed=11 unreachable=0 failed=0 skipped=28 rescued=0 ignored=0
kube-1 : ok=43 changed=10 unreachable=0 failed=0 skipped=32 rescued=0 ignored=0
kube-2 : ok=43 changed=10 unreachable=0 failed=0 skipped=32 rescued=0 ignored=0
```

## Testing

After logging into kube-0, we can test that k3s is running across the cluster,
that all nodes are ready and that everything is ready to execute our Kubernetes
workloads by running the following:

- `sudo kubectl get nodes -o wide`
- `sudo kubectl get pods -o wide --all-namespaces`

:hand: Note we are using `sudo` because we need to be root to access the
kube config for this node. This behavior can be changed by specifying
`write-kubeconfig-mode: 0644` in `k3s_server`.
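
If you prefer to read the kube config as a non-root user, a minimal sketch of
that setting (placed wherever you define `k3s_server`, for example in the play
`vars:`) looks like this:

```yaml
k3s_server:
  write-kubeconfig-mode: '0644'
```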

**Get Nodes**:

```text
ansible@kube-0:~$ sudo kubectl get nodes -o wide
NAME     STATUS   ROLES    AGE   VERSION        INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION     CONTAINER-RUNTIME
kube-0   Ready    master   34s   v1.19.4+k3s1   10.0.2.15     <none>        Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.1-k3s1
kube-2   Ready    <none>   14s   v1.19.4+k3s1   10.0.2.17     <none>        Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.1-k3s1
kube-1   Ready    <none>   14s   v1.19.4+k3s1   10.0.2.16     <none>        Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.1-k3s1
ansible@kube-0:~$
```

**Get Pods**:

```text
ansible@kube-0:~$ sudo kubectl get pods -o wide --all-namespaces
NAMESPACE     NAME                                     READY   STATUS      RESTARTS   AGE   IP          NODE     NOMINATED NODE   READINESS GATES
kube-system   local-path-provisioner-7ff9579c6-72j8x   1/1     Running     0          55s   10.42.2.2   kube-1   <none>           <none>
kube-system   metrics-server-7b4f8b595-lkspj           1/1     Running     0          55s   10.42.1.2   kube-2   <none>           <none>
kube-system   helm-install-traefik-b6vnt               0/1     Completed   0          55s   10.42.0.3   kube-0   <none>           <none>
kube-system   coredns-66c464876b-llsh7                 1/1     Running     0          55s   10.42.0.2   kube-0   <none>           <none>
kube-system   svclb-traefik-jrqg7                      2/2     Running     0          27s   10.42.1.3   kube-2   <none>           <none>
kube-system   svclb-traefik-gh65q                      2/2     Running     0          27s   10.42.0.4   kube-0   <none>           <none>
kube-system   svclb-traefik-5z7zp                      2/2     Running     0          27s   10.42.2.3   kube-1   <none>           <none>
kube-system   traefik-5dd496474-l2k74                  1/1     Running     0          27s   10.42.1.4   kube-2   <none>           <none>
```
154 documentation/quickstart-ha-cluster.md (new file)
@@ -0,0 +1,154 @@

# Quickstart: K3s cluster with an HA control plane using embedded etcd

This is the quickstart guide to creating your own 3 node k3s cluster with a
highly available control plane using the embedded etcd datastore.
The control plane nodes will all be workers as well.

:hand: This example requires your Ansible user to be able to connect to the
servers over SSH using key-based authentication. The user also has an entry
in a sudoers file that allows privilege escalation without requiring a
password.

To test this is the case, run the following check, replacing `<ansible_user>`
and `<server_name>`. The expected output is `Works`.

`ssh <ansible_user>@<server_name> 'sudo cat /etc/shadow >/dev/null && echo "Works"'`

For example:

```text
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"'
Works
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $
```

## Directory structure

Our working directory will have the following files:

```text
kubernetes-playground/
|_ inventory.yml
|_ ha_cluster.yml
```

## Inventory

Here's a YAML based example inventory for our servers called `inventory.yml`:

```yaml
---

# We're adding k3s_control_node to each host, this can be done in host_vars/
# or group_vars/ as well - but for simplicity we are setting it here.

k3s_cluster:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3
      k3s_control_node: true
    kube-1:
      ansible_user: ansible
      ansible_host: 10.10.9.3
      ansible_python_interpreter: /usr/bin/python3
      k3s_control_node: true
    kube-2:
      ansible_user: ansible
      ansible_host: 10.10.9.4
      ansible_python_interpreter: /usr/bin/python3
      k3s_control_node: true

```

We can test this works with `ansible -i inventory.yml -m ping all`, expected
result:

```text
kube-0 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
kube-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
kube-2 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}

```

## Playbook

Here is our playbook for the k3s cluster (`ha_cluster.yml`):

```yaml
---

- name: Build a cluster with HA control plane
  hosts: k3s_cluster
  vars:
    k3s_become_for_all: true
    k3s_etcd_datastore: true
    k3s_use_experimental: true # Note this is required for k3s v1.19.4+k3s1
  roles:
    - xanmanning.k3s
```

## Execution

To execute the playbook against our inventory file, we will run the following
command:

`ansible-playbook -i inventory.yml ha_cluster.yml`

The output we can expect is similar to the below, with no failed or unreachable
nodes. The default behavior of this role is to delegate the first play host as
the primary control node, so kube-0 will have more changed tasks than the others:

```text
PLAY RECAP *******************************************************************************************************
kube-0 : ok=53 changed=8  unreachable=0 failed=0 skipped=30 rescued=0 ignored=0
kube-1 : ok=47 changed=10 unreachable=0 failed=0 skipped=28 rescued=0 ignored=0
kube-2 : ok=47 changed=9  unreachable=0 failed=0 skipped=28 rescued=0 ignored=0
```

## Testing

After logging into any of the servers (it doesn't matter which), we can test
that k3s is running across the cluster, that all nodes are ready and that
everything is ready to execute our Kubernetes workloads by running the
following:

- `sudo kubectl get nodes -o wide`
- `sudo kubectl get pods -o wide --all-namespaces`

:hand: Note we are using `sudo` because we need to be root to access the
kube config for this node. This behavior can be changed by specifying
`write-kubeconfig-mode: 0644` in `k3s_server`.

**Get Nodes**:

```text
ansible@kube-0:~$ sudo kubectl get nodes -o wide
NAME     STATUS   ROLES         AGE     VERSION        INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION     CONTAINER-RUNTIME
kube-0   Ready    etcd,master   2m58s   v1.19.4+k3s1   10.10.9.2     10.10.9.2     Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.1-k3s1
kube-1   Ready    etcd,master   2m22s   v1.19.4+k3s1   10.10.9.3     10.10.9.3     Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.1-k3s1
kube-2   Ready    etcd,master   2m10s   v1.19.4+k3s1   10.10.9.4     10.10.9.4     Ubuntu 20.04.1 LTS   5.4.0-56-generic   containerd://1.4.1-k3s1
```

**Get Pods**:

```text
ansible@kube-0:~$ sudo kubectl get pods -o wide --all-namespaces
NAMESPACE     NAME                                     READY   STATUS      RESTARTS   AGE     IP          NODE     NOMINATED NODE   READINESS GATES
kube-system   coredns-66c464876b-rhgn6                 1/1     Running     0          3m38s   10.42.0.2   kube-0   <none>           <none>
kube-system   helm-install-traefik-vwglv               0/1     Completed   0          3m39s   10.42.0.3   kube-0   <none>           <none>
kube-system   local-path-provisioner-7ff9579c6-d5xpb   1/1     Running     0          3m38s   10.42.0.5   kube-0   <none>           <none>
kube-system   metrics-server-7b4f8b595-nhbt8           1/1     Running     0          3m38s   10.42.0.4   kube-0   <none>           <none>
kube-system   svclb-traefik-9lzcq                      2/2     Running     0          2m56s   10.42.1.2   kube-1   <none>           <none>
kube-system   svclb-traefik-vq487                      2/2     Running     0          2m45s   10.42.2.2   kube-2   <none>           <none>
kube-system   svclb-traefik-wkwkk                      2/2     Running     0          3m1s    10.42.0.7   kube-0   <none>           <none>
kube-system   traefik-5dd496474-lw6x8                  1/1     Running     0          3m1s    10.42.0.6   kube-0   <none>           <none>
```
121 documentation/quickstart-single-node.md (new file)
@@ -0,0 +1,121 @@

# Quickstart: K3s single node

This is the quickstart guide to creating your own single-node k3s "cluster".

:hand: This example requires your Ansible user to be able to connect to the
server over SSH using key-based authentication. The user also has an entry
in a sudoers file that allows privilege escalation without requiring a
password.

To test this is the case, run the following check, replacing `<ansible_user>`
and `<server_name>`. The expected output is `Works`.

`ssh <ansible_user>@<server_name> 'sudo cat /etc/shadow >/dev/null && echo "Works"'`

For example:

```text
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"'
Works
[ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $
```

## Directory structure

Our working directory will have the following files:

```text
kubernetes-playground/
|_ inventory.yml
|_ single_node.yml
```

## Inventory

Here's a YAML based example inventory for our server called `inventory.yml`:

```yaml
---

k3s_cluster:
  hosts:
    kube-0:
      ansible_user: ansible
      ansible_host: 10.10.9.2
      ansible_python_interpreter: /usr/bin/python3

```

We can test this works with `ansible -i inventory.yml -m ping all`, expected
result:

```text
kube-0 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
```

## Playbook

Here is our playbook for a single node k3s cluster (`single_node.yml`):

```yaml
---

- name: Build a single node k3s cluster
  hosts: kube-0
  vars:
    k3s_become_for_all: true
  roles:
    - xanmanning.k3s
```

## Execution

To execute the playbook against our inventory file, we will run the following
command:

`ansible-playbook -i inventory.yml single_node.yml`

The output we can expect is similar to the below, with no failed or unreachable
nodes:

```text
PLAY RECAP *******************************************************************************************************
kube-0 : ok=39 changed=8 unreachable=0 failed=0 skipped=39 rescued=0 ignored=0
```

## Testing

After logging into the server, we can test that k3s is running and that it is
ready to execute our Kubernetes workloads by running the following:

- `sudo kubectl get nodes`
- `sudo kubectl get pods -o wide --all-namespaces`

:hand: Note we are using `sudo` because we need to be root to access the
kube config for this node. This behavior can be changed by specifying
`write-kubeconfig-mode: 0644` in `k3s_server`.

**Get Nodes**:

```text
ansible@kube-0:~$ sudo kubectl get nodes
NAME     STATUS   ROLES    AGE     VERSION
kube-0   Ready    master   5m27s   v1.19.4+k3s
ansible@kube-0:~$
```

**Get Pods**:

```text
ansible@kube-0:~$ sudo kubectl get pods --all-namespaces -o wide
NAMESPACE     NAME                                     READY   STATUS      RESTARTS   AGE     IP          NODE     NOMINATED NODE   READINESS GATES
kube-system   metrics-server-7b4f8b595-k692h           1/1     Running     0          9m38s   10.42.0.2   kube-0   <none>           <none>
kube-system   local-path-provisioner-7ff9579c6-5lgzb   1/1     Running     0          9m38s   10.42.0.3   kube-0   <none>           <none>
kube-system   coredns-66c464876b-xg42q                 1/1     Running     0          9m38s   10.42.0.5   kube-0   <none>           <none>
kube-system   helm-install-traefik-tdpcs               0/1     Completed   0          9m38s   10.42.0.4   kube-0   <none>           <none>
kube-system   svclb-traefik-hk248                      2/2     Running     0          9m4s    10.42.0.7   kube-0   <none>           <none>
kube-system   traefik-5dd496474-bf4kv                  1/1     Running     0          9m4s    10.42.0.6   kube-0   <none>           <none>
```
@@ -4,6 +4,6 @@
  become: true
  vars:
    molecule_is_test: true
    k3s_cluster_state: downloaded
    k3s_state: downloaded
  roles:
    - role: xanmanning.k3s

@@ -4,6 +4,6 @@
  become: true
  vars:
    molecule_is_test: true
    k3s_cluster_state: restarted
    k3s_state: restarted
  roles:
    - role: xanmanning.k3s

@@ -4,6 +4,6 @@
  become: true
  vars:
    molecule_is_test: true
    k3s_cluster_state: started
    k3s_state: started
  roles:
    - role: xanmanning.k3s

@@ -4,6 +4,6 @@
  become: true
  vars:
    molecule_is_test: true
    k3s_cluster_state: stopped
    k3s_state: stopped
  roles:
    - role: xanmanning.k3s

@@ -4,6 +4,6 @@
  become: true
  vars:
    molecule_is_test: true
    k3s_cluster_state: uninstalled
    k3s_state: uninstalled
  roles:
    - role: xanmanning.k3s
@@ -2,7 +2,7 @@

- name: Ensure NODE_TOKEN is captured from control node
  slurp:
    path: "{{ k3s_node_data_dir | default ('/var/lib/rancher/k3s') }}/server/node-token"
    path: "{{ k3s_runtime_config['data-dir'] | default ('/var/lib/rancher/k3s') }}/server/node-token"
  register: k3s_slurped_control_token
  delegate_to: "{{ k3s_control_delegate }}"
  when: k3s_control_token is not defined and not ansible_check_mode
@@ -21,7 +21,7 @@

- name: Ensure the cluster NODE_TOKEN file location exists
  file:
    path: "{{ k3s_token_location }}"
    path: "{{ k3s_token_location | dirname }}"
    state: directory
    mode: 0755
  become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}"
@@ -29,7 +29,7 @@

- name: Ensure k3s cluster token file is present on workers and secondary control nodes
  template:
    src: cluster-token.j2
    dest: "{{ k3s_token_location }}/cluster-token"
    dest: "{{ k3s_token_location }}"
    mode: 0600
  become: "{{ k3s_become_for_install_dir | ternary(true, false, k3s_become_for_all) }}"
  when: (k3s_control_node and not k3s_primary_control_node)
@@ -40,7 +40,7 @@

- name: Ensure k3s service unit file is present
  template:
    src: k3s.service.j2
    dest: "{{ k3s_systemd_unit_directory }}/k3s.service"
    dest: "{{ k3s_systemd_unit_dir }}/k3s.service"
    mode: 0644
  become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}"
  notify:
@@ -3,5 +3,7 @@

- name: Ensure systemd context is correct if we are running k3s rootless
  set_fact:
    k3s_systemd_context: user
    k3s_systemd_unit_directory: "{{ ansible_user_dir }}/.config/systemd/user"
  when: k3s_non_root is defined and k3s_non_root
    k3s_systemd_unit_dir: "{{ ansible_user_dir }}/.config/systemd/user"
  when: k3s_runtime_config is defined
        and "rootless" in k3s_runtime_config
        and k3s_runtime_config.rootless
@@ -30,7 +30,7 @@

- name: Ensure k3s service unit file is present
  template:
    src: k3s.service.j2
    dest: "{{ k3s_systemd_unit_directory }}/k3s.service"
    dest: "{{ k3s_systemd_unit_dir }}/k3s.service"
    mode: 0644
  notify:
    - reload systemd
@@ -43,6 +43,9 @@

    dest: "/usr/local/bin/k3s-killall.sh"
    mode: 0700
  become: "{{ k3s_become_for_usr_local_bin | ternary(true, false, k3s_become_for_all) }}"
  when: k3s_runtime_config is defined
        and ("rootless" not in k3s_runtime_config
        or not k3s_runtime_config.rootless)

- name: Ensure k3s uninstall script is present
  template:
@@ -50,3 +53,6 @@

    dest: "/usr/local/bin/k3s-uninstall.sh"
    mode: 0700
  become: "{{ k3s_become_for_usr_local_bin | ternary(true, false, k3s_become_for_all) }}"
  when: k3s_runtime_config is defined
        and ("rootless" not in k3s_runtime_config
        or not k3s_runtime_config.rootless)
@@ -9,11 +9,21 @@

- name: Ensure systemd unit file directory exists
  file:
    path: "{{ k3s_systemd_unit_directory }}"
    path: "{{ k3s_systemd_unit_dir }}"
    state: directory
    mode: 0755
  become: "{{ k3s_become_for_systemd | ternary(true, false, k3s_become_for_all) }}"

- name: Ensure data directory exists when not using the default
  file:
    path: "{{ k3s_runtime_config['data-dir'] | default('/var/lib/rancher/k3s') }}"
    state: directory
    mode: 0755
  become: "{{ k3s_become_for_data_dir | ternary(true, false, k3s_become_for_all) }}"
  when: k3s_runtime_config is defined
        and "data-dir" in k3s_runtime_config
        and k3s_runtime_config['data-dir'] != "/var/lib/rancher/k3s"

- include_tasks: install-k3s-node.yml
  when: ((k3s_control_node and k3s_controller_count | length == 1)
        or (k3s_primary_control_node and k3s_controller_count | length > 1)) and not ansible_check_mode
@@ -1,5 +1,11 @@
---

- name: Ensure k3s_build_cluster is false if running against a single node.
  set_fact:
    k3s_build_cluster: false
  when: play_hosts | length < 2
        and k3s_control_node_address is not defined

- name: Ensure k3s control node fact is set
  set_fact:
    k3s_control_node: "{{ 'false' if k3s_build_cluster else 'true' }}"
@@ -23,8 +23,8 @@

  delegate_to: "{{ k3s_control_delegate }}"
  run_once: true
  when: item in kubectl_get_nodes_result.stdout
        and hostvars[item].k3s_cluster_state is defined
        and hostvars[item].k3s_cluster_state == 'uninstalled'
        and hostvars[item].k3s_state is defined
        and hostvars[item].k3s_state == 'uninstalled'
  loop: "{{ play_hosts }}"
  become: "{{ k3s_become_for_kubectl | ternary(true, false, k3s_become_for_all) }}"
@@ -33,8 +33,8 @@

  delegate_to: "{{ k3s_control_delegate }}"
  run_once: true
  when: item in kubectl_get_nodes_result.stdout
        and hostvars[item].k3s_cluster_state is defined
        and hostvars[item].k3s_cluster_state == 'uninstalled'
        and hostvars[item].k3s_state is defined
        and hostvars[item].k3s_state == 'uninstalled'
  loop: "{{ play_hosts }}"
  become: "{{ k3s_become_for_kubectl | ternary(true, false, k3s_become_for_all) }}"
@@ -1,5 +1,11 @@
---

- name: Check if newuidmap is available
  command: which newuidmap
  failed_when: false
  changed_when: false
  register: k3s_check_newuidmap_installed

- name: Check if /proc/sys/kernel/unprivileged_userns_clone exists
  stat:
    path: /proc/sys/kernel/unprivileged_userns_clone

@@ -49,6 +55,7 @@

    - k3s_current_user_subuid.split(':')[2] | int >= 65536
    - k3s_current_user_subgid.split(':')[2] | int >= 65536
    - ansible_env['XDG_RUNTIME_DIR'] is defined
    - k3s_check_newuidmap_installed.rc == 0
  success_msg: All kernel parameters passed
  fail_msg: Kernel parameters are not set correctly, please check
            https://github.com/rootless-containers/rootlesskit
@@ -7,7 +7,7 @@

    success_msg: "{{ k3s_release_version }} is supported by this role."
    fail_msg: "{{ k3s_release_version }} is not supported by this role, please use xanmanning.k3s v1.x."

- name: Check configuration in k3s_server and k3s_agent that needs special configuration.
- name: Check configuration in k3s_server and k3s_agent that needs special configuration
  assert:
    that:
      - (item.setting not in k3s_runtime_config)

@@ -15,6 +15,16 @@

    fail_msg: "{{ item.setting }} found in server/agent config. Please set {{ item.correction }} to use this option."
  loop: "{{ k3s_config_exclude }}"

- name: Check configuration in k3s_server and k3s_agent for deprecated configuration
  assert:
    that:
      - (item.setting not in k3s_runtime_config)
    success_msg: "{{ item.setting }} not found in server/agent config"
    fail_msg: "{{ item.setting }} found in server/agent config. Please set {{ item.correction }} to use this option."
  loop: "{{ k3s_deprecated_config }}"
  when: item.when is not defined
        or (item.when is defined and (k3s_release_version | replace('v', '')) is version_compare(item.when, '>='))

- name: Check configuration in k3s_server and k3s_agent against release version
  assert:
    that:
@@ -7,7 +7,6 @@

- import_tasks: check-unsupported-rootless.yml
  when: ("rootless" in k3s_runtime_config)
        and k3s_runtime_config.rootless
        and k3s_use_unsupported_config

- import_tasks: check-control-count.yml
  when: k3s_build_cluster is defined and k3s_build_cluster
@@ -2,7 +2,7 @@

[ $(id -u) -eq 0 ] || exec sudo $0 $@

for bin in {{ k3s_node_data_dir | default('/var/lib/rancher/k3s') }}/data/**/bin/; do
for bin in {{ k3s_runtime_config['data-dir'] | default('/var/lib/rancher/k3s') }}/data/**/bin/; do
    [ -d "$bin" ] && export PATH=$bin:$PATH
done
@@ -18,7 +18,7 @@ ExecStart={{ k3s_install_dir }}/k3s

    server
    {% if (k3s_etcd_datastore is defined and k3s_etcd_datastore) and (k3s_primary_control_node is not defined or not k3s_primary_control_node) %}
    --server https://{{ k3s_control_node_address }}:{{ k3s_runtime_config['https-listen-port'] | default(6443) }}
    --token-file {{ k3s_token_location }}/cluster-token
    --token-file {{ k3s_token_location }}
    {% endif %}
    {% if k3s_server is defined %}
    --config {{ k3s_config_file }}

@@ -26,7 +26,7 @@ ExecStart={{ k3s_install_dir }}/k3s

    {% else %}
    agent
    --server https://{{ k3s_control_node_address }}:{{ k3s_runtime_config['https-listen-port'] | default(6443) }}
    --token-file {{ k3s_token_location }}/cluster-token
    --token-file {{ k3s_token_location }}
    {% if k3s_agent is defined %}
    --config {{ k3s_config_file }}
    {% endif %}
@@ -60,10 +60,10 @@ k3s_systemd_context: system

# Directory for systemd unit files to be installed. As this role doesn't use package
# management, this should live in /etc/systemd, not /lib/systemd
k3s_systemd_unit_directory: "/etc/systemd/{{ k3s_systemd_context }}"
k3s_systemd_unit_dir: "/etc/systemd/{{ k3s_systemd_context }}"

# Directory for gathering the k3s token for clustering. I don't see this changing.
k3s_token_location: "/etc/rancher"
k3s_token_location: "/etc/rancher/cluster-token"

# Path for additional Kubernetes Manifests
# https://rancher.com/docs/k3s/latest/en/advanced/#auto-deploying-manifests

@@ -71,8 +71,14 @@ k3s_server_manifests_dir: /var/lib/rancher/k3s/server/manifests

# Config items that should not appear in k3s_server or k3s_agent
k3s_config_exclude:
  - setting: "cluster-init"
    correction: "k3s_etcd_datastore"
  - setting: server
    correction: k3s_control_node_address
  - setting: cluster-init
    correction: k3s_etcd_datastore
  - setting: token
    correction: k3s_control_token
  - setting: token-file
    correction: k3s_token_location

# Config items and the versions that they were introduced
# k3s_config_version_check:

@@ -85,3 +91,16 @@ k3s_experimental_config:

    until: 1.19.4
  - setting: rootless
  - setting: secrets-encryption
  - setting: agent-token
  - setting: agent-token-file
  - setting: cluster-reset

# Config items that should be marked as deprecated
k3s_deprecated_config:
  - setting: no-flannel
    correction: "flannel-backend: 'none'"
    # when: 0.10.2 # Example
  - setting: cluster-secret
    correction: token
  - setting: no-deploy
    correction: "disable: VALUE"