Goal: Generate a secure k3s token, deploy a single-master / two-worker k3s cluster with Ansible, and verify the cluster with kubectl.
Reference environment: Kubuntu 25 host (fullstacklab.site), SSH user stackadmin; VMs as in Day 1–3. Day 3 covered pre-flight (swap off, sysctl, modules) and UFW firewall (22/tcp, 6443/tcp, 8472/udp, 30000–32767/tcp+udp).
Step 1 — Generate a strong cluster token
export K3S_TOKEN="$(tr -dc A-Za-z0-9 </dev/urandom | head -c 48)"
echo "$K3S_TOKEN"
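The token only lives in your current shell session. If you expect to re-run the playbook later (say, to join a third worker), it helps to stash it somewhere safe first; a minimal sketch, assuming ~/.k3s-token on the control host is an acceptable spot:
# Persist the token with owner-only permissions (path is just an example)
touch ~/.k3s-token && chmod 600 ~/.k3s-token
echo "$K3S_TOKEN" > ~/.k3s-token
# In a later session: export K3S_TOKEN="$(cat ~/.k3s-token)"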
Step 2 — Create the Ansible role and playbook
We’ll add a minimal role k3s and a playbook that targets both the master and workers. To make installs resilient, the installer is downloaded once on the control host and copied to the nodes (this works even if a node temporarily lacks DNS), with retries on the download for robustness.
File: ansible/k3s.yml
---
- name: Install k3s (server + agents)
  hosts: k3s_master:k3s_workers
  become: true
  roles:
    - k3s
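The group names in hosts: must exist in the inventory. For reference, a minimal ansible/inventory/hosts.ini could look like the sketch below; the master IP matches the role defaults, while the worker IPs are assumptions based on the Day 1–3 VMs, so adjust them to your lab:
[k3s_master]
k3s-master ansible_host=192.168.56.10

[k3s_workers]
# assumed worker IPs; adjust to match your VMs
k3s-w1 ansible_host=192.168.56.11
k3s-w2 ansible_host=192.168.56.12

[all:vars]
ansible_user=stackadmin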
File: ansible/roles/k3s/defaults/main.yml
# k3s defaults
k3s_version: "v1.29.6+k3s1"
k3s_token: "CHANGE_ME_SUPERSECRET"
k3s_master_ip: "192.168.56.10"
# Copy kubeconfig to this user on the master for convenience
install_kubectl_for_user: true
kubectl_user: "{{ ansible_user | default('stackadmin') }}"
File: ansible/roles/k3s/tasks/main.yml
---
# Decide per host whether to run server or agent
- name: Install master (server) on k3s_master
  include_tasks: server.yml
  when: "'k3s_master' in group_names"

- name: Install agent on k3s_workers
  include_tasks: agent.yml
  when: "'k3s_workers' in group_names"
File: ansible/roles/k3s/tasks/server.yml
---
- name: Ensure prerequisites present
  ansible.builtin.package:
    name: curl
    state: present

# Download once on the control node (robust with retries), then push to remotes
- name: Fetch k3s installer locally (once, with retries)
  ansible.builtin.get_url:
    url: https://get.k3s.io
    dest: /tmp/install_k3s.sh
    mode: '0755'
  delegate_to: localhost
  become: false  # the play escalates by default; no sudo is needed on the control host
  run_once: true
  register: dl
  until: dl is succeeded
  retries: 5
  delay: 5

- name: Push installer to remote
  ansible.builtin.copy:
    src: /tmp/install_k3s.sh
    dest: /tmp/install_k3s.sh
    mode: '0755'

# Idempotent install: the systemd unit acts as the "already installed" marker
- name: Install k3s server
  ansible.builtin.shell: |
    INSTALL_K3S_VERSION="{{ k3s_version }}" \
    K3S_TOKEN="{{ k3s_token }}" \
    K3S_KUBECONFIG_MODE="644" \
    sh /tmp/install_k3s.sh server --write-kubeconfig-mode=644 --tls-san {{ k3s_master_ip }}
  args:
    creates: /etc/systemd/system/k3s.service

- name: Wait for API to come up on localhost:6443
  ansible.builtin.wait_for:
    host: 127.0.0.1
    port: 6443
    delay: 5
    timeout: 180

# Rewrite kubeconfig to use the master IP instead of 127.0.0.1
- name: Point kubeconfig to master IP
  ansible.builtin.replace:
    path: /etc/rancher/k3s/k3s.yaml
    regexp: '^(\s*server:\s*)https?://.*:6443'
    replace: '\1https://{{ k3s_master_ip }}:6443'

- name: Ensure kubeconfig for SSH user on master
  when: install_kubectl_for_user | bool
  block:
    - name: Create .kube dir
      ansible.builtin.file:
        path: "/home/{{ kubectl_user }}/.kube"
        state: directory
        owner: "{{ kubectl_user }}"
        group: "{{ kubectl_user }}"
        mode: '0700'

    - name: Copy kubeconfig to user
      ansible.builtin.copy:
        src: /etc/rancher/k3s/k3s.yaml
        dest: "/home/{{ kubectl_user }}/.kube/config"
        owner: "{{ kubectl_user }}"
        group: "{{ kubectl_user }}"
        mode: '0600'
        remote_src: true
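Before wiring up the workers, the master can be spot-checked independently of Ansible; k3s bundles its own kubectl, so this works even before any kubeconfig is copied:
# On the master: confirm the service is active and the API answers
sudo systemctl status k3s --no-pager
sudo k3s kubectl get nodes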
File: ansible/roles/k3s/tasks/agent.yml
---
- name: Ensure prerequisites present
  ansible.builtin.package:
    name: curl
    state: present

# Reuse the installer already downloaded on the control node
- name: Push installer to remote
  ansible.builtin.copy:
    src: /tmp/install_k3s.sh
    dest: /tmp/install_k3s.sh
    mode: '0755'

# Fail fast with a clear error if the master API is not reachable yet
- name: Verify master API reachable from agent
  ansible.builtin.wait_for:
    host: "{{ k3s_master_ip }}"
    port: 6443
    timeout: 60
    delay: 2

# Idempotent install: create the agent service only once
- name: Install k3s agent
  ansible.builtin.shell: |
    INSTALL_K3S_VERSION="{{ k3s_version }}" \
    K3S_URL="https://{{ k3s_master_ip }}:6443" \
    K3S_TOKEN="{{ k3s_token }}" \
    sh /tmp/install_k3s.sh agent
  args:
    creates: /etc/systemd/system/k3s-agent.service
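If a worker's join ever looks stuck, the agent's service state and logs on that worker usually name the cause (token mismatch, unreachable master, and so on):
# On a worker: service state and the last few agent log lines
sudo systemctl status k3s-agent --no-pager
sudo journalctl -u k3s-agent -n 20 --no-pager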
Step 3 — Run the playbook
# Run with the K3S_TOKEN generated in Step 1. Do not regenerate it here:
# on a re-run, the workers would get a different token than the master.
ansible-playbook -i ansible/inventory/hosts.ini ansible/k3s.yml -b \
  -e k3s_version="v1.29.6+k3s1" \
  -e k3s_token="$K3S_TOKEN" \
  -e k3s_master_ip="192.168.56.10" \
  -e install_kubectl_for_user=true \
  -e kubectl_user="stackadmin"
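Because both install tasks are guarded by creates:, the play is idempotent: a second run should skip the installs and report no changes. If you want a connectivity sanity check first, an ad-hoc ping does it (assuming the inventory sketched in Step 2):
# Optional pre-flight: confirm Ansible can reach every node
ansible -i ansible/inventory/hosts.ini all -m ping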
Step 4 — Verify the cluster
kubectl get nodes -o wide
kubectl get pods -A
kubectl cluster-info
Output:
kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k3s-master Ready control-plane,master 8m13s v1.29.6+k3s1 172.16.9.129 <none> Ubuntu 24.04.3 LTS 6.8.0-84-generic containerd://1.7.17-k3s1
k3s-w1 Ready <none> 27s v1.29.6+k3s1 172.16.9.128 <none> Ubuntu 24.04.3 LTS 6.8.0-84-generic containerd://1.7.17-k3s1
k3s-w2 Ready <none> 6m30s v1.29.6+k3s1 172.16.9.128 <none> Ubuntu 24.04.3 LTS 6.8.0-84-generic containerd://1.7.17-k3s1
kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-6799fbcd5-6bkgk 1/1 Running 0 8m4s
kube-system helm-install-traefik-crd-62sfd 0/1 Completed 0 8m4s
kube-system helm-install-traefik-h8hdb 0/1 Completed 1 8m4s
kube-system local-path-provisioner-6f5d79df6-5bclg 1/1 Running 0 8m4s
kube-system metrics-server-54fd9b65b-d2vkf 1/1 Running 0 8m4s
kube-system svclb-traefik-45d511b1-8sggf 0/2 ContainerCreating 0 32s
kube-system svclb-traefik-45d511b1-b4qbk 2/2 Running 0 7m48s
kube-system svclb-traefik-45d511b1-cfkr9 2/2 Running 0 6m36s
kube-system traefik-7d5f6474df-9ppz9 1/1 Running 0 7m48s
kubectl cluster-info
Kubernetes control plane is running at https://192.168.56.10:6443
CoreDNS is running at https://192.168.56.10:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
Metrics-server is running at https://192.168.56.10:6443/api/v1/namespaces/kube-system/services/https:metrics-server:https/proxy
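Since server.yml rewrites the kubeconfig to point at the master IP, the same file also works from outside the VM. A minimal sketch for driving the cluster from the Kubuntu host, assuming kubectl is installed there:
# Copy the kubeconfig off the master and use it locally
mkdir -p ~/.kube
scp stackadmin@192.168.56.10:.kube/config ~/.kube/k3s-lab.yaml
export KUBECONFIG=~/.kube/k3s-lab.yaml
kubectl get nodes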
Troubleshooting
- Agents not joining: verify the token, time sync (chrony/systemd-timesyncd), UFW (6443/tcp on the master, 8472/udp on all nodes), and that the NodePort range is open for later services.
- API unreachable: ensure 6443/tcp reaches the master; on the master, check ss -lntp | grep 6443 and the logs with journalctl -u k3s -e.
- DNS hiccups: downloading the installer on the control node and copying it to the hosts avoids per-node DNS issues; the retries on the download add robustness.
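When a token mismatch is the suspect, compare what the server actually uses with what the agent was installed with; k3s keeps both on disk (the server's node-token embeds the cluster token after the final ::):
# On the master: the join token k3s actually serves
sudo cat /var/lib/rancher/k3s/server/node-token
# On a worker: the token the installer wrote into the agent's env file
sudo grep K3S_TOKEN /etc/systemd/system/k3s-agent.service.env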