From 4fb455c6b461a9ff12b7ab354e27cb9c7a654251 Mon Sep 17 00:00:00 2001
From: Thomas Kleinendorst
Date: Wed, 5 Jun 2024 12:57:41 +0200
Subject: [PATCH] Deduplicate Podman container logic with new role

---
 inventory/hosts                               |   2 +
 roles/actual/tasks/main.yml                   |  62 +++-------
 roles/changedetection/handlers/main.yml       |   7 --
 roles/changedetection/tasks/main.yml          |  58 +++-------
 roles/changedetection/vars/main/defaults.yml  |   1 +
 roles/pi-hole/handlers/main.yml               |   6 -
 roles/pi-hole/tasks/main.yml                  | 106 +++++-------------
 roles/pi-hole/vars/main/defaults.yml          |   1 +
 .../handlers/main.yml                         |   0
 roles/podman-container/tasks/main.yml         |  75 +++++++++++++
 roles/podman-container/vars/main/defaults.yml |   2 +
 11 files changed, 141 insertions(+), 179 deletions(-)
 delete mode 100644 roles/changedetection/handlers/main.yml
 rename roles/{actual => podman-container}/handlers/main.yml (100%)
 create mode 100644 roles/podman-container/tasks/main.yml
 create mode 100644 roles/podman-container/vars/main/defaults.yml

diff --git a/inventory/hosts b/inventory/hosts
index bec64dc..2f72226 100644
--- a/inventory/hosts
+++ b/inventory/hosts
@@ -2,3 +2,5 @@
 # Notice that domain names won't work on initial runs since the DNS service hosting the name
 # is installed as part of the scripting contained within this repo.
 raspberry-pi-1.kleinendorst.info hostname=raspberry-pi-1
+# ⬇️ Comment out the line above and uncomment this line when the pi-hole service isn't working correctly.
+# 192.168.50.27 hostname=raspberry-pi-1
diff --git a/roles/actual/tasks/main.yml b/roles/actual/tasks/main.yml
index dc65a9a..dee4de3 100644
--- a/roles/actual/tasks/main.yml
+++ b/roles/actual/tasks/main.yml
@@ -5,52 +5,22 @@
   vars:
     user_username: "{{ actual_username }}"
     user_password: "{{ actual_password }}"
-    user_start_podman_restart: true
-- name: Create a directory for holding actual's (volume) data
-  become: true
-  become_user: "{{ actual_username }}"
-  ansible.builtin.file:
-    path: "/home/{{ actual_username }}/actual_data"
-    state: directory
-    mode: '0700'
-- name: Gather facts on the actual container
-  become: true
-  become_user: "{{ actual_username }}"
-  containers.podman.podman_container_info:
-    name: actual-server
-  register: actual_server_container_info
-- name: Start the actual container with correct systemd linking
-  when: not actual_server_container_info.containers[0]["Config"]["Image"] is match(".*:" + actual_version)
-  become: true
-  become_user: "{{ actual_username }}"
-  block:
-    - name: Remove the actual container
-      containers.podman.podman_container:
-        name: actual-server
-        state: absent
-    - name: Start the Actual container
-      containers.podman.podman_container:
-        name: actual-server
-        image: "docker.io/actualbudget/actual-server:{{ actual_version }}"
-        restart_policy: always
-        publish:
-          - 127.0.0.1:5006:5006
-        volumes:
-          - "/home/{{ actual_username }}/actual_data:/data"
-        state: stopped
-        # For more information on the systemd startup service, see: https://linuxhandbook.com/autostart-podman-containers/
-        generate_systemd:
-          path: "/home/{{ actual_username }}/.config/systemd/user/"
-          restart_policy: always
-      notify: Reload systemd (daemon-reload)
-    - name: Flush handlers
-      ansible.builtin.meta: flush_handlers
-    - name: Enable the newly created systemd service for user
-      ansible.builtin.systemd:
-        name: container-actual-server.service
-        state: started
-        enabled: true
-        scope: user
+    user_start_podman_restart: true # TODO: Remove this and move it to the podman-container role
+- name: Create the actual container
+  ansible.builtin.include_role:
+    name: podman-container
+    apply:
+      become: true
+      become_user: "{{ actual_username }}"
+  vars:
+    podman_container_name: actual-server
+    podman_container_image: docker.io/actualbudget/actual-server
+    podman_container_tag: "{{ actual_version }}"
+    podman_container_publish:
+      - 127.0.0.1:5006:5006
+    podman_container_volumes:
+      - name: actual_data
+        mnt: /data
 - name: Include simple-reverse-proxy role
   ansible.builtin.include_role:
     name: simple-reverse-proxy
diff --git a/roles/changedetection/handlers/main.yml b/roles/changedetection/handlers/main.yml
deleted file mode 100644
index d1c9fa4..0000000
--- a/roles/changedetection/handlers/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Reload systemd (daemon-reload)
-  become: true
-  become_user: "{{ actual_username }}"
-  ansible.builtin.systemd_service:
-    daemon_reload: true
-    scope: user
diff --git a/roles/changedetection/tasks/main.yml b/roles/changedetection/tasks/main.yml
index c568b7a..da69caa 100644
--- a/roles/changedetection/tasks/main.yml
+++ b/roles/changedetection/tasks/main.yml
@@ -5,48 +5,22 @@
   vars:
     user_username: "{{ changedetection_username }}"
     user_password: "{{ changedetection_password }}"
-    user_start_podman_restart: true
-- name: Create a directory for holding changedetection's (volume) data
-  become: true
-  become_user: "{{ changedetection_username }}"
-  ansible.builtin.file:
-    path: "/home/{{ changedetection_username }}/changedetection_data"
-    state: directory
-    mode: '0700'
-- name: Gather facts on the changedetection container
-  become: true
-  become_user: "{{ changedetection_username }}"
-  containers.podman.podman_container_info:
-    name: changedetection-server
-  register: changedetection_server_container_info
-- name: Start the changedetection container with correct systemd linking
-  when: "'no such container' in changedetection_server_container_info.stderr"
-  become: true
-  become_user: "{{ changedetection_username }}"
-  block:
-    - name: Start the changedetection container
-      containers.podman.podman_container:
-        name: changedetection-server
-        image: docker.io/dgtlmoon/changedetection.io:0.45.21
-        restart_policy: always
-        publish:
-          - 127.0.0.1:5000:5000
-        volumes:
-          - "/home/{{ changedetection_username }}/changedetection_data:/datastore"
-        state: stopped
-        # For more information on the systemd startup service, see: https://linuxhandbook.com/autostart-podman-containers/
-        generate_systemd:
-          path: "/home/{{ changedetection_username }}/.config/systemd/user/"
-          restart_policy: always
-      notify: Reload systemd (daemon-reload)
-    - name: Flush handlers
-      ansible.builtin.meta: flush_handlers
-    - name: Enable the newly created systemd service for user
-      ansible.builtin.systemd:
-        name: container-changedetection-server.service
-        state: started
-        enabled: true
-        scope: user
+    user_start_podman_restart: true # TODO: Remove this and move it to the podman-container role
+- name: Create the changedetection container
+  ansible.builtin.include_role:
+    name: podman-container
+    apply:
+      become: true
+      become_user: "{{ changedetection_username }}"
+  vars:
+    podman_container_name: changedetection-server
+    podman_container_image: docker.io/dgtlmoon/changedetection.io
+    podman_container_tag: "{{ changedetection_version }}"
+    podman_container_publish:
+      - 127.0.0.1:5000:5000
+    podman_container_volumes:
+      - name: changedetection_data
+        mnt: /datastore
 - name: Include simple-reverse-proxy role
   ansible.builtin.include_role:
     name: simple-reverse-proxy
diff --git a/roles/changedetection/vars/main/defaults.yml b/roles/changedetection/vars/main/defaults.yml
index 163fffc..4bf41e1 100644
--- a/roles/changedetection/vars/main/defaults.yml
+++ b/roles/changedetection/vars/main/defaults.yml
@@ -1,2 +1,3 @@
 ---
 changedetection_username: changedetection
+changedetection_version: 0.45.21
diff --git a/roles/pi-hole/handlers/main.yml b/roles/pi-hole/handlers/main.yml
index 9f207b2..2100479 100644
--- a/roles/pi-hole/handlers/main.yml
+++ b/roles/pi-hole/handlers/main.yml
@@ -9,9 +9,3 @@
   ansible.builtin.systemd:
     name: ufw.service
     state: restarted
-- name: Reload systemd (daemon-reload)
-  become: true
-  become_user: "{{ pi_hole_username }}"
-  ansible.builtin.systemd_service:
-    daemon_reload: true
-    scope: user
diff --git a/roles/pi-hole/tasks/main.yml b/roles/pi-hole/tasks/main.yml
index e84035d..594b9bd 100644
--- a/roles/pi-hole/tasks/main.yml
+++ b/roles/pi-hole/tasks/main.yml
@@ -5,84 +5,34 @@
   vars:
     user_username: "{{ pi_hole_username }}"
     user_password: "{{ pi_hole_password }}"
-    user_start_podman_restart: true
-- name: Create the /etc-pihole directory in the home directory (will be mounted to the container)
-  become: true
-  become_user: "{{ pi_hole_username }}"
-  ansible.builtin.file:
-    path: "/home/{{ pi_hole_username }}/etc-pihole"
-    state: directory
-    mode: '0700'
-  register: command_result
-  failed_when:
-    - command_result.changed == false
-    - command_result.rc != 0
-    # This is quite an interesting problem. The command fails because, after initial creation, the pod using the volume
-    # changes the user of the folder to a UID only known within the container. This command basically doesn't need to
-    # change anything at this point so we'll ignore the error for now.
-    - "'set_mode_if_different' not in command_result.module_stdout"
-- name: Create the /etc-dnsmasq.d directory in the home directory (will be mounted to the container)
-  become: true
-  become_user: "{{ pi_hole_username }}"
-  ansible.builtin.file:
-    path: "/home/{{ pi_hole_username }}/etc-dnsmasq.d"
-    state: directory
-    mode: '0700'
-  failed_when:
-    - command_result.changed == false
-    - command_result.rc != 0
-    # This is quite an interesting problem. The command fails because, after initial creation, the pod using the volume
-    # changes the user of the folder to a UID only known within the container. This command basically doesn't need to
-    # change anything at this point so we'll ignore the error for now.
-    - "'set_mode_if_different' not in command_result.module_stdout"
-- name: Gather facts on the pi-hole container
-  become: true
-  become_user: "{{ pi_hole_username }}"
-  containers.podman.podman_container_info:
-    name: pi-hole
-  register: pi_hole_container_info
-- name: Start the pi-hole container with correct systemd linking
-  when: "'no such container' in pi_hole_container_info.stderr"
-  become: true
-  become_user: "{{ pi_hole_username }}"
-  block:
-    - name: Start the Pi hole container
-      containers.podman.podman_container:
-        name: pi-hole
-        image: docker.io/pihole/pihole:2024.03.2
-        restart_policy: always
-        publish:
-          # It seems we can't use authbind in combination with Podman, see: https://github.com/containers/podman/issues/13426.
-          # Instead we'll map to a higher port number and install and use the ufw firewall to forward packets to the local port.
-          - 127.0.0.1:5053:53/tcp
-          - 127.0.0.1:5053:53/udp
-          - 127.0.0.1:8080:80
-        hostname: "{{ ansible_facts['hostname'] }}" # Setting this will restart the container
-        env:
-          TZ: 'Europe/Amsterdam'
-          WEBPASSWORD: "{{ pi_hole_web_password }}"
-          # VIRTUAL_HOST: 'pi-hole.kleinendorst.info'
-          # FTLCONF_LOCAL_IPV4: "{{ ansible_facts['default_ipv4']['address'] }}"
-          PIHOLE_DNS_: 1.1.1.1;1.0.0.1
-          DNSMASQ_USER: root
-          INTERFACE: tap0
-        volumes:
-          - "/home/{{ pi_hole_username }}/etc-pihole:/etc/pihole"
-          - "/home/{{ pi_hole_username }}/etc-dnsmasq.d:/etc/dnsmasq.d"
-        state: stopped
-        # For more information on the systemd startup service, see: https://linuxhandbook.com/autostart-podman-containers/
-        generate_systemd:
-          path: "/home/{{ pi_hole_username }}/.config/systemd/user/"
-          restart_policy: always
-      notify: Reload systemd (daemon-reload)
-    - name: Flush handlers
-      ansible.builtin.meta: flush_handlers
-    - name: Enable the newly created systemd service for user
-      ansible.builtin.systemd:
-        name: container-pi-hole.service
-        state: started
-        enabled: true
-        scope: user
+    user_start_podman_restart: true # TODO: Remove this and move it to the podman-container role
+- name: Create the pi-hole container
+  ansible.builtin.include_role:
+    name: podman-container
+    apply:
+      become: true
+      become_user: "{{ pi_hole_username }}"
+  vars:
+    podman_container_name: pi-hole
+    podman_container_image: docker.io/pihole/pihole
+    podman_container_tag: "{{ pi_hole_version }}"
+    podman_container_publish:
+      - 127.0.0.1:5053:53/tcp
+      - 127.0.0.1:5053:53/udp
+      - 127.0.0.1:8080:80
+    podman_container_volumes:
+      - name: etc-pihole
+        mnt: /etc/pihole
+      - name: etc-dnsmasq.d
+        mnt: /etc/dnsmasq.d
+    podman_container_env:
+      TZ: 'Europe/Amsterdam'
+      WEBPASSWORD: "{{ pi_hole_web_password }}"
+      # VIRTUAL_HOST: 'pi-hole.kleinendorst.info'
+      # FTLCONF_LOCAL_IPV4: "{{ ansible_facts['default_ipv4']['address'] }}"
+      PIHOLE_DNS_: 1.1.1.1;1.0.0.1
+      DNSMASQ_USER: root
+      INTERFACE: tap0
 - name: Install certificate for pi-hole.kleinendorst.info
   become: true
   ansible.builtin.command:
diff --git a/roles/pi-hole/vars/main/defaults.yml b/roles/pi-hole/vars/main/defaults.yml
index 762fd91..c6ad8ec 100644
--- a/roles/pi-hole/vars/main/defaults.yml
+++ b/roles/pi-hole/vars/main/defaults.yml
@@ -1,2 +1,3 @@
 ---
 pi_hole_username: pi-hole
+pi_hole_version: 2024.03.2
diff --git a/roles/actual/handlers/main.yml b/roles/podman-container/handlers/main.yml
similarity index 100%
rename from roles/actual/handlers/main.yml
rename to roles/podman-container/handlers/main.yml
diff --git a/roles/podman-container/tasks/main.yml b/roles/podman-container/tasks/main.yml
new file mode 100644
index 0000000..ed81b87
--- /dev/null
+++ b/roles/podman-container/tasks/main.yml
@@ -0,0 +1,75 @@
+---
+- name: Run whoami
+  ansible.builtin.command: whoami
+  changed_when: false
+  register: whoami
+- name: Register current user in a variable
+  ansible.builtin.set_fact:
+    container_user: "{{ whoami.stdout }}"
+- name: Create mount directories
+  ansible.builtin.file:
+    path: "/home/{{ container_user }}/{{ item.name }}"
+    state: directory
+    mode: '0700'
+  loop: "{{ podman_container_volumes }}"
+  loop_control:
+    label: "{{ item.name }}"
+    index_var: index
+  register: command_result
+  failed_when:
+    # This is quite an interesting problem. The command fails because, after initial creation, the pod using the volume
+    # changes the user of the folder to a UID only known within the container. This command basically doesn't need to
+    # change anything at this point so we'll ignore the error for now.
+    - "command_result.module_stdout is defined and 'set_mode_if_different' not in command_result.module_stdout"
+- name: Gather facts on the container
+  containers.podman.podman_container_info:
+    name: "{{ podman_container_name }}"
+  register: container_info
+- name: Start the container with correct systemd linking
+  # ⬇️ Run when the container doesn't exist yet or is running an outdated image
+  when: container_info.containers | length == 0 or not container_info.containers[0]["Config"]["Image"] is match(".*:" + podman_container_tag)
+  block:
+    - name: Pull the requested image before removal of container
+      containers.podman.podman_image:
+        name: "{{ podman_container_image }}:{{ podman_container_tag }}"
+        state: present
+    - name: Make sure the container isn't present
+      containers.podman.podman_container:
+        name: "{{ podman_container_name }}"
+        state: absent
+    - name: Map volumes to Podman accepted list
+      ansible.builtin.set_fact:
+        volumes: "{{ volumes + ['/home/' + container_user + '/' + item.name + ':' + item.mnt] }}"
+      with_items: "{{ podman_container_volumes }}"
+      vars:
+        volumes: []
+    - name: Start the container
+      containers.podman.podman_container:
+        name: "{{ podman_container_name }}"
+        image: "{{ podman_container_image }}:{{ podman_container_tag }}"
+        restart_policy: always
+        hostname: "{{ ansible_facts['hostname'] }}"
+        publish: "{{ podman_container_publish }}"
+        env: "{{ podman_container_env }}"
+        volumes: "{{ volumes }}"
+        state: stopped
+        # For more information on the systemd startup service, see: https://linuxhandbook.com/autostart-podman-containers/
+        generate_systemd:
+          path: "/home/{{ container_user }}/.config/systemd/user/"
+          restart_policy: always
+      notify: Reload systemd (daemon-reload)
+    - name: Flush handlers
+      ansible.builtin.meta: flush_handlers
+    # On restarts this seems to be a bit buggy. Some manual waiting might be necessary, followed by running:
+    #   systemctl --user daemon-reload && systemctl --user start {{ service_name }}
+    # The retry below doesn't work yet either; a manual daemon-reload followed by re-running the task does seem to work.
+    - name: Enable the newly created systemd service for user
+      ansible.builtin.systemd:
+        name: "container-{{ podman_container_name }}.service"
+        state: started
+        enabled: true
+        scope: user
+      retries: 3
+      delay: 3
+      register: result
+      until: "'Error.EBUSY' not in result.msg"
diff --git a/roles/podman-container/vars/main/defaults.yml b/roles/podman-container/vars/main/defaults.yml
new file mode 100644
index 0000000..23c031c
--- /dev/null
+++ b/roles/podman-container/vars/main/defaults.yml
@@ -0,0 +1,2 @@
+---
+podman_container_env: {}
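
Note: with the role in place, onboarding another service boils down to one include plus a handful of vars. The sketch below is purely illustrative and not part of this patch — the uptime-kuma service, its username/version variables, port, and volume are assumptions for the example, and the user-creation and simple-reverse-proxy tasks from the existing roles would still be needed around it:

    # Hypothetical roles/uptime-kuma/tasks/main.yml (illustrative only, not in this patch)
    - name: Create the uptime-kuma container
      ansible.builtin.include_role:
        name: podman-container
        apply:
          become: true
          become_user: "{{ uptime_kuma_username }}" # assumed to be created by the user role, as in the roles above
      vars:
        podman_container_name: uptime-kuma
        podman_container_image: docker.io/louislam/uptime-kuma
        podman_container_tag: "{{ uptime_kuma_version }}"
        podman_container_publish:
          - 127.0.0.1:3001:3001
        podman_container_volumes:
          - name: uptime-kuma_data # created under the user's home directory by the role
            mnt: /app/data
        # podman_container_env can be omitted; it defaults to {} via the role's vars/main/defaults.yml

The same pattern covers all three migrated roles: a dedicated rootless user per service, volumes under that user's home directory, and a systemd user unit generated by the role.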