Deduplicate Podman container logic with new role

This commit is contained in:
Thomas Kleinendorst 2024-06-05 12:57:41 +02:00
parent bcf920053c
commit 4fb455c6b4
11 changed files with 141 additions and 179 deletions

View file

@ -2,3 +2,5 @@
# Notice that domain names won't work on initial runs since the DNS service hosting the name
# is installed as part of the scripting contained within this repo.
raspberry-pi-1.kleinendorst.info hostname=raspberry-pi-1
# ⬇️ Comment out the line above and uncomment this line when the pi-hole service isn't working correctly.
# 192.168.50.27 hostname=raspberry-pi-1

View file

@ -5,52 +5,22 @@
vars:
user_username: "{{ actual_username }}"
user_password: "{{ actual_password }}"
user_start_podman_restart: true
- name: Create a directory for holding actual's (volume) data
user_start_podman_restart: true # TODO: Remove this and move it to the podman-container role
- name: Create the actual container
ansible.builtin.include_role:
name: podman-container
apply:
become: true
become_user: "{{ actual_username }}"
ansible.builtin.file:
path: "/home/{{ actual_username }}/actual_data"
state: directory
mode: '0700'
- name: Gather facts on the actual container
become: true
become_user: "{{ actual_username }}"
containers.podman.podman_container_info:
name: actual-server
register: actual_server_container_info
- name: Start the actual container with correct systemd linking
when: not actual_server_container_info.containers[0]["Config"]["Image"] is match(".*:" + actual_version)
become: true
become_user: "{{ actual_username }}"
block:
- name: Remove the actual container
containers.podman.podman_container:
name: actual-server
state: absent
- name: Start the Actual container
containers.podman.podman_container:
name: actual-server
image: "docker.io/actualbudget/actual-server:{{ actual_version }}"
restart_policy: always
publish:
vars:
podman_container_name: actual-server
podman_container_image: docker.io/actualbudget/actual-server
podman_container_tag: "{{ actual_version }}"
podman_container_publish:
- 127.0.0.1:5006:5006
volumes:
- "/home/{{ actual_username }}/actual_data:/data"
state: stopped
# For more information on the systemd startup service, see: https://linuxhandbook.com/autostart-podman-containers/
generate_systemd:
path: "/home/{{ actual_username }}/.config/systemd/user/"
restart_policy: always
notify: Reload systemd (daemon-reload)
- name: Flush handlers
ansible.builtin.meta: flush_handlers
- name: Enable the newly created systemd service for user
ansible.builtin.systemd:
name: container-actual-server.service
state: started
enabled: true
scope: user
podman_container_volumes:
- name: actual_data
mnt: /data
- name: Include simple-reverse-proxy role
ansible.builtin.include_role:
name: simple-reverse-proxy

View file

@ -1,7 +0,0 @@
---
- name: Reload systemd (daemon-reload)
become: true
become_user: "{{ actual_username }}"
ansible.builtin.systemd_service:
daemon_reload: true
scope: user

View file

@ -5,48 +5,22 @@
vars:
user_username: "{{ changedetection_username }}"
user_password: "{{ changedetection_password }}"
user_start_podman_restart: true
- name: Create a directory for holding changedetection's (volume) data
user_start_podman_restart: true # TODO: Remove this and move it to the podman-container role
- name: Create the changedetection container
ansible.builtin.include_role:
name: podman-container
apply:
become: true
become_user: "{{ changedetection_username }}"
ansible.builtin.file:
path: "/home/{{ changedetection_username }}/changedetection_data"
state: directory
mode: '0700'
- name: Gather facts on the changedetection container
become: true
become_user: "{{ changedetection_username }}"
containers.podman.podman_container_info:
name: changedetection-server
register: changedetection_server_container_info
- name: Start the changedetection container with correct systemd linking
when: "'no such container' in changedetection_server_container_info.stderr"
become: true
become_user: "{{ changedetection_username }}"
block:
- name: Start the changedetection container
containers.podman.podman_container:
name: changedetection-server
image: docker.io/dgtlmoon/changedetection.io:0.45.21
restart_policy: always
publish:
vars:
podman_container_name: changedetection-server
podman_container_image: docker.io/dgtlmoon/changedetection.io
podman_container_tag: "{{ changedetection_version }}"
podman_container_publish:
- 127.0.0.1:5000:5000
volumes:
- "/home/{{ changedetection_username }}/changedetection_data:/datastore"
state: stopped
# For more information on the systemd startup service, see: https://linuxhandbook.com/autostart-podman-containers/
generate_systemd:
path: "/home/{{ changedetection_username }}/.config/systemd/user/"
restart_policy: always
notify: Reload systemd (daemon-reload)
- name: Flush handlers
ansible.builtin.meta: flush_handlers
- name: Enable the newly created systemd service for user
ansible.builtin.systemd:
name: container-changedetection-server.service
state: started
enabled: true
scope: user
podman_container_volumes:
- name: changedetection_data
mnt: /datastore
- name: Include simple-reverse-proxy role
ansible.builtin.include_role:
name: simple-reverse-proxy

View file

@ -1,2 +1,3 @@
---
changedetection_username: changedetection
changedetection_version: 0.45.21

View file

@ -9,9 +9,3 @@
ansible.builtin.systemd:
name: ufw.service
state: restarted
- name: Reload systemd (daemon-reload)
become: true
become_user: "{{ pi_hole_username }}"
ansible.builtin.systemd_service:
daemon_reload: true
scope: user

View file

@ -5,60 +5,27 @@
vars:
user_username: "{{ pi_hole_username }}"
user_password: "{{ pi_hole_password }}"
user_start_podman_restart: true
- name: Create the /etc-pihole directory in the home directory (will be mounted to the container)
user_start_podman_restart: true # TODO: Remove this and move it to the podman-container role
- name: Create the pi-hole container
ansible.builtin.include_role:
name: podman-container
apply:
become: true
become_user: "{{ pi_hole_username }}"
ansible.builtin.file:
path: "/home/{{ pi_hole_username }}/etc-pihole"
state: directory
mode: '0700'
register: command_result
failed_when:
- command_result.changed == false
- command_result.rc != 0
# This is quite an interesting problem. The command fails because, after initial creation, the pod using the volume
# changes the user of the folder to a UID only known within the container. This command basically doesn't need to
# change anything at this point so we'll ignore the error for now.
- "'set_mode_if_different' not in command_result.module_stdout"
- name: Create the /etc-dnsmasq.d directory in the home directory (will be mounted to the container)
become: true
become_user: "{{ pi_hole_username }}"
ansible.builtin.file:
path: "/home/{{ pi_hole_username }}/etc-dnsmasq.d"
state: directory
mode: '0700'
failed_when:
- command_result.changed == false
- command_result.rc != 0
# This is quite an interesting problem. The command fails because, after initial creation, the pod using the volume
# changes the user of the folder to a UID only known within the container. This command basically doesn't need to
# change anything at this point so we'll ignore the error for now.
- "'set_mode_if_different' not in command_result.module_stdout"
- name: Gather facts on the pi-hole container
become: true
become_user: "{{ pi_hole_username }}"
containers.podman.podman_container_info:
name: pi-hole
register: pi_hole_container_info
- name: Start the pi-hole container with correct systemd linking
when: "'no such container' in pi_hole_container_info.stderr"
become: true
become_user: "{{ pi_hole_username }}"
block:
- name: Start the Pi hole container
containers.podman.podman_container:
name: pi-hole
image: docker.io/pihole/pihole:2024.03.2
restart_policy: always
publish:
# It seems we can't use authbind in combination with Podman, see: https://github.com/containers/podman/issues/13426.
# Instead we'll map to a higher port number and install and use the ufw firewall to forward packets to the local port.
vars:
podman_container_name: pi-hole
podman_container_image: docker.io/pihole/pihole
podman_container_tag: "{{ pi_hole_version }}"
podman_container_publish:
- 127.0.0.1:5053:53/tcp
- 127.0.0.1:5053:53/udp
- 127.0.0.1:8080:80
hostname: "{{ ansible_facts['hostname'] }}" # Setting this will restart the container
env:
podman_container_volumes:
- name: etc-pihole
mnt: /etc/pihole
- name: etc-dnsmasq.d
mnt: /etc/dnsmasq.d
podman_container_env:
TZ: 'Europe/Amsterdam'
WEBPASSWORD: "{{ pi_hole_web_password }}"
# VIRTUAL_HOST: 'pi-hole.kleinendorst.info'
@ -66,23 +33,6 @@
PIHOLE_DNS_: 1.1.1.1;1.0.0.1
DNSMASQ_USER: root
INTERFACE: tap0
volumes:
- "/home/{{ pi_hole_username }}/etc-pihole:/etc/pihole"
- "/home/{{ pi_hole_username }}/etc-dnsmasq.d:/etc/dnsmasq.d"
state: stopped
# For more information on the systemd startup service, see: https://linuxhandbook.com/autostart-podman-containers/
generate_systemd:
path: "/home/{{ pi_hole_username }}/.config/systemd/user/"
restart_policy: always
notify: Reload systemd (daemon-reload)
- name: Flush handlers
ansible.builtin.meta: flush_handlers
- name: Enable the newly created systemd service for user
ansible.builtin.systemd:
name: container-pi-hole.service
state: started
enabled: true
scope: user
- name: Install certificate for pi-hole.kleinendorst.info
become: true
ansible.builtin.command:

View file

@ -1,2 +1,3 @@
---
pi_hole_username: pi-hole
pi_hole_version: 2024.03.2

View file

@ -0,0 +1,75 @@
---
# Generic task list: runs a rootless Podman container for the current user,
# mounts its volumes from the user's home directory and links it to a
# user-scoped systemd service so it starts on boot.
#
# Expected role variables:
#   podman_container_name    - name of the container and its systemd unit
#   podman_container_image   - image repository (without tag)
#   podman_container_tag     - image tag; a tag mismatch triggers recreation
#   podman_container_publish - list of port mappings
#   podman_container_env     - mapping of environment variables (defaults to {})
#   podman_container_volumes - list of {name, mnt} pairs; 'name' is a directory
#                              under the user's home, 'mnt' the container path
- name: Run whoami
  ansible.builtin.command: whoami
  changed_when: false
  register: whoami
- name: Register current user in a variable
  ansible.builtin.set_fact:
    container_user: "{{ whoami.stdout }}"
- name: Create mount directories
  ansible.builtin.file:
    path: "/home/{{ container_user }}/{{ item.name }}"
    state: directory
    mode: '0700'
  loop: "{{ podman_container_volumes }}"
  loop_control:
    label: "{{ item.name }}"
  register: command_result
  # This is quite an interesting problem. The command fails because, after
  # initial creation, the pod using the volume changes the user of the folder
  # to a UID only known within the container. This task basically doesn't need
  # to change anything at this point, so ignore that specific error — but keep
  # failing for every other reason (the bare marker check alone would also
  # swallow genuine failures that produce no module_stdout).
  failed_when:
    - command_result.failed | default(false)
    - "'set_mode_if_different' not in command_result.module_stdout | default('')"
- name: Gather facts on the container
  containers.podman.podman_container_info:
    name: "{{ podman_container_name }}"
  register: container_info
- name: Start the container with correct systemd linking
  # ⬇️ Run either when the container doesn't exist or its image tag is outdated
  when: >-
    container_info.containers | length == 0
    or not container_info.containers[0]["Config"]["Image"] is match(".*:" + podman_container_tag)
  block:
    - name: Pull the requested image before removal of container
      containers.podman.podman_image:
        name: "{{ podman_container_image }}:{{ podman_container_tag }}"
        state: present
    - name: Make sure the container isn't present
      containers.podman.podman_container:
        name: "{{ podman_container_name }}"
        state: absent
    # Reset the accumulator explicitly: a task-level `vars: volumes: []` would
    # take precedence over the set_fact value inside the loop (so only the last
    # volume would survive), and set_fact values persist per host, so an
    # earlier include of this role would otherwise leak its volumes into ours.
    - name: Reset the Podman volume list
      ansible.builtin.set_fact:
        volumes: []
    - name: Map volumes to Podman accepted list
      ansible.builtin.set_fact:
        volumes: "{{ volumes + ['/home/' + container_user + '/' + item.name + ':' + item.mnt] }}"
      loop: "{{ podman_container_volumes }}"
      loop_control:
        label: "{{ item.name }}"
    - name: Start the container
      containers.podman.podman_container:
        name: "{{ podman_container_name }}"
        image: "{{ podman_container_image }}:{{ podman_container_tag }}"
        restart_policy: always
        hostname: "{{ ansible_facts['hostname'] }}"
        publish: "{{ podman_container_publish }}"
        env: "{{ podman_container_env }}"
        volumes: "{{ volumes }}"
        state: stopped
        # For more information on the systemd startup service, see: https://linuxhandbook.com/autostart-podman-containers/
        generate_systemd:
          path: "/home/{{ container_user }}/.config/systemd/user/"
          restart_policy: always
      notify: Reload systemd (daemon-reload)
    - name: Flush handlers
      ansible.builtin.meta: flush_handlers
    # On restarts this seems to be a bit buggy: starting the unit can briefly
    # fail with Error.EBUSY while systemd is still reloading, so retry a few
    # times. On success the module returns no `msg`, so guard the until
    # condition against the key being undefined.
    - name: Enable the newly created systemd service for user
      ansible.builtin.systemd:
        name: "container-{{ podman_container_name }}.service"
        state: started
        enabled: true
        scope: user
      retries: 3
      delay: 3
      register: result
      until: result.msg is not defined or 'Error.EBUSY' not in result.msg

View file

@ -0,0 +1,2 @@
---
# Environment variables passed to the container; overridden per service,
# defaults to no extra environment.
podman_container_env: {}