Deduplicate Podman container logic with new role
parent bcf920053c
commit 4fb455c6b4
11 changed files with 141 additions and 179 deletions
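In short: each service playbook used to carry its own create-directories / gather-facts / recreate-container / systemd-enable task chain, and that chain now lives once in the new podman-container role. A caller supplies only the container's name, image, tag, published ports, volumes, and optional environment. A minimal sketch of an invocation (values taken from the actual-server hunk below):

- name: Create the actual container
  ansible.builtin.include_role:
    name: podman-container
    apply:
      become: true
      become_user: "{{ actual_username }}"
  vars:
    podman_container_name: actual-server
    podman_container_image: docker.io/actualbudget/actual-server
    podman_container_tag: "{{ actual_version }}"
    podman_container_publish:
      - 127.0.0.1:5006:5006
    podman_container_volumes:
      - name: actual_data # created as /home/<user>/actual_data and mounted at /data
        mnt: /data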
@@ -2,3 +2,5 @@
 # Notice that domain names won't work on initial runs since the DNS service hosting the name
 # is installed as part of the scripting contained within this repo.
 raspberry-pi-1.kleinendorst.info hostname=raspberry-pi-1
+# ⬇️ Comment out the line above and uncomment this line when the pi-hole service isn't working correctly.
+# 192.168.50.27 hostname=raspberry-pi-1
@@ -5,52 +5,22 @@
   vars:
     user_username: "{{ actual_username }}"
     user_password: "{{ actual_password }}"
-    user_start_podman_restart: true
-- name: Create a directory for holding actual's (volume) data
-  become: true
-  become_user: "{{ actual_username }}"
-  ansible.builtin.file:
-    path: "/home/{{ actual_username }}/actual_data"
-    state: directory
-    mode: '0700'
-- name: Gather facts on the actual container
-  become: true
-  become_user: "{{ actual_username }}"
-  containers.podman.podman_container_info:
-    name: actual-server
-  register: actual_server_container_info
-- name: Start the actual container with correct systemd linking
-  when: not actual_server_container_info.containers[0]["Config"]["Image"] is match(".*:" + actual_version)
-  become: true
-  become_user: "{{ actual_username }}"
-  block:
-    - name: Remove the actual container
-      containers.podman.podman_container:
-        name: actual-server
-        state: absent
-    - name: Start the Actual container
-      containers.podman.podman_container:
-        name: actual-server
-        image: "docker.io/actualbudget/actual-server:{{ actual_version }}"
-        restart_policy: always
-        publish:
-          - 127.0.0.1:5006:5006
-        volumes:
-          - "/home/{{ actual_username }}/actual_data:/data"
-        state: stopped
-        # For more information on the systemd startup service, see: https://linuxhandbook.com/autostart-podman-containers/
-        generate_systemd:
-          path: "/home/{{ actual_username }}/.config/systemd/user/"
-          restart_policy: always
-      notify: Reload systemd (daemon-reload)
-    - name: Flush handlers
-      ansible.builtin.meta: flush_handlers
-    - name: Enable the newly created systemd service for user
-      ansible.builtin.systemd:
-        name: container-actual-server.service
-        state: started
-        enabled: true
-        scope: user
+    user_start_podman_restart: true # TODO: Remove this and move it to the podman-container role
+- name: Create the actual container
+  ansible.builtin.include_role:
+    name: podman-container
+    apply:
+      become: true
+      become_user: "{{ actual_username }}"
+  vars:
+    podman_container_name: actual-server
+    podman_container_image: docker.io/actualbudget/actual-server
+    podman_container_tag: "{{ actual_version }}"
+    podman_container_publish:
+      - 127.0.0.1:5006:5006
+    podman_container_volumes:
+      - name: actual_data
+        mnt: /data
 - name: Include simple-reverse-proxy role
   ansible.builtin.include_role:
     name: simple-reverse-proxy
@@ -1,7 +0,0 @@
----
-- name: Reload systemd (daemon-reload)
-  become: true
-  become_user: "{{ actual_username }}"
-  ansible.builtin.systemd_service:
-    daemon_reload: true
-    scope: user
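This per-application handler disappears because the new role itself notifies "Reload systemd (daemon-reload)" (see roles/podman-container/tasks/main.yml below). Only 10 of the 11 changed files are visible in these hunks, and a handlers file for the role (or an equivalent) must exist somewhere, so the following sketch of its presumed content is an assumption, derived from the deleted handler with the hard-coded user dropped (the role already runs under the target user via apply):

---
# roles/podman-container/handlers/main.yml, assumed content, not shown in this commit view
- name: Reload systemd (daemon-reload)
  ansible.builtin.systemd_service:
    daemon_reload: true
    scope: user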
@@ -5,48 +5,22 @@
   vars:
     user_username: "{{ changedetection_username }}"
     user_password: "{{ changedetection_password }}"
-    user_start_podman_restart: true
-- name: Create a directory for holding changedetection's (volume) data
-  become: true
-  become_user: "{{ changedetection_username }}"
-  ansible.builtin.file:
-    path: "/home/{{ changedetection_username }}/changedetection_data"
-    state: directory
-    mode: '0700'
-- name: Gather facts on the changedetection container
-  become: true
-  become_user: "{{ changedetection_username }}"
-  containers.podman.podman_container_info:
-    name: changedetection-server
-  register: changedetection_server_container_info
-- name: Start the changedetection container with correct systemd linking
-  when: "'no such container' in changedetection_server_container_info.stderr"
-  become: true
-  become_user: "{{ changedetection_username }}"
-  block:
-    - name: Start the changedetection container
-      containers.podman.podman_container:
-        name: changedetection-server
-        image: docker.io/dgtlmoon/changedetection.io:0.45.21
-        restart_policy: always
-        publish:
-          - 127.0.0.1:5000:5000
-        volumes:
-          - "/home/{{ changedetection_username }}/changedetection_data:/datastore"
-        state: stopped
-        # For more information on the systemd startup service, see: https://linuxhandbook.com/autostart-podman-containers/
-        generate_systemd:
-          path: "/home/{{ changedetection_username }}/.config/systemd/user/"
-          restart_policy: always
-      notify: Reload systemd (daemon-reload)
-    - name: Flush handlers
-      ansible.builtin.meta: flush_handlers
-    - name: Enable the newly created systemd service for user
-      ansible.builtin.systemd:
-        name: container-changedetection-server.service
-        state: started
-        enabled: true
-        scope: user
+    user_start_podman_restart: true # TODO: Remove this and move it to the podman-container role
+- name: Create the changedetection container
+  ansible.builtin.include_role:
+    name: podman-container
+    apply:
+      become: true
+      become_user: "{{ changedetection_username }}"
+  vars:
+    podman_container_name: changedetection-server
+    podman_container_image: docker.io/dgtlmoon/changedetection.io
+    podman_container_tag: "{{ changedetection_version }}"
+    podman_container_publish:
+      - 127.0.0.1:5000:5000
+    podman_container_volumes:
+      - name: changedetection_data
+        mnt: /datastore
 - name: Include simple-reverse-proxy role
   ansible.builtin.include_role:
     name: simple-reverse-proxy
@@ -1,2 +1,3 @@
 ---
 changedetection_username: changedetection
+changedetection_version: 0.45.21
@@ -9,9 +9,3 @@
   ansible.builtin.systemd:
     name: ufw.service
     state: restarted
-- name: Reload systemd (daemon-reload)
-  become: true
-  become_user: "{{ pi_hole_username }}"
-  ansible.builtin.systemd_service:
-    daemon_reload: true
-    scope: user
@@ -5,84 +5,34 @@
   vars:
     user_username: "{{ pi_hole_username }}"
     user_password: "{{ pi_hole_password }}"
-    user_start_podman_restart: true
-- name: Create the /etc-pihole directory in the home directory (will be mounted to the container)
-  become: true
-  become_user: "{{ pi_hole_username }}"
-  ansible.builtin.file:
-    path: "/home/{{ pi_hole_username }}/etc-pihole"
-    state: directory
-    mode: '0700'
-  register: command_result
-  failed_when:
-    - command_result.changed == false
-    - command_result.rc != 0
-    # This is quite an interesting problem. The command fails because, after initial creation, the pod using the volume
-    # changes the user of the folder to a UID only known within the container. This command basically doesn't need to
-    # change anything at this point so we'll ignore the error for now.
-    - "'set_mode_if_different' not in command_result.module_stdout"
-- name: Create the /etc-dnsmasq.d directory in the home directory (will be mounted to the container)
-  become: true
-  become_user: "{{ pi_hole_username }}"
-  ansible.builtin.file:
-    path: "/home/{{ pi_hole_username }}/etc-dnsmasq.d"
-    state: directory
-    mode: '0700'
-  failed_when:
-    - command_result.changed == false
-    - command_result.rc != 0
-    # This is quite an interesting problem. The command fails because, after initial creation, the pod using the volume
-    # changes the user of the folder to a UID only known within the container. This command basically doesn't need to
-    # change anything at this point so we'll ignore the error for now.
-    - "'set_mode_if_different' not in command_result.module_stdout"
-- name: Gather facts on the pi-hole container
-  become: true
-  become_user: "{{ pi_hole_username }}"
-  containers.podman.podman_container_info:
-    name: pi-hole
-  register: pi_hole_container_info
-- name: Start the pi-hole container with correct systemd linking
-  when: "'no such container' in pi_hole_container_info.stderr"
-  become: true
-  become_user: "{{ pi_hole_username }}"
-  block:
-    - name: Start the Pi hole container
-      containers.podman.podman_container:
-        name: pi-hole
-        image: docker.io/pihole/pihole:2024.03.2
-        restart_policy: always
-        publish:
-          # It seems we can't use authbind in combination with Podman, see: https://github.com/containers/podman/issues/13426.
-          # Instead we'll map to a higher port number and install and use the ufw firewall to forward packets to the local port.
-          - 127.0.0.1:5053:53/tcp
-          - 127.0.0.1:5053:53/udp
-          - 127.0.0.1:8080:80
-        hostname: "{{ ansible_facts['hostname'] }}" # Setting this will restart the container
-        env:
-          TZ: 'Europe/Amsterdam'
-          WEBPASSWORD: "{{ pi_hole_web_password }}"
-          # VIRTUAL_HOST: 'pi-hole.kleinendorst.info'
-          # FTLCONF_LOCAL_IPV4: "{{ ansible_facts['default_ipv4']['address'] }}"
-          PIHOLE_DNS_: 1.1.1.1;1.0.0.1
-          DNSMASQ_USER: root
-          INTERFACE: tap0
-        volumes:
-          - "/home/{{ pi_hole_username }}/etc-pihole:/etc/pihole"
-          - "/home/{{ pi_hole_username }}/etc-dnsmasq.d:/etc/dnsmasq.d"
-        state: stopped
-        # For more information on the systemd startup service, see: https://linuxhandbook.com/autostart-podman-containers/
-        generate_systemd:
-          path: "/home/{{ pi_hole_username }}/.config/systemd/user/"
-          restart_policy: always
-      notify: Reload systemd (daemon-reload)
-    - name: Flush handlers
-      ansible.builtin.meta: flush_handlers
-    - name: Enable the newly created systemd service for user
-      ansible.builtin.systemd:
-        name: container-pi-hole.service
-        state: started
-        enabled: true
-        scope: user
+    user_start_podman_restart: true # TODO: Remove this and move it to the podman-container role
+- name: Create the pi-hole container
+  ansible.builtin.include_role:
+    name: podman-container
+    apply:
+      become: true
+      become_user: "{{ pi_hole_username }}"
+  vars:
+    podman_container_name: pi-hole
+    podman_container_image: docker.io/pihole/pihole
+    podman_container_tag: "{{ pi_hole_version }}"
+    podman_container_publish:
+      - 127.0.0.1:5053:53/tcp
+      - 127.0.0.1:5053:53/udp
+      - 127.0.0.1:8080:80
+    podman_container_volumes:
+      - name: etc-pihole
+        mnt: /etc/pihole
+      - name: etc-dnsmasq.d
+        mnt: /etc/dnsmasq.d
+    podman_container_env:
+      TZ: 'Europe/Amsterdam'
+      WEBPASSWORD: "{{ pi_hole_web_password }}"
+      # VIRTUAL_HOST: 'pi-hole.kleinendorst.info'
+      # FTLCONF_LOCAL_IPV4: "{{ ansible_facts['default_ipv4']['address'] }}"
+      PIHOLE_DNS_: 1.1.1.1;1.0.0.1
+      DNSMASQ_USER: root
+      INTERFACE: tap0
 - name: Install certificate for pi-hole.kleinendorst.info
   become: true
   ansible.builtin.command:
@@ -1,2 +1,3 @@
 ---
 pi_hole_username: pi-hole
+pi_hole_version: 2024.03.2
roles/podman-container/tasks/main.yml (new file)
@@ -0,0 +1,75 @@
+---
+- name: Run whoami
+  ansible.builtin.command: whoami
+  changed_when: false
+  register: whoami
+- name: Register current user in a variable
+  ansible.builtin.set_fact:
+    container_user: "{{ whoami.stdout }}"
+- name: Create mount directories
+  ansible.builtin.file:
+    path: "/home/{{ container_user }}/{{ item.name }}"
+    state: directory
+    mode: '0700'
+  loop: "{{ podman_container_volumes }}"
+  loop_control:
+    label: "{{ item.name }}"
+    index_var: index
+  register: command_result
+  failed_when:
+    # This is quite an interesting problem. The command fails because, after initial creation, the pod using the volume
+    # changes the user of the folder to a UID only known within the container. This command basically doesn't need to
+    # change anything at this point so we'll ignore the error for now.
+    - "command_result.module_stdout is defined and 'set_mode_if_different' not in command_result.module_stdout"
+- name: Gather facts on the container
+  containers.podman.podman_container_info:
+    name: "{{ podman_container_name }}"
+  register: container_info
+- name: Start the container with correct systemd linking
+  # ⬇️ Run either when the container doesn't exist or its image is outdated
+  when: container_info.containers | length == 0 or not container_info.containers[0]["Config"]["Image"] is match(".*:" + podman_container_tag)
+  block:
+    - name: Pull the requested image before removal of container
+      containers.podman.podman_image:
+        name: "{{ podman_container_image }}:{{ podman_container_tag }}"
+        state: present
+    - name: Make sure the container isn't present
+      containers.podman.podman_container:
+        name: "{{ podman_container_name }}"
+        state: absent
+    - name: Map volumes to Podman accepted list
+      ansible.builtin.set_fact:
+        volumes: "{{ volumes + ['/home/' + container_user + '/' + item.name + ':' + item.mnt] }}"
+      with_items: "{{ podman_container_volumes }}"
+      vars:
+        volumes: []
+    - name: Start the container
+      containers.podman.podman_container:
+        name: "{{ podman_container_name }}"
+        image: "{{ podman_container_image }}:{{ podman_container_tag }}"
+        restart_policy: always
+        hostname: "{{ ansible_facts['hostname'] }}"
+        publish: "{{ podman_container_publish }}"
+        env: "{{ podman_container_env }}"
+        volumes: "{{ volumes }}"
+        state: stopped
+        # For more information on the systemd startup service, see: https://linuxhandbook.com/autostart-podman-containers/
+        generate_systemd:
+          path: "/home/{{ container_user }}/.config/systemd/user/"
+          restart_policy: always
+      notify: Reload systemd (daemon-reload)
+    - name: Flush handlers
+      ansible.builtin.meta: flush_handlers
+    # On restarts this seems to be a bit buggy. Some manual waiting might be necessary, running:
+    # systemctl --user daemon-reload && systemctl --user start {{ service_name }}
+    # The retry also doesn't work yet; doing a daemon-reload and then re-running the task seems to work.
+    - name: Enable the newly created systemd service for user
+      ansible.builtin.systemd:
+        name: "container-{{ podman_container_name }}.service"
+        state: started
+        enabled: true
+        scope: user
+      retries: 3
+      delay: 3
+      register: result
+      until: "'Error.EBUSY' not in result.msg"
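Two details of the role are worth a worked illustration. First, the when: condition makes upgrades declarative: bumping a pinned version (now in each service's vars file) changes podman_container_tag, the regex no longer matches the running container's Config.Image, and the block re-pulls and recreates the container. Second, the set_fact loop flattens the structured podman_container_volumes list into the plain HOST:CONTAINER strings the podman_container module expects. An illustration with hypothetical values following the pi-hole parameters:

# Staleness check:
#   running container Config.Image: docker.io/pihole/pihole:2024.03.1   (hypothetical old tag)
#   podman_container_tag:           2024.03.2
#   "docker.io/pihole/pihole:2024.03.1" is match(".*:2024.03.2")  ->  false
#   -> "not ... is match(...)" is true, so the block runs: pull, remove, recreate.
#   (The Jinja2 match test anchors at the start of the string; the unescaped dots make
#   this slightly looser than a literal tag-equality check.)
#
# Volume mapping, with container_user == pi-hole:
#   - {name: etc-pihole,    mnt: /etc/pihole}     ->  /home/pi-hole/etc-pihole:/etc/pihole
#   - {name: etc-dnsmasq.d, mnt: /etc/dnsmasq.d}  ->  /home/pi-hole/etc-dnsmasq.d:/etc/dnsmasq.d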
roles/podman-container/vars/main/defaults.yml (new file)
@@ -0,0 +1,2 @@
+---
+podman_container_env: {}
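Only podman_container_env is given a default (an empty dict): the actual and changedetection playbooks pass no environment variables, and the role unconditionally feeds env: "{{ podman_container_env }}" to the module, so the variable has to resolve even when a caller omits it. The other role variables (podman_container_name, _image, _tag, _publish, _volumes) have no defaults, and each caller above supplies all five. Note that because the file lives under vars/ rather than defaults/, it is technically a role var in Ansible precedence terms, but the vars: passed to include_role still take precedence over it, so the pi-hole override works.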