diff --git a/roles/actual/handlers/main.yml b/roles/actual/handlers/main.yml
index 2100479..3a63107 100644
--- a/roles/actual/handlers/main.yml
+++ b/roles/actual/handlers/main.yml
@@ -9,3 +9,9 @@
   ansible.builtin.systemd:
     name: ufw.service
     state: restarted
+- name: Reload systemd (daemon-reload)
+  become: true
+  become_user: "{{ actual_username }}"
+  ansible.builtin.systemd_service:
+    daemon_reload: true
+    scope: user
diff --git a/roles/actual/tasks/main.yml b/roles/actual/tasks/main.yml
index c87e418..a727558 100644
--- a/roles/actual/tasks/main.yml
+++ b/roles/actual/tasks/main.yml
@@ -12,23 +12,48 @@
     path: "/home/{{ actual_username }}/actual_data"
     state: directory
     mode: '0700'
-# Unfortunatelly I can't set the web password in the container, a user has to manually do this on startup.
-- name: Start the Actual container
+- name: Start the podman-restart.service
   become: true
   become_user: "{{ actual_username }}"
-  containers.podman.podman_container:
-    name: actual-server
-    image: docker.io/actualbudget/actual-server:24.4.0
-    # TODO: Enable containers on boot
-    # I expected podman containers to restart on boot with this policy but apparently the documentation specifically
-    # states that they won't do this. There seems to be an involved workaround to get this to work whilst keeping the
-    # containers "rootless". See this guide: https://linuxhandbook.com/autostart-podman-containers/
-    restart_policy: on-failure
-    publish:
-      - 127.0.0.1:5006:5006
-    volumes:
-      - "/home/{{ actual_username }}/actual_data:/data"
+  ansible.builtin.systemd:
+    name: podman-restart.service
     state: started
+    enabled: true
+    scope: user
+- name: Gather facts on the actual container
+  become: true
+  become_user: "{{ actual_username }}"
+  containers.podman.podman_container_info:
+    name: actual-server
+  register: actual_server_container_info
+- name: Start the actual container with correct systemd linking
+  when: "'no such container' in actual_server_container_info.stderr"
+  become: true
+  become_user: "{{ actual_username }}"
+  block:
+    - name: Start the Actual container
+      containers.podman.podman_container:
+        name: actual-server
+        image: docker.io/actualbudget/actual-server:24.4.0
+        restart_policy: always
+        publish:
+          - 127.0.0.1:5006:5006
+        volumes:
+          - "/home/{{ actual_username }}/actual_data:/data"
+        state: stopped
+        recreate: true
+        generate_systemd:
+          path: "/home/{{ actual_username }}/.config/systemd/user/"
+          restart_policy: always
+      notify: Reload systemd (daemon-reload)
+    - name: Flush handlers
+      ansible.builtin.meta: flush_handlers
+    - name: Enable the newly created systemd service for user
+      ansible.builtin.systemd:
+        name: container-actual-server.service
+        state: started
+        enabled: true
+        scope: user
 - name: Install certificate for actual.kleinendorst.info
   become: true
   ansible.builtin.command:
diff --git a/roles/pi-hole/files/dns_foward.conf b/roles/pi-hole/files/dns_foward.conf
deleted file mode 100644
index a324be6..0000000
--- a/roles/pi-hole/files/dns_foward.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-stream {
-    server {
-        listen 53 udp;
-        proxy_pass 127.0.0.1:5053;
-        proxy_responses 0;
-    }
-}
diff --git a/roles/user/tasks/main.yml b/roles/user/tasks/main.yml
index ed9b1e2..008ea21 100644
--- a/roles/user/tasks/main.yml
+++ b/roles/user/tasks/main.yml
@@ -88,6 +88,10 @@
       # Add Snapcraft to $PATH
       export PATH=$PATH:/snap/bin
 
+      # Set XDG_RUNTIME_DIR variable necessary for running systemctl as user
+      # See: https://superuser.com/questions/1561076/systemctl-user-failed-to-connect-to-bus-no-such-file-or-directory-debian-9#answers-header
+      export XDG_RUNTIME_DIR=/run/user/$(id -u $otherUser)
+
       # Starship
       eval "$(starship init zsh)"
 - name: Change the default shell of the current user