From 10c5b7836aafc6d9712dc513b7d37a0703386bd7 Mon Sep 17 00:00:00 2001
From: Thomas Kleinendorst
Date: Fri, 27 Dec 2024 13:18:52 +0100
Subject: [PATCH] Delete podman-container role

No longer used; we're using Docker instead.
---
 roles/podman-container/handlers/main.yml      |  7 --
 roles/podman-container/tasks/main.yml         | 91 ------------------
 roles/podman-container/vars/main/defaults.yml |  4 -
 3 files changed, 102 deletions(-)
 delete mode 100644 roles/podman-container/handlers/main.yml
 delete mode 100644 roles/podman-container/tasks/main.yml
 delete mode 100644 roles/podman-container/vars/main/defaults.yml

diff --git a/roles/podman-container/handlers/main.yml b/roles/podman-container/handlers/main.yml
deleted file mode 100644
index 94809df..0000000
--- a/roles/podman-container/handlers/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Reload systemd (daemon-reload)
-  become: true
-  become_user: "{{ container_user }}"
-  ansible.builtin.systemd_service:
-    daemon_reload: true
-    scope: user
diff --git a/roles/podman-container/tasks/main.yml b/roles/podman-container/tasks/main.yml
deleted file mode 100644
index 230dfb9..0000000
--- a/roles/podman-container/tasks/main.yml
+++ /dev/null
@@ -1,91 +0,0 @@
----
-# This role currently assumes that the target container must be restarted after a system reboot.
-- name: Ensure the podman-restart.service is running
-  ansible.builtin.systemd:
-    name: podman-restart.service
-    state: started
-    enabled: true
-    scope: user
-- name: Run whoami
-  ansible.builtin.command: whoami
-  changed_when: false
-  register: whoami
-- name: Register current user in a variable
-  ansible.builtin.set_fact:
-    container_user: "{{ whoami.stdout }}"
-- name: Set default podman_simple_container_volumes if not provided
-  ansible.builtin.set_fact:
-    podman_simple_container_volumes: "{{ podman_simple_container_volumes | default([]) }}"
-- name: Create mount directories
-  ansible.builtin.file:
-    path: "/home/{{ container_user }}/{{ item.name }}"
-    state: directory
-    mode: '0700'
-  loop: "{{ podman_simple_container_volumes }}"
-  loop_control:
-    label: "{{ item.name }}"
-    index_var: index
-  register: command_result
-  failed_when:
-    # This task can fail after initial creation: the container using the volume changes the
-    # owner of the directory to a UID known only inside the container. Since the task has
-    # nothing left to change at that point, we ignore that specific error.
-    - "command_result.module_stdout is defined and 'set_mode_if_different' not in command_result.module_stdout"
-- name: Gather facts on the container
-  containers.podman.podman_container_info:
-    name: "{{ podman_container_name }}"
-  register: container_info
-- name: Start the container with correct systemd linking
-  # ⬇️ Run when the container doesn't exist yet or its image is outdated
-  when: container_info.containers | length == 0 or not container_info.containers[0]["Config"]["Image"] is match(".*:" + podman_container_tag)
-  block:
-    # The image would be downloaded anyway in the step that starts the container (if it is missing from the system).
-    # However, pulling it up front fixes an issue where the DNS server is temporarily unavailable while the pi-hole
-    # container is removed and then recreated: with the image already on the machine, the pull can't fail on an unresolvable address.
-    - name: Pull the requested image before removing the container
-      containers.podman.podman_image:
-        name: "{{ podman_container_image }}:{{ podman_container_tag }}"
-        state: present
-    - name: Make sure the container isn't present
-      containers.podman.podman_container:
-        name: "{{ podman_container_name }}"
-        state: absent
-    - name: Make sure the systemd unit file isn't present
-      ansible.builtin.file:
-        path: "/home/{{ container_user }}/.config/systemd/user/container-{{ podman_container_name }}.service"
-        state: absent
-      notify: Reload systemd (daemon-reload)
-    - name: Flush handlers
-      ansible.builtin.meta: flush_handlers
-    - name: Initialize container_volumes
-      ansible.builtin.set_fact:
-        container_volumes: "{{ podman_container_volumes }}"
-    - name: Map volumes to a Podman-accepted list
-      ansible.builtin.set_fact:
-        container_volumes: "{{ container_volumes | default(podman_container_volumes) + ['/home/' + container_user + '/' + item.name + ':' + item.mnt] }}"
-      loop: "{{ podman_simple_container_volumes }}"
-    - name: Start the container
-      containers.podman.podman_container:
-        name: "{{ podman_container_name }}"
-        image: "{{ podman_container_image }}:{{ podman_container_tag }}"
-        restart_policy: always
-        user: root # Still isolated from the host system 👍
-        command: "{{ podman_container_command }}"
-        hostname: "{{ ansible_facts['hostname'] }}"
-        publish: "{{ podman_container_publish }}"
-        env: "{{ podman_container_env }}"
-        volumes: "{{ container_volumes }}"
-        state: stopped
-        # For more information on the systemd startup service, see: https://linuxhandbook.com/autostart-podman-containers/
-        generate_systemd:
-          path: "/home/{{ container_user }}/.config/systemd/user/"
-          restart_policy: always
-      notify: Reload systemd (daemon-reload)
-    - name: Flush handlers
-      ansible.builtin.meta: flush_handlers
-    - name: Enable the newly created systemd service for the user
-      ansible.builtin.systemd:
-        name: "container-{{ podman_container_name }}.service"
-        state: started
-        enabled: true
-        scope: user
diff --git a/roles/podman-container/vars/main/defaults.yml b/roles/podman-container/vars/main/defaults.yml
deleted file mode 100644
index 8246023..0000000
--- a/roles/podman-container/vars/main/defaults.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-podman_container_env: {}
-podman_container_volumes: []
-podman_container_command: []
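
Note: the commit message points to Docker as the replacement. A rough sketch of
the equivalent task using the community.docker collection might look like the
following; the docker_* variable names are hypothetical, chosen only to mirror
the podman_* variables above, and this repo's actual Docker role may differ:

    # The Docker daemon applies restart_policy itself, so none of the
    # user-level systemd unit generation removed above is needed here.
    - name: Start the container with Docker
      community.docker.docker_container:
        name: "{{ docker_container_name }}"
        image: "{{ docker_container_image }}:{{ docker_container_tag }}"
        restart_policy: always
        published_ports: "{{ docker_container_publish }}"
        env: "{{ docker_container_env }}"
        volumes: "{{ docker_container_volumes }}"
        state: started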