Merge pull request #1 from Kleinendorst/reinstall

Reinstall the Raspberry Pi by using Docker instead of Podman
This commit is contained in:
Thomas Kleinendorst 2024-11-21 16:17:25 +01:00 committed by GitHub
commit 5794ef6625
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
26 changed files with 208 additions and 384 deletions

View file

@ -17,6 +17,11 @@ The files within this repository should be run on a computer with Ansible instal
# Notice the space at the beginning, this prevents the shell from saving this command in its history.
echo '[ -- enter vault pass here -- ]' > .vault_pass
```
4. Install the Python3 passlib library (used internally in the user role);
```bash
sudo apt install python3-passlib
```
### Environment prerequisites
The Raspberry Pi IaC code contained within this repository provisions the Raspberry Pi itself but doesn't provision all surrounding infrastructure which is presumed to be managed by hand. The following relevant configuration is assumed:
@ -31,12 +36,12 @@ The Raspberry Pi should be installed and running with reachable SSH from the net
2. When asked: **Would you like to apply OS customisation settings?** select **EDIT SETTINGS**. Select and fill in the following settings:
1. **Set username and password**
2. **Set locale settings**
3. **Enable SSH** > **Use password authentication** (we'll harden it later to use public keys).
3. **Enable SSH** > **Allow public-key authentication only** and enter your computer's public key.
4. Disable **Eject media when finished** (probably not really important but I heard it could prevent problems on Windows).
3. Start the Raspberry Pi with an ethernet cable attached.
4. Find the assigned IP of the Raspberry Pi in the [router](http://asusrouter.com/) and configure DHCP to statically assign this address to the Raspberry Pi.
5. Add the new Raspberry Pi to the *hosts* file using the internal IP.
6. Test if the Raspberry Pi is correctly configured by opening an SSH session to it (using its IP address). If this works the next step is to [add SSH public keys for each computer that should provision/connect to the Raspberry Pi](https://linuxhandbook.com/add-ssh-public-key-to-server/). **It's important to perform this step before provisioning because that will disallow logging into SSH with a password.**
5. Add the new Raspberry Pi to the *hosts* file using the internal IP if it isn't there already.
6. Test if the Raspberry Pi is correctly configured by opening an SSH session to it (using its IP address).
## Provisioning
Provision the Raspberry Pi by running:
@ -61,6 +66,8 @@ For the next step remove the current *known_hosts* entry with: `ssh-keygen -R '1
In the router settings the Raspberry Pi is configured as the primary DNS server, so reinstalling the Pi breaks the network. Before reinstalling the Pi, revert to the default DNS provider in the
router by navigating to [its website](http://asusrouter.com/Advanced_DHCP_Content.asp), clearing the DNS Server 1 field, and applying these settings.
Also make sure to alter the **inventory/hosts** file to allow Ansible to connect using the Raspberry Pi's IP address rather than its hostname (which isn't reachable at this point).
After the Raspberry Pi is reinstalled, the DNS Server 1 setting can be added back in the router.
### Debugging users other than the main user

View file

@ -11,22 +11,27 @@
# Notice that this role changes some settings on reruns (on the "Change various sysctl-settings" task), doesn't seem problematic though.
- role: devsec.hardening.ssh_hardening
become: true
- role: geerlingguy.docker
become: true
- role: hostname
- role: basic-intalls
- role: packages
- role: user
- role: cloudflare-ddns
- role: cloudflared
- role: nginx
- role: actual
- role: changedetection
- role: pi-hole
- role: monitoring
- role: actual
- role: postgres
- role: wedding
- role: changedetection
- role: monitoring
vars:
# devsec.hardening.ssh_hardening vars:
ssh_client_port: 22 # Default, but duplicated here for documentation purpose. Not changed because its only accessible via LAN.
ssh_client_password_login: false # Default, but duplicated here for documentation purpose.
# geerlingguy.docker vars:
docker_edition: 'ce'
docker_install_compose_plugin: true
tasks:
# This task can be handy for debugging gathered facts, uncomment it if necessary:
# - name: Store gathered facts in local file

View file

@ -2,7 +2,10 @@
collections:
# See: https://galaxy.ansible.com/ui/repo/published/devsec/hardening/
- name: devsec.hardening
version: 9.0.1
version: 10.1.0
# See: https://prometheus-community.github.io/ansible/branch/main/prometheus_role.html#ansible-collections-prometheus-prometheus-prometheus-role
- name: prometheus.prometheus
version: 0.17.1
version: 0.23.0
roles:
- name: geerlingguy.docker
version: 7.4.1

View file

@ -1,25 +1,19 @@
---
- name: Include user role
ansible.builtin.include_role:
name: user
vars:
user_username: "{{ actual_username }}"
user_password: "{{ actual_password }}"
- name: Create the actual container
ansible.builtin.include_role:
name: podman-container
apply:
become: true
become_user: "{{ actual_username }}"
vars:
podman_container_name: actual-server
podman_container_image: docker.io/actualbudget/actual-server
podman_container_tag: "{{ actual_version }}"
podman_container_publish:
- 127.0.0.1:5006:5006
podman_simple_container_volumes:
- name: actual_data
mnt: /data
- name: Create a volume
become: true
community.docker.docker_volume:
name: actual_data
- name: Install the container
become: true
community.docker.docker_container:
name: actual-server
image: "docker.io/actualbudget/actual-server:{{ actual_version }}"
ports:
- "127.0.0.1:5006:5006/tcp"
mounts:
- source: actual_data
target: /data
restart_policy: always
- name: Include simple-reverse-proxy role
ansible.builtin.include_role:
name: simple-reverse-proxy

View file

@ -1,3 +1,2 @@
---
actual_username: actual
actual_version: 24.11.0

View file

@ -1,8 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
33376134646463343235646461303131626139663865333436646535383064383437616231323334
6162306132343165666134323966363739333638353332620a663034326361383233356639646463
65386537303530363335363234636464626330343864363162626233613430633430643334396636
6635653735633730310a343036363136333933653561663839613238336338633061613534326536
39343563343863643636616130316235316236656531626433613432303561383834333764336534
35636438613832643433346135623934323964346464383931353539633464333038626561643963
633839343438623261343239613534393233

View file

@ -1,6 +0,0 @@
---
- name: Restart ufw
become: true
ansible.builtin.systemd:
name: ufw.service
state: restarted

View file

@ -1,60 +0,0 @@
---
- name: Install basic packages
become: true
ansible.builtin.apt:
pkg:
- git
- vim
- dnsutils
- rsyslog
# - ufw
- podman
- snapd
state: present
- name: Install Snap Core
become: true
community.general.snap:
name: core
state: present
# - name: Set default policy (incoming)
# become: true
# community.general.ufw:
# direction: incoming
# policy: deny
# notify: Restart ufw
# - name: Set default policy (outgoing)
# become: true
# community.general.ufw:
# direction: outgoing
# policy: allow
# notify: Restart ufw
# - name: Set default policy (routed)
# become: true
# community.general.ufw:
# direction: routed
# policy: allow
# notify: Restart ufw
# - name: Allow forwarding in ufw
# become: true
# ansible.builtin.lineinfile:
# path: /etc/ufw/sysctl.conf
# regexp: '^#net/ipv4/ip_forward=1$'
# line: 'net/ipv4/ip_forward=1'
# notify: Restart ufw
# - name: Allow forwarding in sysctl
# become: true
# ansible.builtin.lineinfile:
# path: /etc/sysctl.conf
# regexp: '^#net\.ipv4\.ip_forward=1$'
# line: net.ipv4.ip_forward=1
# - name: Allow all access to ssh
# become: true
# community.general.ufw:
# rule: allow
# port: ssh
# proto: tcp
# notify: Restart ufw
# - name: Enable ufw
# become: true
# community.general.ufw:
# state: enabled

View file

@ -1,25 +1,19 @@
---
- name: Include user role
ansible.builtin.include_role:
name: user
vars:
user_username: "{{ changedetection_username }}"
user_password: "{{ changedetection_password }}"
- name: Create the changedetection container
ansible.builtin.include_role:
name: podman-container
apply:
become: true
become_user: "{{ changedetection_username }}"
vars:
podman_container_name: changedetection-server
podman_container_image: docker.io/dgtlmoon/changedetection.io
podman_container_tag: "{{ changedetection_version }}"
podman_container_publish:
- 127.0.0.1:5000:5000
podman_simple_container_volumes:
- name: changedetection_data
mnt: /datastore
- name: Create a volume
become: true
community.docker.docker_volume:
name: changedetection_data
- name: Install the container
become: true
community.docker.docker_container:
name: changedetection-server
image: "docker.io/dgtlmoon/changedetection.io:{{ changedetection_version }}"
ports:
- "127.0.0.1:5000:5000/tcp"
mounts:
- source: changedetection_data
target: /datastore
restart_policy: always
- name: Include simple-reverse-proxy role
ansible.builtin.include_role:
name: simple-reverse-proxy

View file

@ -1,3 +1,2 @@
---
changedetection_username: changedetection
changedetection_version: 0.46.03
changedetection_version: 0.47.06

View file

@ -1,9 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
65363334626534616562376362316134623034396333646361646230313864323562316666623065
6464353838306530333366653932646163313963346265310a626664653234323765646338613666
30363762326431656635623839623561346332326363646465343263663931303638623239623439
6532353332613032390a616464306336313237396163353732363566303761393165643161633165
35663362623034396638313738643937353765306262653136313438636239663333336636323765
37313635386333323666303164333030616366316439653235353732616637613564623137316635
65323965656665633738336632643463653862623836613265663335633336616264333364383438
37383038393930656339

View file

@ -21,30 +21,24 @@
simple_reverse_proxy_internal_port: 9093
simple_reverse_proxy_internal_subdomain: alertmanager
# region: Install Grafana
- name: Include user role
ansible.builtin.include_role:
name: user
vars:
user_username: "{{ grafana_username }}"
user_password: "{{ grafana_password }}"
- name: Create the grafana container
ansible.builtin.include_role:
name: podman-container
apply:
become: true
become_user: "{{ grafana_username }}"
vars:
podman_container_name: grafana-server
podman_container_image: docker.io/grafana/grafana
podman_container_tag: "{{ grafana_version }}"
podman_container_env:
GF_INSTALL_PLUGINS: "grafana-clock-panel 2.1.7"
podman_container_publish:
- 127.0.0.1:3000:3000
podman_simple_container_volumes:
- name: grafana_storage
mnt: /var/lib/grafana
- name: Include simple-reverse-proxy role - Grafana
- name: Create a volume
become: true
community.docker.docker_volume:
name: grafana_data
- name: Install the container
become: true
community.docker.docker_container:
name: grafana-server
image: "docker.io/grafana/grafana:{{ grafana_version }}"
ports:
- "127.0.0.1:3000:3000/tcp"
mounts:
- source: grafana_data
target: /var/lib/grafana
env:
GF_INSTALL_PLUGINS: "grafana-clock-panel 2.1.8"
restart_policy: always
- name: Include simple-reverse-proxy role
ansible.builtin.include_role:
name: simple-reverse-proxy
vars:

View file

@ -0,0 +1,16 @@
---
- name: Install basic packages
become: true
ansible.builtin.apt:
pkg:
- git
- vim
- dnsutils
- rsyslog
- snapd
state: present
- name: Install Snap Core
become: true
community.general.snap:
name: core
state: present

View file

@ -1,11 +0,0 @@
---
- name: Restart Nginx
become: true
ansible.builtin.systemd:
name: nginx.service
state: restarted
- name: Restart ufw
become: true
ansible.builtin.systemd:
name: ufw.service
state: restarted

View file

@ -1,93 +1,37 @@
---
- name: Create a user for running the pi-hole podman container
ansible.builtin.include_role:
name: user
vars:
user_username: "{{ pi_hole_username }}"
user_password: "{{ pi_hole_password }}"
- name: Create the pi-hole container
ansible.builtin.include_role:
name: podman-container
apply:
become: true
become_user: "{{ pi_hole_username }}"
vars:
podman_container_name: pi-hole
podman_container_image: docker.io/pihole/pihole
podman_container_tag: "{{ pi_hole_version }}"
podman_container_publish:
- 127.0.0.1:5053:53/tcp
- 127.0.0.1:5053:53/udp
- 127.0.0.1:8080:80
podman_simple_container_volumes:
- name: etc-pihole
mnt: /etc/pihole
- name: etc-dnsmasq.d
mnt: /etc/dnsmasq.d
podman_container_env:
- name: Create a volume for DNS data
become: true
community.docker.docker_volume:
name: pihole_data
- name: Create a volume for Dnsmasq data
become: true
community.docker.docker_volume:
name: dnsmasq_data
- name: Install the container
become: true
community.docker.docker_container:
name: pi-hole
image: "docker.io/pihole/pihole:{{ pi_hole_version }}"
mounts:
- source: pihole_data
target: /etc/pihole
- source: dnsmasq_data
target: /etc/dnsmasq.d
network_mode: host
restart_policy: always
env:
TZ: 'Europe/Amsterdam'
WEBPASSWORD: "{{ pi_hole_web_password }}"
# VIRTUAL_HOST: 'pi-hole.kleinendorst.info'
# FTLCONF_LOCAL_IPV4: "{{ ansible_facts['default_ipv4']['address'] }}"
PIHOLE_DNS_: 1.1.1.1;1.0.0.1
DNSMASQ_USER: root
INTERFACE: tap0
- name: Install certificate for pi-hole.kleinendorst.info
become: true
ansible.builtin.command:
cmd: register_certbot_domain.sh pi-hole.kleinendorst.info
creates: /etc/letsencrypt/live/pi-hole.kleinendorst.info # The certificate directory
- name: Set Nginx configuration
become: true
ansible.builtin.template:
src: pi-hole.conf.j2
dest: /etc/nginx/conf.d/pi-hole.conf
mode: '0644'
notify: Restart Nginx
- name: Debug
ansible.builtin.debug:
msg: "Don't forget to manually add a DNS record for pi-hole.kleinendorst.info pointing to: {{ ansible_facts['default_ipv4']['address'] }}."
- name: Setup udp port forwarding (53 > 5053) in nginx
become: true
ansible.builtin.blockinfile:
path: /etc/nginx/nginx.conf
insertbefore: '^http \{$'
block: |
stream {
server {
listen 53;
proxy_pass 127.0.0.1:5053;
}
server {
listen 53 udp;
proxy_pass 127.0.0.1:5053;
}
}
notify: Restart Nginx
# - name: Add forwarding rules for ufw
# become: true
# ansible.builtin.blockinfile:
# path: /etc/ufw/before.rules
# insertbefore: "^\\*filter$"
# block: |
# *nat
# :PREROUTING ACCEPT [0:0]
# -A PREROUTING -p tcp -i eth0 --dport 53 -j DNAT \ --to-destination 127.0.0.1:5053
# -A PREROUTING -p udp -i eth0 --dport 53 -j DNAT \ --to-destination 127.0.0.1:5053
# COMMIT
# notify: Restart ufw
# - name: Allow all access to port 53 (udp)
# become: true
# community.general.ufw:
# rule: allow
# port: '53'
# proto: udp
# notify: Restart ufw
# - name: Allow all access to port 53 (tcp)
# become: true
# community.general.ufw:
# rule: allow
# port: '53'
# proto: tcp
# notify: Restart ufw
DNSMASQ_LISTENING: all
WEB_PORT: '8080'
- name: Include simple-reverse-proxy role
ansible.builtin.include_role:
name: simple-reverse-proxy
vars:
simple_reverse_proxy_internal_port: 8080
simple_reverse_proxy_internal_subdomain: pi-hole
simple_reverse_proxy_redirect_to: /admin

View file

@ -1,25 +0,0 @@
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name pi-hole.kleinendorst.info;
# SSL via Let's Encrypt
ssl_certificate /etc/letsencrypt/live/pi-hole.kleinendorst.info/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/pi-hole.kleinendorst.info/privkey.pem; # managed by Certbot
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
location = / {
return 301 https://pi-hole.kleinendorst.info/admin;
}
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
set $upstream_address 127.0.0.1;
set $upstream_port 8080;
set $upstream_proto http;
proxy_pass $upstream_proto://$upstream_address:$upstream_port;
}
}

View file

@ -1,3 +1,2 @@
---
pi_hole_username: pi-hole
pi_hole_version: 2024.07.0

View file

@ -1,11 +1,9 @@
$ANSIBLE_VAULT;1.1;AES256
38343333306431366465313835386337326366336363336265326563306363646131636566616339
6661613931366263333039346530356336323932383236380a636638343531383731613930353033
37643532353933323633353539366637653565643539613262623037366333316361346462393133
6431633163333931360a626130653537633962326363306630306264356330646637373236393334
32383131396439393761343363353763356632333039303962633561663661323739393862353237
39343739333663656337396530366263386166323730353839393039313932323165333532616264
62393733386138616330383962666166373361313064313631353337343966623763326635666261
62343736366666623236303638346337656564313931353634633535353037666565653965646162
65626361623862643262346663633532643365306362666335626432633763333861326533353631
3963343336313630663366356638656465613735633930393534
31623263303861666139376462643866323437386464323334666434343837373031386462313536
3538306437346465346466376639666339353137333366660a383164666539373635663263326264
35353533313564336432646566346261313633333837663235643438333462343039353462663831
3637316430363666650a663932306561373333316666376337666264373737383037653531363861
30636539323361643365613139663137313137373265313266396337666237396437663433633032
34373561373262333034636136346130333631626139346535663034613830323363336461366363
37343535376138653163363833616335653566373031393131383764623636393032396165383938
34386539373261313333

View file

@ -1,15 +1,7 @@
#!/bin/bash
echo "Running as $(whoami)..."
target_user='postgres'
# This user shouldn't be mapped to postgres on the host but rather to postgres on the container.
# This user has host uid: 558821 (in container it's uid: 70). This number is resolved by getting the start
of the subuid range for this user and then adding 70 (-1) to it (since we know that that is the uid
# of the postgres user within the container).
target_path_subuid_start="$(su $target_user -c 'grep $USER /etc/subuid | cut -d ":" -f 2')"
target_host_postgres_id=$(($target_path_subuid_start + 70 - 1))
certsPath="/home/$target_user/certs"
certsPath="/home/postgres/certs"
target_host_postgres_id=70
if [[ ! -e "$certsPath" ]]; then
echo "Certs directory doesn't exist, creating certs directory: $certsPath..."
@ -23,8 +15,6 @@ for srcPath in $cert_files; do
cp -L "$srcPath" "$certsPath"
newFileName="$certsPath/$(basename $srcPath)"
echo "Setting permissions for: $newFileName to uid: $target_host_postgres_id..."
chown "$target_host_postgres_id:$target_host_postgres_id" "$newFileName"
chmod 0600 "$newFileName"
done

View file

@ -5,6 +5,7 @@
vars:
user_username: "{{ postgres_unix_username }}"
user_password: "{{ postgres_unix_password }}"
user_add_to_docker_group: true
- name: Install ensure_certificate_setup.sh
become: true
ansible.builtin.copy:
@ -12,6 +13,7 @@
dest: "/root/.bin/"
mode: '0700'
owner: root
# Output of the hook can be found as part of the logs at: /var/log/letsencrypt/letsencrypt.log
- name: Create certificates for PostgreSQL (postgres.kleinendorst.info)
become: true
ansible.builtin.command:
@ -24,46 +26,24 @@
--agree-tos -m {{ administration_email }}
-d postgres.kleinendorst.info
creates: "/etc/letsencrypt/live/postgres.kleinendorst.info"
- name: Create the postgres container
ansible.builtin.include_role:
name: podman-container
apply:
become: true
become_user: "{{ postgres_unix_username }}"
vars:
podman_container_name: postgres
podman_container_image: docker.io/postgres
podman_container_tag: "{{ postgres_version }}"
podman_container_publish:
- 0.0.0.0:5432:5432
podman_container_volumes:
- "/home/{{ postgres_unix_username }}/certs/fullchain.pem:/var/lib/postgresql/fullchain.pem:ro"
- "/home/{{ postgres_unix_username }}/certs/privkey.pem:/var/lib/postgresql/privkey.pem:ro"
podman_simple_container_volumes:
- name: postgres_data
mnt: /var/lib/postgresql/data
podman_container_command:
- -c
- ssl=on
- -c
- ssl_cert_file=/var/lib/postgresql/fullchain.pem
- -c
- ssl_key_file=/var/lib/postgresql/privkey.pem
podman_container_env:
POSTGRES_PASSWORD: "{{ postgres_password }}"
- name: Create the postgres prometheus exporter container
ansible.builtin.include_role:
name: podman-container
apply:
become: true
become_user: "{{ postgres_unix_username }}"
vars:
podman_container_name: postgres-prometheus-exporter
podman_container_image: quay.io/prometheuscommunity/postgres-exporter
podman_container_tag: "{{ postgres_prometheus_exporter_version }}"
podman_container_publish:
- 0.0.0.0:9187:9187
podman_container_env:
DATA_SOURCE_URI: "postgres.kleinendorst.info:5432/postgres"
DATA_SOURCE_USER: "postgres"
DATA_SOURCE_PASS: "{{ postgres_password }}"
- name: Create the compose project directory
become: true
become_user: "{{ postgres_unix_username }}"
ansible.builtin.file:
path: "/home/{{ postgres_unix_username }}/postgres"
state: directory
owner: "{{ postgres_unix_username }}"
mode: '0744'
- name: Create the compose project
become: true
become_user: "{{ postgres_unix_username }}"
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "/home/{{ postgres_unix_username }}/postgres/docker-compose.yml"
owner: "{{ postgres_unix_username }}"
mode: '0644'
- name: Create and start services
become: true
community.docker.docker_compose_v2:
project_src: "/home/{{ postgres_unix_username }}/postgres/"
register: docker_compose_output

View file

@ -0,0 +1,30 @@
---
services:
postgres:
image: docker.io/postgres:{{ postgres_version }}
ports:
- "0.0.0.0:5432:5432"
restart: always
volumes:
- "/home/{{ postgres_unix_username }}/certs/fullchain.pem:/var/lib/postgresql/fullchain.pem:ro"
- "/home/{{ postgres_unix_username }}/certs/privkey.pem:/var/lib/postgresql/privkey.pem:ro"
- "postgres_data:/var/lib/postgresql/data"
command:
- -c
- ssl=on
- -c
- ssl_cert_file=/var/lib/postgresql/fullchain.pem
- -c
- ssl_key_file=/var/lib/postgresql/privkey.pem
environment:
POSTGRES_PASSWORD: "{{ postgres_password }}"
postgres-prometheus-exporter:
image: quay.io/prometheuscommunity/postgres-exporter:{{ postgres_prometheus_exporter_version }}
ports:
- "0.0.0.0:9187:9187"
environment:
DATA_SOURCE_URI: "postgres.kleinendorst.info:5432/postgres"
DATA_SOURCE_USER: "postgres"
DATA_SOURCE_PASS: "{{ postgres_password }}"
volumes:
postgres_data:

View file

@ -11,13 +11,6 @@
dest: "/etc/nginx/conf.d/{{ simple_reverse_proxy_internal_subdomain }}.conf"
mode: '0644'
notify: Restart Nginx
# - name: Allow https through firewall
# become: true
# community.general.ufw:
# rule: allow
# port: https
# proto: tcp
# notify: Restart ufw
- name: Debug
ansible.builtin.debug:
msg: >-

View file

@ -11,6 +11,14 @@ server {
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
{% if simple_reverse_proxy_redirect_to != "" %}
location = / {
return 301 https://{{ simple_reverse_proxy_internal_subdomain }}.kleinendorst.info{{ simple_reverse_proxy_redirect_to }};
}
{% endif %}
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;

View file

@ -1,2 +1,3 @@
---
simple_reverse_proxy_redirect_to: ''
simple_reverse_proxy_external_port: 443

View file

@ -0,0 +1,2 @@
---
user_add_to_docker_group: false

View file

@ -1,38 +1,25 @@
---
- name: Include user role
ansible.builtin.include_role:
name: user
vars:
user_username: "{{ wedding_username }}"
user_password: "{{ wedding_password }}"
- name: Login to ghcr registry and create ${XDG_RUNTIME_DIR}/containers/auth.json
- name: Log into private GitHub registry
become: true
become_user: "{{ wedding_username }}"
containers.podman.podman_login:
community.docker.docker_login:
registry_url: ghcr.io
username: "{{ github_registry_user }}"
password: "{{ github_registry_token }}"
registry: ghcr.io
changed_when: false
- name: Create the wedding container
ansible.builtin.include_role:
name: podman-container
apply:
become: true
become_user: "{{ wedding_username }}"
vars:
podman_container_name: wedding-server
podman_container_image: ghcr.io/kleinendorst/wedding
podman_container_tag: "{{ wedding_version }}"
podman_container_publish:
- 127.0.0.1:3001:3000
podman_simple_container_volumes: []
podman_container_env:
- name: Install the container
become: true
community.docker.docker_container:
name: wedding-server
image: "ghcr.io/kleinendorst/wedding:{{ wedding_version }}"
ports:
- "127.0.0.1:3001:3000/tcp"
env:
DATABASE_HOST: 'postgres.kleinendorst.info'
DATABASE_PORT: 5432
DATABASE_DBNAME: wedding
DATABASE_PORT: '5432'
DATABASE_DBNAME: 'wedding'
DATABASE_USER: "{{ postgres.user }}"
DATABASE_PASSWORD: "{{ postgres.password }}"
SESSION_SECRET: "{{ wedding_env.secret }}"
NODE_ENV: production
NODE_ENV: 'production'
WEDDING_FULL_ACCESS_CODE: "{{ wedding_env.full_access_code }}"
WEDDING_NIGHT_ACCESS_CODE: "{{ wedding_env.night_access_code }}"
restart_policy: always