fix: Gitea Traefik routing and connection pool optimization
Checks failed:
- 🚀 Build & Deploy Image / Determine Build Necessity (push): failing after 10m14s; the dependent Build Runtime Base Image, Build Docker Image, Run Tests & Quality Checks, Auto-deploy to Staging, and Auto-deploy to Production jobs were skipped
- Security Vulnerability Scan / Check for Dependency Changes (push): failing after 11m25s; Composer Security Audit was cancelled

- Remove middleware reference from Gitea Traefik labels (caused routing issues)
- Optimize Gitea connection pool settings (MAX_IDLE_CONNS=30, authentication_timeout=180s)
- Add explicit service reference in Traefik labels
- Fix intermittent 504 timeouts by improving PostgreSQL connection handling

Fixes Gitea unreachability via git.michaelschiemer.de
Commit 36ef2a1e2c (parent 85c369e846), 2025-11-09 14:46:15 +01:00
1366 changed files with 104925 additions and 28719 deletions
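
For reference, a minimal sketch of the kind of compose-level change the message above describes: the Gitea router's middleware reference dropped, an explicit Traefik service reference added, and the connection and timeout settings applied. The service names ("gitea", "postgres") and the "websecure" entrypoint are illustrative assumptions, not copied from this repository; the domain, MAX_IDLE_CONNS=30, and authentication_timeout=180s come from the commit message itself.

# Illustrative docker-compose excerpt (not the repository's actual file)
services:
  gitea:
    environment:
      # Gitea reads GITEA__<section>__<key> overrides; value per the commit message
      GITEA__database__MAX_IDLE_CONNS: "30"
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.gitea.rule=Host(`git.michaelschiemer.de`)"
      - "traefik.http.routers.gitea.entrypoints=websecure"
      # Explicit service reference; the router no longer names a middleware
      - "traefik.http.routers.gitea.service=gitea"
      - "traefik.http.services.gitea.loadbalancer.server.port=3000"
  postgres:
    # authentication_timeout is a PostgreSQL server parameter; 180s per the commit message
    command: ["postgres", "-c", "authentication_timeout=180s"]

Naming the router's service explicitly keeps Traefik from having to infer the backing service, which is one plausible source of the routing issues the message mentions.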

@@ -1,11 +1,15 @@
 ---
-# Source path for application stack files on the control node
-# Use playbook_dir as base, then go to ../stacks/application
+# Source path for production stack files on the control node
+# Use playbook_dir as base, then go to ../stacks/production
 # This assumes playbooks are in deployment/ansible/playbooks
-application_stack_src: "{{ playbook_dir | default(role_path + '/..') }}/../stacks/application"
+# Note: Use ~ for string concatenation in Jinja2 templates
+# Note: Don't use application_stack_src in the default chain to avoid recursion
+application_stack_src: "{{ (playbook_dir | default(role_path + '/..') | dirname | dirname | dirname) ~ '/stacks/production' }}"
 # Destination path on the target host (defaults to configured app_stack_path)
-application_stack_dest: "{{ app_stack_path | default(stacks_base_path + '/application') }}"
+# Note: Don't use application_stack_dest in the default chain to avoid recursion
+# Note: Use ~ for string concatenation in Jinja2 templates
+application_stack_dest: "{{ app_stack_path | default((stacks_base_path | default('/home/deploy/deployment/stacks')) ~ '/production') }}"
 # Template used to generate the application .env file
 application_env_template: "{{ role_path }}/../../templates/application.env.j2"
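
To make the new application_stack_src expression concrete: it strips three path components off playbook_dir and then appends /stacks/production via the ~ concatenation operator. A worked resolution, assuming the layout named in the comments above (playbooks under deployment/ansible/playbooks) and an illustrative /repo prefix:

# Assuming playbook_dir = /repo/deployment/ansible/playbooks (illustrative path):
#   playbook_dir | dirname                      -> /repo/deployment/ansible
#   playbook_dir | dirname | dirname            -> /repo/deployment
#   playbook_dir | dirname | dirname | dirname  -> /repo
#   ... ~ '/stacks/production'                  -> /repo/stacks/production
# On the target host, the application_stack_dest fallback resolves to
# /home/deploy/deployment/stacks/production when app_stack_path and
# stacks_base_path are unset.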
@@ -44,3 +48,69 @@ application_compose_suffix: "{{ 'staging.yml' if application_environment == 'sta
# Service names based on environment
application_service_name: "{{ 'staging-app' if application_environment == 'staging' else 'php' }}"
application_php_service_name: "{{ application_service_name }}"
# Code Deployment Configuration
application_code_dest: "/home/deploy/michaelschiemer/current"
application_deployment_method: "git" # Options: git, rsync
application_git_repository_url_default: "https://git.michaelschiemer.de/michael/michaelschiemer.git"
application_git_branch: "{{ 'staging' if application_environment == 'staging' else 'main' }}"
application_git_retries: 5
application_git_retry_delay: 10
application_rsync_source: "{{ playbook_dir | default('') | dirname | dirname | dirname }}"
application_rsync_opts:
- "--chmod=D755,F644"
- "--exclude=.git"
- "--exclude=.gitignore"
- "--exclude=node_modules"
- "--exclude=vendor"
- "--exclude=.env"
- "--exclude=.env.*"
- "--exclude=*.log"
- "--exclude=.idea"
- "--exclude=.vscode"
- "--exclude=.DS_Store"
- "--exclude=*.swp"
- "--exclude=*.swo"
- "--exclude=*~"
- "--exclude=.phpunit.result.cache"
- "--exclude=coverage"
- "--exclude=.phpunit.cache"
- "--exclude=public/assets"
- "--exclude=storage/logs"
- "--exclude=storage/framework/cache"
- "--exclude=storage/framework/sessions"
- "--exclude=storage/framework/views"
- "--exclude=deployment"
- "--exclude=docker"
- "--exclude=.deployment-archive-*"
- "--exclude=docs"
- "--exclude=tests"
application_php_scripts:
- worker.php
- console.php
application_critical_files:
- worker.php
- console.php
- composer.json
# Composer Configuration
application_restart_workers_after_composer: true
# Container Management Configuration
application_container_action: "fix" # Options: fix, fix-web, recreate, recreate-with-env, sync-recreate
application_container_target_services: "queue-worker scheduler"
application_container_status_services: "queue-worker web scheduler php"
application_container_stabilize_wait: 5
# Health Check Configuration
application_health_check_logs_tail: 20
application_health_check_final: false
application_show_status: true
# Logs Configuration
application_logs_tail: 50
application_logs_check_vendor: true
application_logs_check_permissions: true
application_logs_check_files: true
application_logs_list_files: false

@@ -0,0 +1,10 @@
---
# Handlers for Application Role
- name: restart application workers
  ansible.builtin.shell: |
    cd {{ application_code_dest }}
    docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} restart queue-worker scheduler
  changed_when: true
  failed_when: false

@@ -0,0 +1,87 @@
---
# Install Composer Dependencies in Application Container
- name: Check if composer.json exists
ansible.builtin.stat:
path: "{{ application_code_dest }}/composer.json"
register: composer_json_exists
- name: Fail if composer.json is missing
ansible.builtin.fail:
msg: "composer.json not found at {{ application_code_dest }}/composer.json"
when: not composer_json_exists.stat.exists
- name: Check if container is running
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} ps {{ application_php_service_name }} --format json
register: container_status
changed_when: false
failed_when: false
- name: Display container status
ansible.builtin.debug:
msg: "Container status: {{ container_status.stdout }}"
when: application_show_status | default(true) | bool
- name: Fail if container is not running
ansible.builtin.fail:
msg: |
Container '{{ application_php_service_name }}' is not running!
The container must be started before installing composer dependencies.
This is typically done by the 'deploy-image.yml' playbook which should run before this.
To start the container manually:
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} up -d {{ application_php_service_name }}
Note: The container requires environment variables (DB_USERNAME, DB_PASSWORD, etc.)
which should be set in a .env file or via docker-compose environment configuration.
when: container_status.rc != 0 or '"State":"running"' not in container_status.stdout
- name: Install composer dependencies in PHP container
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} exec -T {{ application_php_service_name }} composer install --no-dev --optimize-autoloader --no-interaction
register: composer_install
changed_when: true
failed_when: composer_install.rc != 0
- name: Display composer install output
ansible.builtin.debug:
msg: |
Composer Install Output:
stdout: {{ composer_install.stdout }}
stderr: {{ composer_install.stderr }}
rc: {{ composer_install.rc }}
when:
- composer_install.rc != 0
- application_show_status | default(true) | bool
- name: Restart queue-worker and scheduler to pick up vendor directory
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} restart queue-worker scheduler
register: restart_workers
changed_when: true
failed_when: false
when: application_restart_workers_after_composer | default(true) | bool
- name: Verify vendor/autoload.php exists
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} exec -T {{ application_php_service_name }} test -f /var/www/html/vendor/autoload.php && echo "EXISTS" || echo "MISSING"
register: autoload_check
changed_when: false
- name: Display autoload verification
ansible.builtin.debug:
msg: "vendor/autoload.php: {{ autoload_check.stdout.strip() }}"
when: application_show_status | default(true) | bool
- name: Fail if autoload.php is missing
ansible.builtin.fail:
msg: "vendor/autoload.php was not created after composer install"
when: "autoload_check.stdout.strip() != 'EXISTS'"

@@ -0,0 +1,86 @@
---
# Container Management Tasks (Fix, Recreate, etc.)
- name: Check if vendor directory exists on host
ansible.builtin.stat:
path: "{{ application_code_dest }}/vendor"
register: vendor_dir_exists
- name: Display vendor directory status
ansible.builtin.debug:
msg: "vendor directory on host: {{ 'EXISTS' if vendor_dir_exists.stat.exists else 'MISSING' }}"
when: application_show_status | default(true) | bool
- name: Install composer dependencies in PHP container (if vendor missing)
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} exec -T {{ application_php_service_name }} composer install --no-dev --optimize-autoloader --no-interaction
register: composer_install
changed_when: true
failed_when: composer_install.rc != 0
when:
- application_container_action | default('fix') == 'fix'
- not vendor_dir_exists.stat.exists
- name: Verify vendor/autoload.php exists in container
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} exec -T {{ application_php_service_name }} test -f /var/www/html/vendor/autoload.php && echo "EXISTS" || echo "MISSING"
register: autoload_check
changed_when: false
when: application_container_action | default('fix') == 'fix'
- name: Display autoload verification
ansible.builtin.debug:
msg: "vendor/autoload.php in container: {{ autoload_check.stdout.strip() }}"
when:
- application_container_action | default('fix') == 'fix'
- application_show_status | default(true) | bool
- name: Recreate web container with new security settings
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} up -d --force-recreate --no-deps web
register: recreate_web
changed_when: true
when:
- application_container_action | default('fix') in ['fix', 'fix-web']
- name: Recreate queue-worker and scheduler containers
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} up -d --force-recreate {{ application_container_target_services | default('queue-worker scheduler') }}
register: recreate_containers
changed_when: true
when:
- application_container_action | default('fix') in ['recreate', 'recreate-with-env', 'sync-recreate']
- name: Restart queue-worker and scheduler to pick up vendor directory
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} restart queue-worker scheduler
register: restart_workers
changed_when: true
failed_when: false
when:
- application_container_action | default('fix') == 'fix'
- application_restart_workers_after_composer | default(true) | bool
- name: Wait for containers to stabilize
ansible.builtin.pause:
seconds: "{{ application_container_stabilize_wait | default(5) }}"
when: application_container_action | default('fix') in ['fix', 'recreate', 'recreate-with-env', 'sync-recreate']
- name: Get final container status
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} ps {{ application_container_status_services | default('queue-worker web scheduler php') }}
register: final_status
changed_when: false
- name: Display final container status
ansible.builtin.debug:
msg: |
{{ final_status.stdout }}
when: application_show_status | default(true) | bool

@@ -1,12 +1,326 @@
---
- name: Debug all available variables before password determination
ansible.builtin.debug:
msg: |
Available variables for registry password:
- docker_registry_password_default defined: {{ docker_registry_password_default is defined }}
- vault_docker_registry_password defined: {{ vault_docker_registry_password is defined }}
- All vault_* variable names: {{ vars.keys() | select('match', '^vault_.*') | list | join(', ') }}
delegate_to: localhost
become: no
- name: Check if docker_registry_password_default is set (safe check)
ansible.builtin.set_fact:
_docker_registry_password_default_set: "{{ 'YES' if (docker_registry_password_default is defined and docker_registry_password_default | string | trim != '') else 'NO' }}"
delegate_to: localhost
become: no
when: docker_registry_password_default is defined
- name: Check if vault_docker_registry_password is set (safe check)
ansible.builtin.set_fact:
_vault_docker_registry_password_set: "{{ 'YES' if (vault_docker_registry_password is defined and vault_docker_registry_password | string | trim != '') else 'NO' }}"
delegate_to: localhost
become: no
when: vault_docker_registry_password is defined
- name: Debug password status
ansible.builtin.debug:
msg: |
Password status:
- docker_registry_password_default: {{ _docker_registry_password_default_set | default('NOT DEFINED') }}
- vault_docker_registry_password: {{ _vault_docker_registry_password_set | default('NOT DEFINED') }}
delegate_to: localhost
become: no
- name: Determine Docker registry password from vault or defaults
ansible.builtin.set_fact:
registry_password: >-
{%- if docker_registry_password_default is defined and docker_registry_password_default | string | trim != '' -%}
{{ docker_registry_password_default }}
{%- elif vault_docker_registry_password is defined and vault_docker_registry_password | string | trim != '' -%}
{{ vault_docker_registry_password }}
{%- else -%}
{{ '' }}
{%- endif -%}
no_log: yes
- name: Debug registry password source after determination
ansible.builtin.debug:
msg: |
Registry password determination result:
- docker_registry_password_default: {{ 'SET (length: ' + (docker_registry_password_default | default('') | string | length | string) + ')' if (docker_registry_password_default | default('') | string | trim) != '' else 'NOT SET' }}
- vault_docker_registry_password defined: {{ vault_docker_registry_password is defined }}
- vault_docker_registry_password set: {{ 'YES (length: ' + (vault_docker_registry_password | default('') | string | length | string) + ')' if (vault_docker_registry_password | default('') | string | trim) != '' else 'NO' }}
- registry_password set: {{ 'YES (length: ' + (registry_password | default('') | string | length | string) + ')' if (registry_password | default('') | string | trim) != '' else 'NO' }}
delegate_to: localhost
become: no
- name: Debug vault loading
ansible.builtin.debug:
msg: |
Vault loading status:
- Vault file exists: {{ application_vault_stat.stat.exists | default(false) }}
- vault_docker_registry_password defined: {{ vault_docker_registry_password is defined }}
- vault_docker_registry_password value: {{ 'SET (length: ' + (vault_docker_registry_password | default('') | string | length | string) + ')' if (vault_docker_registry_password | default('') | string | trim) != '' else 'NOT SET or EMPTY' }}
- registry_password: {{ 'SET (length: ' + (registry_password | default('') | string | length | string) + ')' if (registry_password | default('') | string | trim) != '' else 'NOT SET or EMPTY' }}
when: true
no_log: yes
- name: Check if registry is accessible
ansible.builtin.uri:
url: "http://{{ docker_registry | default('localhost:5000') }}/v2/"
method: GET
status_code: [200, 401]
timeout: 5
register: registry_check
ignore_errors: yes
delegate_to: "{{ inventory_hostname }}"
become: no
- name: Debug registry accessibility
ansible.builtin.debug:
msg: |
Registry accessibility check:
- Registry URL: http://{{ docker_registry | default('localhost:5000') }}/v2/
- Status code: {{ registry_check.status | default('UNKNOWN') }}
- Accessible: {{ 'YES' if registry_check.status | default(0) in [200, 401] else 'NO' }}
- Note: Status 401 means registry requires authentication (expected)
delegate_to: localhost
become: no
- name: Login to Docker registry
community.docker.docker_login:
registry_url: "{{ docker_registry | default('localhost:5000') }}"
username: "{{ docker_registry_username_default | default('admin') }}"
password: "{{ registry_password }}"
when:
- registry_password | string | trim != ''
- registry_check.status | default(0) in [200, 401]
no_log: yes
ignore_errors: yes
register: docker_login_result
- name: Warn if Docker registry login failed
ansible.builtin.debug:
msg: "WARNING: Docker registry login failed or skipped. Images may not be pullable without authentication."
when:
- registry_password | string | trim != ''
- docker_login_result.failed | default(false)
- name: Debug registry authentication status
ansible.builtin.debug:
msg: |
Registry authentication status:
- Registry: {{ docker_registry | default('localhost:5000') }}
- Password set: {{ 'YES' if (registry_password | string | trim) != '' else 'NO' }}
- Login result: {{ 'SUCCESS' if (docker_login_result.failed | default(true) == false) else 'FAILED or SKIPPED' }}
- Username: {{ docker_registry_username_default | default('admin') }}
when: true
- name: Fail if registry password is not set
ansible.builtin.fail:
msg: |
Docker registry authentication required but password not set!
The registry at {{ docker_registry | default('localhost:5000') }} requires authentication.
Please set the password in one of these ways:
1. Set in vault file (recommended):
ansible-vault edit {{ vault_file | default('inventory/group_vars/production/vault.yml') }}
# Add: vault_docker_registry_password: "your-password"
2. Pass via extra vars:
-e "docker_registry_password_default=your-password"
3. Use init-secrets.sh script to generate all passwords:
cd deployment/ansible
./scripts/init-secrets.sh
Note: The registry password was likely generated when the registry stack was deployed.
Check the registry role output or the vault file for the generated password.
when:
- registry_password | string | trim == ''
- docker_registry | default('localhost:5000') == 'localhost:5000'
- name: Check registry htpasswd file to verify password
ansible.builtin.shell: |
if [ -f "{{ registry_auth_path | default('/home/deploy/deployment/stacks/registry/auth') }}/htpasswd" ]; then
cat "{{ registry_auth_path | default('/home/deploy/deployment/stacks/registry/auth') }}/htpasswd"
else
echo "htpasswd file not found"
fi
register: registry_htpasswd_check
changed_when: false
failed_when: false
delegate_to: "{{ inventory_hostname }}"
become: no
when: docker_login_result.failed | default(false)
- name: Debug registry password mismatch
ansible.builtin.debug:
msg: |
Registry authentication failed!
Registry: {{ docker_registry | default('localhost:5000') }}
Username: {{ docker_registry_username_default | default('admin') }}
Possible causes:
1. The password in vault does not match the password used during registry deployment
2. The registry was deployed with a different password (generated by registry role)
3. The username is incorrect
To fix:
1. Check the registry htpasswd file on the server:
cat {{ registry_auth_path | default('/home/deploy/deployment/stacks/registry/auth') }}/htpasswd
2. Extract the password from the registry .env file (if available):
grep REGISTRY_AUTH {{ registry_stack_path | default('/home/deploy/deployment/stacks/registry') }}/.env
3. Update the vault file with the correct password:
ansible-vault edit {{ vault_file | default('inventory/group_vars/production/vault.yml') }}
# Set: vault_docker_registry_password: "correct-password"
4. Or re-deploy the registry stack with the password from vault:
ansible-playbook -i inventory/production.yml playbooks/setup-infrastructure.yml --tags registry
Registry htpasswd file content:
{{ registry_htpasswd_check.stdout | default('NOT FOUND') }}
when:
- registry_password | string | trim != ''
- docker_login_result.failed | default(false)
- name: Fail if registry authentication failed and password was provided
ansible.builtin.fail:
msg: |
Docker registry authentication failed!
Registry: {{ docker_registry | default('localhost:5000') }}
Username: {{ docker_registry_username_default | default('admin') }}
The password in the vault file does not match the password used during registry deployment.
Please check the debug output above for instructions on how to fix this.
when:
- registry_password | string | trim != ''
- docker_login_result.failed | default(false)
- name: Force pull latest Docker images before deployment
shell: |
docker compose -f {{ application_stack_dest }}/docker-compose.base.yml -f {{ application_stack_dest }}/docker-compose.{{ application_compose_suffix }} pull --ignore-pull-failures
changed_when: false
failed_when: false
when: not ansible_check_mode
- name: Verify entrypoint script exists in Docker image (method 1 - file check)
shell: |
docker run --rm --entrypoint=/bin/sh {{ docker_registry | default('localhost:5000') }}/{{ app_name | default('framework') }}:latest -c "test -f /usr/local/bin/entrypoint.sh && ls -la /usr/local/bin/entrypoint.sh || echo 'FILE_NOT_FOUND'"
register: entrypoint_check
changed_when: false
failed_when: false
- name: Verify entrypoint script exists in Docker image (method 2 - inspect image)
shell: |
docker image inspect {{ docker_registry | default('localhost:5000') }}/{{ app_name | default('framework') }}:latest --format '{{ "{{" }}.Config.Entrypoint{{ "}}" }}' 2>&1 || echo "INSPECT_FAILED"
register: entrypoint_inspect
changed_when: false
failed_when: false
- name: Verify entrypoint script exists in Docker image (method 3 - extract and check)
shell: |
CONTAINER_ID=$(docker create {{ docker_registry | default('localhost:5000') }}/{{ app_name | default('framework') }}:latest 2>/dev/null) && \
docker cp $CONTAINER_ID:/usr/local/bin/entrypoint.sh /tmp/entrypoint_check.sh 2>&1 && \
if [ -f /tmp/entrypoint_check.sh ]; then \
echo "FILE_EXISTS"; \
ls -la /tmp/entrypoint_check.sh; \
head -5 /tmp/entrypoint_check.sh; \
rm -f /tmp/entrypoint_check.sh; \
else \
echo "FILE_NOT_FOUND"; \
fi && \
docker rm $CONTAINER_ID >/dev/null 2>&1 || true
register: entrypoint_extract
changed_when: false
failed_when: false
- name: Set entrypoint verification message
set_fact:
entrypoint_verification_msg: |
==========================================
Entrypoint Script Verification
==========================================
Image: {{ docker_registry | default('localhost:5000') }}/{{ app_name | default('framework') }}:latest
Method 1 - File Check:
Return Code: {{ entrypoint_check.rc | default('unknown') }}
Output: {{ entrypoint_check.stdout | default('No output') }}
Method 2 - Image Inspect:
Entrypoint Config: {{ entrypoint_inspect.stdout | default('Not available') }}
Method 3 - Extract and Check:
{{ entrypoint_extract.stdout | default('Check not performed') }}
{% if 'FILE_NOT_FOUND' in entrypoint_check.stdout or 'FILE_NOT_FOUND' in entrypoint_extract.stdout %}
⚠️ WARNING: Entrypoint script NOT FOUND in image!
This means the Docker image was built without the entrypoint script.
Possible causes:
1. The entrypoint script was not copied during rsync to build directory
2. The Dockerfile COPY command failed silently
3. The image needs to be rebuilt with --no-cache
Next steps:
1. Rebuild the image: ansible-playbook -i inventory/production.yml playbooks/build-initial-image.yml --vault-password-file secrets/.vault_pass -e "build_no_cache=true"
2. Check if docker/entrypoint.sh exists on server: ls -la /home/deploy/michaelschiemer/docker/entrypoint.sh
3. Manually check image: docker run --rm --entrypoint=/bin/sh localhost:5000/framework:latest -c "ls -la /usr/local/bin/entrypoint.sh"
{% elif entrypoint_check.rc == 0 %}
✅ Entrypoint script found in image
File details: {{ entrypoint_check.stdout }}
{% if '\r' in entrypoint_extract.stdout %}
⚠️ CRITICAL: Entrypoint script has CRLF line endings!
The script contains \r characters which will cause "no such file or directory" errors.
The script needs to be converted to LF line endings before building the image.
{% endif %}
{% else %}
⚠️ Could not verify entrypoint script (check may have failed)
{% endif %}
==========================================
- name: Display entrypoint script verification result
debug:
var: entrypoint_verification_msg
- name: Deploy application stack
community.docker.docker_compose_v2:
project_src: "{{ application_stack_dest }}"
files:
- docker-compose.base.yml
- "docker-compose.{{ application_compose_suffix }}"
state: present
pull: always
recreate: "{{ application_compose_recreate }}"
remove_orphans: "{{ application_remove_orphans | bool }}"
register: application_compose_result
failed_when: false
- name: Show PHP container logs if deployment failed
shell: |
docker compose -f {{ application_stack_dest }}/docker-compose.base.yml -f {{ application_stack_dest }}/docker-compose.{{ application_compose_suffix }} logs --tail=50 {{ application_service_name }} 2>&1 || true
register: application_php_logs
changed_when: false
when: application_compose_result.failed | default(false)
- name: Display PHP container logs on failure
debug:
msg: |
PHP Container Logs (last 50 lines):
{{ application_php_logs.stdout | default('No logs available') }}
when: application_compose_result.failed | default(false)
- name: Fail if deployment failed
fail:
msg: "Application stack deployment failed. Check logs above for details."
when: application_compose_result.failed | default(false)
- name: Wait for application container to report Up
shell: |
@@ -17,6 +331,48 @@
retries: "{{ ((application_wait_timeout | int) + (application_wait_interval | int) - 1) // (application_wait_interval | int) }}"
delay: "{{ application_wait_interval | int }}"
when: application_compose_result.changed
failed_when: false
- name: Show container status when container doesn't start
shell: |
docker compose -f {{ application_stack_dest }}/docker-compose.base.yml -f {{ application_stack_dest }}/docker-compose.{{ application_compose_suffix }} ps {{ application_service_name }}
register: application_container_status
changed_when: false
when:
- application_compose_result.changed
- application_app_running.rc != 0
- name: Show PHP container logs when container doesn't start
shell: |
docker compose -f {{ application_stack_dest }}/docker-compose.base.yml -f {{ application_stack_dest }}/docker-compose.{{ application_compose_suffix }} logs --tail=100 {{ application_service_name }} 2>&1 || true
register: application_php_logs_failed
changed_when: false
when:
- application_compose_result.changed
- application_app_running.rc != 0
- name: Display container status and logs when startup failed
debug:
msg: |
Container Status:
{{ application_container_status.stdout | default('Container not found') }}
Container Logs (last 100 lines):
{{ application_php_logs_failed.stdout | default('No logs available') }}
when:
- application_compose_result.changed
- application_app_running.rc != 0
- name: Fail if container didn't start
fail:
msg: |
Application container '{{ application_service_name }}' failed to start.
Check the logs above for details.
You can also check manually with:
docker compose -f {{ application_stack_dest }}/docker-compose.base.yml -f {{ application_stack_dest }}/docker-compose.{{ application_compose_suffix }} logs {{ application_service_name }}
when:
- application_compose_result.changed
- application_app_running.rc != 0
- name: Ensure app container is running before migrations
shell: |

@@ -0,0 +1,236 @@
---
# Deploy Application Code via Git or Rsync
- name: Set git_repo_url from provided value or default
ansible.builtin.set_fact:
git_repo_url: "{{ application_git_repository_url if (application_git_repository_url is defined and application_git_repository_url != '') else application_git_repository_url_default }}"
- name: Determine deployment method
ansible.builtin.set_fact:
deployment_method: "{{ application_deployment_method | default('git') }}"
when: application_deployment_method is not defined
- name: Ensure Git is installed (for Git deployment)
ansible.builtin.apt:
name: git
state: present
update_cache: no
become: yes
when: deployment_method == 'git'
- name: Ensure application code directory exists
ansible.builtin.file:
path: "{{ application_code_dest }}"
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0755'
become: yes
# Git Deployment Tasks
- name: Check if repository already exists (Git)
ansible.builtin.stat:
path: "{{ application_code_dest }}/.git"
register: git_repo_exists
when: deployment_method == 'git'
- name: Check if destination directory exists (Git)
ansible.builtin.stat:
path: "{{ application_code_dest }}"
register: dest_dir_exists
when: deployment_method == 'git'
- name: Remove destination directory if it exists but is not a git repo (Git)
ansible.builtin.file:
path: "{{ application_code_dest }}"
state: absent
when:
- deployment_method == 'git'
- dest_dir_exists.stat.exists
- not git_repo_exists.stat.exists
become: yes
- name: Clone repository (if not exists) (Git)
ansible.builtin.git:
repo: "{{ git_repo_url }}"
dest: "{{ application_code_dest }}"
version: "{{ application_git_branch }}"
force: no
update: no
when:
- deployment_method == 'git'
- not git_repo_exists.stat.exists
environment:
GIT_TERMINAL_PROMPT: "0"
vars:
ansible_become: no
register: git_clone_result
retries: "{{ application_git_retries | default(5) }}"
delay: "{{ application_git_retry_delay | default(10) }}"
until: git_clone_result is succeeded
ignore_errors: yes
- name: Fail if git clone failed after retries (Git)
ansible.builtin.fail:
msg: "Failed to clone repository after {{ application_git_retries | default(5) }} retries. Gitea may be unreachable or overloaded. Last error: {{ git_clone_result.msg | default('Unknown error') }}"
when:
- deployment_method == 'git'
- not git_repo_exists.stat.exists
- git_clone_result is failed
- name: Check if repository is already on correct branch (Git)
ansible.builtin.shell: |
cd {{ application_code_dest }}
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "")
TARGET_BRANCH="{{ application_git_branch | default('main') }}"
if [ "$CURRENT_BRANCH" = "$TARGET_BRANCH" ] || [ "$CURRENT_BRANCH" = "HEAD" ]; then
echo "ALREADY_ON_BRANCH"
else
echo "NEEDS_UPDATE"
fi
register: git_branch_check
changed_when: false
failed_when: false
when:
- deployment_method == 'git'
- git_repo_exists.stat.exists
- application_skip_git_update | default(false) | bool == false
- name: Update repository (if exists and not already on correct branch) (Git)
ansible.builtin.git:
repo: "{{ git_repo_url }}"
dest: "{{ application_code_dest }}"
version: "{{ application_git_branch }}"
force: yes
update: yes
when:
- deployment_method == 'git'
- git_repo_exists.stat.exists
- application_skip_git_update | default(false) | bool == false
- git_branch_check.stdout | default('NEEDS_UPDATE') == 'NEEDS_UPDATE'
environment:
GIT_TERMINAL_PROMPT: "0"
vars:
ansible_become: no
register: git_update_result
retries: "{{ application_git_retries | default(5) }}"
delay: "{{ application_git_retry_delay | default(10) }}"
until: git_update_result is succeeded
ignore_errors: yes
- name: Skip git update (repository already on correct branch or skip flag set)
ansible.builtin.debug:
msg: "Skipping git update - repository already on correct branch or skip_git_update is set"
when:
- deployment_method == 'git'
- git_repo_exists.stat.exists
- (application_skip_git_update | default(false) | bool == true) or (git_branch_check.stdout | default('NEEDS_UPDATE') == 'ALREADY_ON_BRANCH')
- name: Fail if git update failed after retries (Git)
ansible.builtin.fail:
msg: "Failed to update repository after {{ application_git_retries | default(5) }} retries. Gitea may be unreachable or overloaded. Last error: {{ git_update_result.msg | default('Unknown error') }}"
when:
- deployment_method == 'git'
- git_repo_exists.stat.exists
- application_skip_git_update | default(false) | bool == false
- git_branch_check.stdout | default('NEEDS_UPDATE') == 'NEEDS_UPDATE'
- git_update_result is defined
- git_update_result is failed
- name: Set ownership of repository files (Git)
ansible.builtin.file:
path: "{{ application_code_dest }}"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
recurse: yes
become: yes
when: deployment_method == 'git'
# Rsync Deployment Tasks
- name: Clear destination directory before sync (Rsync)
ansible.builtin.shell: |
# Remove all files and directories except .git (if it exists)
find {{ application_code_dest }} -mindepth 1 -maxdepth 1 -not -name '.git' -exec rm -rf {} + 2>/dev/null || true
become: yes
changed_when: true
failed_when: false
register: clear_result
when: deployment_method == 'rsync'
- name: Display clear status (Rsync)
ansible.builtin.debug:
msg: "Cleared destination directory before sync (preserved .git if present)"
when:
- deployment_method == 'rsync'
- clear_result.rc | default(0) == 0
- application_show_status | default(true) | bool
- name: Synchronize application code from repository root (Rsync)
ansible.builtin.synchronize:
src: "{{ application_rsync_source }}/"
dest: "{{ application_code_dest }}/"
delete: no
recursive: yes
rsync_opts: "{{ application_rsync_opts | default(['--chmod=D755,F644', '--exclude=.git', '--exclude=.gitignore', '--exclude=node_modules', '--exclude=vendor', '--exclude=.env', '--exclude=.env.*', '--exclude=*.log', '--exclude=.idea', '--exclude=.vscode', '--exclude=.DS_Store', '--exclude=*.swp', '--exclude=*.swo', '--exclude=*~', '--exclude=.phpunit.result.cache', '--exclude=coverage', '--exclude=.phpunit.cache', '--exclude=public/assets', '--exclude=storage/logs', '--exclude=storage/framework/cache', '--exclude=storage/framework/sessions', '--exclude=storage/framework/views', '--exclude=deployment', '--exclude=docker', '--exclude=.deployment-archive-*', '--exclude=docs', '--exclude=tests']) }}"
when: deployment_method == 'rsync'
delegate_to: localhost
run_once: true
- name: Ensure executable permissions on PHP scripts (Rsync)
ansible.builtin.file:
path: "{{ application_code_dest }}/{{ item }}"
mode: '0755'
loop: "{{ application_php_scripts | default(['worker.php', 'console.php']) }}"
when:
- deployment_method == 'rsync'
- item is defined
ignore_errors: yes
- name: Verify critical files exist (Rsync)
ansible.builtin.stat:
path: "{{ application_code_dest }}/{{ item }}"
register: critical_files_check
loop: "{{ application_critical_files | default(['worker.php', 'console.php', 'composer.json']) }}"
when: deployment_method == 'rsync'
- name: Display file verification results (Rsync)
ansible.builtin.debug:
msg: |
File Verification:
{% for result in critical_files_check.results | default([]) %}
- {{ result.item }}: {{ 'EXISTS' if result.stat.exists else 'MISSING' }}
{% endfor %}
when:
- deployment_method == 'rsync'
- application_show_status | default(true) | bool
- critical_files_check is defined
- name: Fail if critical files are missing (Rsync)
ansible.builtin.fail:
msg: |
Critical files are missing after sync:
{% for result in critical_files_check.results | default([]) %}
{% if not result.stat.exists %}- {{ result.item }}{% endif %}
{% endfor %}
when:
- deployment_method == 'rsync'
- critical_files_check is defined
- critical_files_check.results | selectattr('stat.exists', 'equalto', false) | list | length > 0
- name: Display deployment summary
ansible.builtin.debug:
msg: |
========================================
Application Code Deployment Summary
========================================
Method: {{ deployment_method | upper }}
Destination: {{ application_code_dest }}
{% if deployment_method == 'git' %}
Repository: {{ git_repo_url }}
Branch: {{ application_git_branch }}
{% elif deployment_method == 'rsync' %}
Source: {{ application_rsync_source }}
{% endif %}
========================================
when: application_show_status | default(true) | bool

@@ -0,0 +1,80 @@
---
# Health Check Tasks
- name: Get container status
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} ps {{ application_container_status_services | default('queue-worker web scheduler php') }}
register: container_status
changed_when: false
- name: Display container status
ansible.builtin.debug:
msg: |
{{ container_status.stdout }}
when: application_show_status | default(true) | bool
- name: Get queue-worker logs (last N lines)
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} logs --tail={{ application_health_check_logs_tail | default(20) }} queue-worker 2>&1 || true
register: queue_worker_logs
changed_when: false
- name: Display queue-worker logs
ansible.builtin.debug:
msg: |
================
Queue-Worker Logs:
================
{{ queue_worker_logs.stdout }}
when: application_show_status | default(true) | bool
- name: Get scheduler logs (last N lines)
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} logs --tail={{ application_health_check_logs_tail | default(20) }} scheduler 2>&1 || true
register: scheduler_logs
changed_when: false
- name: Display scheduler logs
ansible.builtin.debug:
msg: |
================
Scheduler Logs:
================
{{ scheduler_logs.stdout }}
when: application_show_status | default(true) | bool
- name: Get web container logs (last N lines)
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} logs --tail={{ application_health_check_logs_tail | default(20) }} web 2>&1 || true
register: web_logs
changed_when: false
- name: Display web container logs
ansible.builtin.debug:
msg: |
================
Web Container Logs:
================
{{ web_logs.stdout }}
when: application_show_status | default(true) | bool
- name: Get all container status (final status check)
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} ps
register: all_containers
changed_when: false
when: application_health_check_final | default(false) | bool
- name: Display all container status (final)
ansible.builtin.debug:
msg: |
{{ all_containers.stdout }}
when:
- application_health_check_final | default(false) | bool
- application_show_status | default(true) | bool

@@ -0,0 +1,155 @@
---
# Log Analysis Tasks
- name: Get queue-worker logs
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} logs --tail={{ application_logs_tail | default(50) }} queue-worker 2>&1 || true
register: queue_worker_logs
changed_when: false
- name: Display queue-worker logs
ansible.builtin.debug:
var: queue_worker_logs.stdout_lines
when: application_show_status | default(true) | bool
- name: Get scheduler logs
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} logs --tail={{ application_logs_tail | default(50) }} scheduler 2>&1 || true
register: scheduler_logs
changed_when: false
- name: Display scheduler logs
ansible.builtin.debug:
var: scheduler_logs.stdout_lines
when: application_show_status | default(true) | bool
- name: Get web container logs
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} logs --tail={{ application_logs_tail | default(50) }} web 2>&1 || true
register: web_logs
changed_when: false
- name: Display web container logs
ansible.builtin.debug:
var: web_logs.stdout_lines
when: application_show_status | default(true) | bool
- name: Check if vendor/autoload.php exists in queue-worker container
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} exec -T queue-worker test -f /var/www/html/vendor/autoload.php && echo "EXISTS" || echo "MISSING"
register: queue_worker_vendor_check
changed_when: false
failed_when: false
ignore_errors: yes
when: application_logs_check_vendor | default(true) | bool
- name: Display queue-worker vendor check
ansible.builtin.debug:
msg: "vendor/autoload.php in queue-worker: {{ queue_worker_vendor_check.stdout | default('CHECK_FAILED') }}"
when:
- application_logs_check_vendor | default(true) | bool
- application_show_status | default(true) | bool
- name: Check if vendor/autoload.php exists in scheduler container
ansible.builtin.shell: |
cd {{ application_code_dest }}
docker compose -f docker-compose.base.yml -f docker-compose.{{ application_compose_suffix }} exec -T scheduler test -f /var/www/html/vendor/autoload.php && echo "EXISTS" || echo "MISSING"
register: scheduler_vendor_check
changed_when: false
failed_when: false
ignore_errors: yes
when: application_logs_check_vendor | default(true) | bool
- name: Display scheduler vendor check
ansible.builtin.debug:
msg: "vendor/autoload.php in scheduler: {{ scheduler_vendor_check.stdout | default('CHECK_FAILED') }}"
when:
- application_logs_check_vendor | default(true) | bool
- application_show_status | default(true) | bool
- name: Check vendor directory permissions on host
ansible.builtin.shell: |
ls -la {{ application_code_dest }}/vendor 2>&1 | head -5 || echo "DIRECTORY_NOT_FOUND"
register: vendor_perms
changed_when: false
when: application_logs_check_permissions | default(true) | bool
- name: Display vendor directory permissions
ansible.builtin.debug:
msg: |
Vendor directory permissions on host:
{{ vendor_perms.stdout }}
when:
- application_logs_check_permissions | default(true) | bool
- application_show_status | default(true) | bool
- name: Check if worker.php exists on host
ansible.builtin.stat:
path: "{{ application_code_dest }}/worker.php"
register: worker_file_host
when: application_logs_check_files | default(true) | bool
- name: Display worker.php host check result
ansible.builtin.debug:
msg: |
worker.php on host:
- Exists: {{ worker_file_host.stat.exists | default(false) }}
{% if worker_file_host.stat.exists %}
- Path: {{ worker_file_host.stat.path }}
- Size: {{ worker_file_host.stat.size | default(0) }} bytes
{% endif %}
when:
- application_logs_check_files | default(true) | bool
- application_show_status | default(true) | bool
- name: Check if console.php exists on host
ansible.builtin.stat:
path: "{{ application_code_dest }}/console.php"
register: console_file_host
when: application_logs_check_files | default(true) | bool
- name: Display console.php host check result
ansible.builtin.debug:
msg: |
console.php on host:
- Exists: {{ console_file_host.stat.exists | default(false) }}
{% if console_file_host.stat.exists %}
- Path: {{ console_file_host.stat.path }}
- Size: {{ console_file_host.stat.size | default(0) }} bytes
{% endif %}
when:
- application_logs_check_files | default(true) | bool
- application_show_status | default(true) | bool
- name: List files in application directory
ansible.builtin.shell: |
ls -la {{ application_code_dest }}/ | head -20
register: app_dir_listing
changed_when: false
when: application_logs_list_files | default(false) | bool
- name: Display application directory listing
ansible.builtin.debug:
var: app_dir_listing.stdout_lines
when:
- application_logs_list_files | default(false) | bool
- application_show_status | default(true) | bool
- name: Check what PHP files exist in application directory
ansible.builtin.shell: |
find {{ application_code_dest }} -maxdepth 1 -name "*.php" -type f 2>/dev/null | head -20
register: php_files
changed_when: false
when: application_logs_list_files | default(false) | bool
- name: Display PHP files found
ansible.builtin.debug:
var: php_files.stdout_lines
when:
- application_logs_list_files | default(false) | bool
- application_show_status | default(true) | bool

@@ -5,6 +5,44 @@
state: directory
mode: '0755'
- name: Ensure secrets directory exists for Docker Compose secrets
file:
path: "{{ application_stack_dest }}/secrets"
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0700'
- name: Ensure parent directory exists for application code
file:
path: "/home/deploy/michaelschiemer"
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0755'
when: application_compose_suffix == 'production.yml'
become: yes
- name: Ensure application code directory exists
file:
path: "/home/deploy/michaelschiemer/current"
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0755'
when: application_compose_suffix == 'production.yml'
become: yes
ignore_errors: yes
- name: Fix ownership of application code directory if needed
command: chown -R {{ ansible_user }}:{{ ansible_user }} /home/deploy/michaelschiemer/current
when:
- application_compose_suffix == 'production.yml'
- ansible_check_mode is not defined or not ansible_check_mode
become: yes
changed_when: false
failed_when: false
- name: Check if vault file exists locally
stat:
path: "{{ application_vault_file }}"
@@ -21,26 +59,70 @@
delegate_to: localhost
become: no
- name: Check if PostgreSQL .env exists on target host
- name: Check if PostgreSQL Production .env exists on target host
stat:
path: "{{ stacks_base_path }}/postgresql-production/.env"
register: application_postgres_production_env_file
changed_when: false
- name: Check if PostgreSQL Staging .env exists on target host (for staging deployments)
stat:
path: "{{ stacks_base_path }}/postgresql-staging/.env"
register: application_postgres_staging_env_file
changed_when: false
when: application_compose_suffix == 'staging.yml'
- name: Extract PostgreSQL Production password from .env file
shell: "grep '^POSTGRES_PASSWORD=' {{ stacks_base_path }}/postgresql-production/.env 2>/dev/null | cut -d'=' -f2- || echo ''"
register: application_postgres_production_password
changed_when: false
failed_when: false
when: application_postgres_production_env_file.stat.exists
no_log: yes
- name: Extract PostgreSQL Staging password from .env file
shell: "grep '^POSTGRES_PASSWORD=' {{ stacks_base_path }}/postgresql-staging/.env 2>/dev/null | cut -d'=' -f2- || echo ''"
register: application_postgres_staging_password
changed_when: false
failed_when: false
when:
- application_compose_suffix == 'staging.yml'
- application_postgres_staging_env_file.stat.exists
no_log: yes
- name: "Fallback: Check if legacy PostgreSQL .env exists on target host"
stat:
path: "{{ stacks_base_path }}/postgresql/.env"
register: application_postgres_env_file
changed_when: false
when: not (application_postgres_production_env_file.stat.exists | default(false))
- name: Extract PostgreSQL password from .env file
- name: "Fallback: Extract PostgreSQL password from legacy .env file"
shell: "grep '^POSTGRES_PASSWORD=' {{ stacks_base_path }}/postgresql/.env 2>/dev/null | cut -d'=' -f2- || echo ''"
register: application_postgres_password
changed_when: false
failed_when: false
when: application_postgres_env_file.stat.exists
when:
- not (application_postgres_production_env_file.stat.exists | default(false))
- application_postgres_env_file.stat.exists
no_log: yes
- name: Determine application database password
set_fact:
application_db_password: >-
{{ (application_postgres_env_file.stat.exists and application_postgres_password.stdout != '') |
ternary(application_postgres_password.stdout,
vault_db_root_password | default(lookup('password', '/dev/null length=32 chars=ascii_letters,digits,punctuation'))) }}
{% if application_compose_suffix == 'staging.yml' %}
{{ (application_postgres_staging_env_file.stat.exists | default(false) and application_postgres_staging_password.stdout | default('') != '') |
ternary(application_postgres_staging_password.stdout,
(application_postgres_env_file.stat.exists | default(false) and application_postgres_password.stdout | default('') != '') |
ternary(application_postgres_password.stdout,
vault_db_root_password | default(lookup('password', '/dev/null length=32 chars=ascii_letters,digits,punctuation')))) }}
{% else %}
{{ (application_postgres_production_env_file.stat.exists | default(false) and application_postgres_production_password.stdout | default('') != '') |
ternary(application_postgres_production_password.stdout,
(application_postgres_env_file.stat.exists | default(false) and application_postgres_password.stdout | default('') != '') |
ternary(application_postgres_password.stdout,
vault_db_root_password | default(lookup('password', '/dev/null length=32 chars=ascii_letters,digits,punctuation')))) }}
{% endif %}
no_log: yes
- name: Determine application redis password
@@ -73,32 +155,55 @@
application_encryption_key: "{{ encryption_key | default(vault_encryption_key | default('')) }}"
no_log: yes
- name: Check if application docker-compose.base.yml source exists locally
- name: Determine project root directory
set_fact:
project_root: "{{ playbook_dir | default(role_path + '/..') | dirname | dirname | dirname }}"
changed_when: false
- name: Check if application docker-compose.base.yml source exists locally (in project root)
stat:
path: "{{ application_stack_src }}/docker-compose.base.yml"
path: "{{ project_root }}/docker-compose.base.yml"
delegate_to: localhost
register: application_compose_base_src
become: no
- name: Check if application docker-compose override file exists locally (production or staging)
stat:
path: "{{ application_stack_src }}/../../../docker-compose.{{ application_compose_suffix }}"
path: "{{ project_root }}/docker-compose.{{ application_compose_suffix }}"
delegate_to: localhost
register: application_compose_override_src
become: no
- name: Copy application docker-compose.base.yml to target host
- name: Check if production-base.yml exists (preferred for production/staging)
stat:
path: "{{ project_root }}/docker-compose.production-base.yml"
delegate_to: localhost
register: application_compose_production_base_src
become: no
- name: Copy application docker-compose.production-base.yml to target host (production/staging)
copy:
src: "{{ application_stack_src }}/docker-compose.base.yml"
src: "{{ project_root }}/docker-compose.production-base.yml"
dest: "{{ application_stack_dest }}/docker-compose.base.yml"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0644'
when: application_compose_base_src.stat.exists
when: application_compose_production_base_src.stat.exists
- name: Copy application docker-compose.base.yml to target host (fallback)
copy:
src: "{{ project_root }}/docker-compose.base.yml"
dest: "{{ application_stack_dest }}/docker-compose.base.yml"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0644'
when:
- not application_compose_production_base_src.stat.exists
- application_compose_base_src.stat.exists
- name: Copy application docker-compose override file to target host (production or staging)
copy:
src: "{{ application_stack_src }}/../../../docker-compose.{{ application_compose_suffix }}"
src: "{{ project_root }}/docker-compose.{{ application_compose_suffix }}"
dest: "{{ application_stack_dest }}/docker-compose.{{ application_compose_suffix }}"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
@@ -141,14 +246,69 @@
mode: '0644'
when: application_nginx_src.stat.exists
- name: Expose secrets for template rendering
- name: Debug - Check available variables before set_fact
debug:
msg:
- "application_environment: {{ application_environment | default('NOT SET') }}"
- "app_env: {{ app_env | default('NOT SET') }}"
- "application_compose_suffix: {{ application_compose_suffix | default('NOT SET') }}"
- "app_domain (from vars): {{ 'DEFINED' if app_domain is defined else 'NOT SET' }}"
- "db_user_default: {{ db_user_default | default('NOT SET') }}"
- "db_name_default: {{ db_name_default | default('NOT SET') }}"
- "db_host_default: {{ db_host_default | default('NOT SET') }}"
- "application_db_password: {{ 'SET (length: ' + (application_db_password | default('') | string | length | string) + ')' if (application_db_password | default('') | string | trim) != '' else 'NOT SET' }}"
- "application_redis_password: {{ 'SET (length: ' + (application_redis_password | default('') | string | length | string) + ')' if (application_redis_password | default('') | string | trim) != '' else 'NOT SET' }}"
- "application_app_key: {{ 'SET (length: ' + (application_app_key | default('') | string | length | string) + ')' if (application_app_key | default('') | string | trim) != '' else 'NOT SET' }}"
changed_when: false
- name: Determine application environment for domain resolution
set_fact:
db_password: "{{ application_db_password }}"
redis_password: "{{ application_redis_password }}"
app_key: "{{ application_app_key }}"
encryption_key: "{{ application_encryption_key }}"
db_username: "{{ db_user | default(db_user_default) }}"
db_name: "{{ db_name | default(db_name_default) }}"
_app_env: "{{ app_env | default(application_environment | default('production')) }}"
no_log: yes
- name: Expose secrets for template rendering (step 1 - basic vars)
set_fact:
db_password: "{{ application_db_password | default('') }}"
redis_password: "{{ application_redis_password | default('') }}"
app_key: "{{ application_app_key | default('') }}"
encryption_key: "{{ application_encryption_key | default('') }}"
app_env: "{{ _app_env }}"
minio_root_user: "{{ minio_root_user | default('minioadmin') }}"
minio_root_password: "{{ minio_root_password | default('') }}"
no_log: yes
- name: Expose secrets for template rendering (step 2 - db vars)
set_fact:
db_username: "{{ db_user | default(db_user_default | default('postgres')) }}"
db_name: "{{ db_name | default(db_name_default | default('michaelschiemer')) }}"
no_log: yes
- name: Expose secrets for template rendering (step 3 - db_host with conditional)
set_fact:
db_host: >-
{%- if db_host is defined and db_host | string | trim != '' -%}
{{ db_host }}
{%- elif db_host_default is defined and db_host_default | string | trim != '' -%}
{{ db_host_default }}
{%- elif application_compose_suffix == 'production.yml' -%}
postgres-production
{%- elif application_compose_suffix == 'staging.yml' -%}
postgres-staging
{%- else -%}
postgres
{%- endif -%}
no_log: yes
- name: Expose secrets for template rendering (step 4 - app_domain)
set_fact:
app_domain: >-
{%- if app_domain is defined and app_domain | string | trim != '' -%}
{{ app_domain }}
{%- elif _app_env == 'production' -%}
michaelschiemer.de
{%- else -%}
staging.michaelschiemer.de
{%- endif -%}
no_log: yes
- name: Render application environment file
@@ -158,3 +318,21 @@
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0600'
- name: Create Docker Compose secret files from determined passwords
copy:
content: "{{ item.value }}"
dest: "{{ application_stack_dest }}/secrets/{{ item.name }}.txt"
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: '0600'
loop:
- name: db_user_password
value: "{{ application_db_password }}"
- name: redis_password
value: "{{ application_redis_password }}"
- name: app_key
value: "{{ application_app_key }}"
- name: vault_encryption_key
value: "{{ application_encryption_key | default(application_app_key) }}"
no_log: yes

@@ -0,0 +1,17 @@
---
# Handlers for Gitea Role
- name: wait for gitea
  ansible.builtin.uri:
    url: "{{ gitea_url }}/api/healthz"
    method: GET
    status_code: [200]
    validate_certs: false
    timeout: "{{ gitea_health_check_timeout | default(10) }}"
  register: gitea_health_handler
  until: gitea_health_handler.status == 200
  retries: "{{ gitea_restart_retries | default(30) }}"
  delay: "{{ gitea_restart_delay | default(2) }}"
  changed_when: false
  ignore_errors: yes

@@ -0,0 +1,131 @@
---
# Update Gitea Configuration (app.ini)
- name: Verify Gitea container exists
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml ps {{ gitea_container_name }} | grep -q "{{ gitea_container_name }}"
register: gitea_exists
changed_when: false
failed_when: false
- name: Fail if Gitea container does not exist
ansible.builtin.fail:
msg: "Gitea container does not exist. Please deploy Gitea stack first."
when: gitea_exists.rc != 0
- name: Get database configuration from environment
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T {{ gitea_container_name }} env | grep -E "^GITEA__database__" || true
register: gitea_db_env
changed_when: false
failed_when: false
- name: Parse database configuration
ansible.builtin.set_fact:
gitea_db_type: "{{ (gitea_db_env.stdout | default('') | regex_search('GITEA__database__DB_TYPE=([^\n]+)', '\\1') or ['postgres']) | first }}"
gitea_db_host: "{{ (gitea_db_env.stdout | default('') | regex_search('GITEA__database__HOST=([^\n]+)', '\\1') or ['postgres:5432']) | first }}"
gitea_db_name: "{{ (gitea_db_env.stdout | default('') | regex_search('GITEA__database__NAME=([^\n]+)', '\\1') or ['gitea']) | first }}"
gitea_db_user: "{{ (gitea_db_env.stdout | default('') | regex_search('GITEA__database__USER=([^\n]+)', '\\1') or ['gitea']) | first }}"
gitea_db_passwd: "{{ (gitea_db_env.stdout | default('') | regex_search('GITEA__database__PASSWD=([^\n]+)', '\\1') or ['gitea_password']) | first }}"
- name: Get Gitea server configuration from environment
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T {{ gitea_container_name }} env | grep -E "^GITEA__server__" || true
register: gitea_server_env
changed_when: false
failed_when: false
- name: Parse server configuration
ansible.builtin.set_fact:
gitea_domain_parsed: "{{ (gitea_server_env.stdout | default('') | regex_search('GITEA__server__DOMAIN=([^\n]+)', '\\1') or [gitea_domain | default('git.michaelschiemer.de')]) | first }}"
ssh_port_parsed: "{{ (gitea_server_env.stdout | default('') | regex_search('GITEA__server__SSH_PORT=([^\n]+)', '\\1') or ['2222']) | first }}"
- name: Set final configuration variables
ansible.builtin.set_fact:
gitea_domain: "{{ gitea_domain_parsed }}"
ssh_port: "{{ ssh_port_parsed }}"
ssh_listen_port: "{{ ssh_port_parsed }}"
- name: Extract database host and port
ansible.builtin.set_fact:
gitea_db_hostname: "{{ gitea_db_host.split(':')[0] }}"
gitea_db_port: "{{ (gitea_db_host.split(':')[1]) | default('5432') }}"
- name: Set Redis password
ansible.builtin.set_fact:
redis_password: "{{ vault_gitea_redis_password | default(vault_redis_password | default('gitea_redis_password')) }}"
- name: Generate app.ini from template
ansible.builtin.template:
src: "{{ gitea_app_ini_template | default('../../templates/gitea-app.ini.j2') }}"
dest: "{{ gitea_app_ini_path }}"
mode: '0644'
vars:
postgres_db: "{{ gitea_db_name }}"
postgres_user: "{{ gitea_db_user }}"
postgres_password: "{{ gitea_db_passwd }}"
gitea_domain: "{{ gitea_domain }}"
ssh_port: "{{ ssh_port }}"
ssh_listen_port: "{{ ssh_listen_port }}"
disable_registration: true
redis_password: "{{ redis_password }}"
- name: Copy app.ini to Gitea container
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml cp {{ gitea_app_ini_path }} {{ gitea_container_name }}:{{ gitea_app_ini_container_path }}
ignore_errors: yes
- name: Wait for container to be ready for exec
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T {{ gitea_container_name }} true
register: container_ready
until: container_ready.rc == 0
retries: "{{ gitea_config_retries | default(30) }}"
delay: "{{ gitea_config_delay | default(2) }}"
changed_when: false
- name: Set correct permissions on app.ini in container
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T {{ gitea_container_name }} chown 1000:1000 {{ gitea_app_ini_container_path }} && \
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T {{ gitea_container_name }} chmod 644 {{ gitea_app_ini_container_path }}
- name: Restart Gitea container
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml restart {{ gitea_container_name }}
register: gitea_restart
changed_when: gitea_restart.rc == 0
notify: wait for gitea
- name: Wait for Gitea to be ready after restart
ansible.builtin.uri:
url: "{{ gitea_url }}/api/healthz"
method: GET
status_code: [200]
validate_certs: false
timeout: "{{ gitea_health_check_timeout | default(10) }}"
register: gitea_health_after_restart
until: gitea_health_after_restart.status == 200
retries: "{{ gitea_restart_retries | default(30) }}"
delay: "{{ gitea_restart_delay | default(5) }}"
when: gitea_restart.changed | default(false)
changed_when: false
- name: Display success message
ansible.builtin.debug:
msg: |
========================================
Gitea Configuration Update Complete
========================================
Gitea configuration has been updated successfully!
Changes applied:
- Redis cache enabled (persistent, survives container restarts)
- Redis sessions enabled (better performance and scalability)
- Redis queue enabled (persistent job processing)
- Database connection pooling configured
- Connection limits set to prevent "Connection reset by peer" errors
Gitea should now be more stable and perform better with Redis.
========================================
when: gitea_show_status | default(true) | bool
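The tasks above expect the stack location, container naming and app.ini paths to come from the role or inventory; a rough sketch of plausible inputs (all values are assumptions, not the committed defaults):
# Illustrative inputs for the app.ini update tasks
gitea_stack_path: "{{ stacks_base_path }}/gitea"
gitea_container_name: gitea
gitea_app_ini_path: "{{ gitea_stack_path }}/app.ini"       # rendered on the host
gitea_app_ini_container_path: /data/gitea/conf/app.ini     # copied into the container
gitea_url: "https://{{ gitea_domain | default('git.michaelschiemer.de') }}"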

View File

@@ -0,0 +1,258 @@
---
# Setup Gitea Repository
- name: Set repository variables from parameters
ansible.builtin.set_fact:
repo_name: "{{ gitea_repo_name | default('michaelschiemer') }}"
repo_owner: "{{ gitea_repo_owner | default('michael') }}"
repo_private: "{{ gitea_repo_private | default(false) | bool }}"
repo_description: "{{ gitea_repo_description | default('Main application repository') }}"
repo_auto_init: "{{ gitea_repo_auto_init | default(false) | bool }}"
configure_git_remote: "{{ gitea_configure_git_remote | default(true) | bool }}"
git_repo_path: "{{ gitea_git_repo_path | default('/home/michael/dev/michaelschiemer') }}"
- name: Verify Gitea is accessible
ansible.builtin.uri:
url: "{{ gitea_url }}"
method: GET
status_code: [200, 302, 502]
validate_certs: false
timeout: "{{ gitea_health_check_timeout | default(10) }}"
register: gitea_health
failed_when: false
- name: Debug Gitea health status
ansible.builtin.debug:
msg: "Gitea health check returned status: {{ gitea_health.status }}"
when: gitea_show_status | default(true) | bool
- name: Fail if Gitea is not accessible
ansible.builtin.fail:
msg: "Gitea is not accessible at {{ gitea_url }}. Status: {{ gitea_health.status }}. Please check if Gitea is running."
when: gitea_health.status not in [200, 302, 502]
- name: Check if API token exists in vault
ansible.builtin.set_fact:
has_vault_token: "{{ vault_git_token is defined and vault_git_token | length > 0 }}"
no_log: true
- name: Get or create Gitea API token
ansible.builtin.uri:
url: "{{ gitea_url }}/api/v1/users/{{ gitea_admin_username }}/tokens"
method: POST
user: "{{ gitea_admin_username }}"
password: "{{ gitea_admin_password }}"
body_format: json
body:
name: "ansible-repo-setup-{{ ansible_date_time.epoch }}"
scopes:
- write:repository
- read:repository
- admin:repo
status_code: [201, 400, 401, 502]
validate_certs: false
force_basic_auth: yes
register: api_token_result
failed_when: false
when: not has_vault_token
no_log: true
- name: Extract API token from response
ansible.builtin.set_fact:
gitea_api_token: "{{ api_token_result.json.sha1 | default('') }}"
when:
- not has_vault_token
- api_token_result.status == 201
- api_token_result.json.sha1 is defined
no_log: true
- name: Use existing API token from vault
ansible.builtin.set_fact:
gitea_api_token: "{{ vault_git_token }}"
when: has_vault_token
no_log: true
- name: Set flag to use basic auth if token creation failed
ansible.builtin.set_fact:
use_basic_auth: "{{ gitea_api_token | default('') | length == 0 }}"
no_log: true
- name: Fail if no authentication method available
ansible.builtin.fail:
msg: "Could not create or retrieve Gitea API token, and admin credentials are not available. Please create a token manually or set vault_git_token in vault."
when:
- use_basic_auth | bool
- gitea_admin_password | default('') | length == 0
- name: Initialize repo_check variable
ansible.builtin.set_fact:
repo_check: {"status": 0}
when: repo_check is not defined
- name: Check if repository already exists (with token)
ansible.builtin.uri:
url: "{{ gitea_url }}/api/v1/repos/{{ repo_owner }}/{{ repo_name }}"
method: GET
headers:
Authorization: "token {{ gitea_api_token }}"
status_code: [200, 404, 502]
validate_certs: false
timeout: "{{ gitea_health_check_timeout | default(10) }}"
register: repo_check_token
when: not use_basic_auth
failed_when: false
- name: Set repo_check from token result
ansible.builtin.set_fact:
repo_check: "{{ repo_check_token }}"
when:
- not use_basic_auth
- repo_check_token is defined
- name: Check if repository already exists (with basic auth)
ansible.builtin.uri:
url: "{{ gitea_url }}/api/v1/repos/{{ repo_owner }}/{{ repo_name }}"
method: GET
user: "{{ gitea_admin_username }}"
password: "{{ gitea_admin_password }}"
status_code: [200, 404, 502]
validate_certs: false
force_basic_auth: yes
timeout: "{{ gitea_health_check_timeout | default(10) }}"
register: repo_check_basic
when: use_basic_auth
failed_when: false
no_log: true
- name: Set repo_check from basic auth result
ansible.builtin.set_fact:
repo_check: "{{ repo_check_basic }}"
when:
- use_basic_auth
- repo_check_basic is defined
- name: Debug repo_check status
ansible.builtin.debug:
msg: "Repository check - Status: {{ repo_check.status | default('undefined') }}, use_basic_auth: {{ use_basic_auth | default('undefined') }}"
when: gitea_show_status | default(true) | bool
- name: Create repository in Gitea (with token)
ansible.builtin.uri:
url: "{{ gitea_url }}/api/v1/user/repos"
method: POST
headers:
Authorization: "token {{ gitea_api_token }}"
Content-Type: "application/json"
body_format: json
body:
name: "{{ repo_name }}"
description: "{{ repo_description }}"
private: "{{ repo_private }}"
auto_init: "{{ repo_auto_init }}"
status_code: [201, 409, 502]
validate_certs: false
timeout: "{{ gitea_health_check_timeout | default(10) }}"
register: repo_create_result
when:
- (repo_check.status | default(0)) in [404, 502, 0] or (gitea_force_create_repo | default(false) | bool)
- not use_basic_auth
failed_when: false
- name: Create repository in Gitea (with basic auth)
ansible.builtin.uri:
url: "{{ gitea_url }}/api/v1/user/repos"
method: POST
user: "{{ gitea_admin_username }}"
password: "{{ gitea_admin_password }}"
body_format: json
body:
name: "{{ repo_name }}"
description: "{{ repo_description }}"
private: "{{ repo_private }}"
auto_init: "{{ repo_auto_init }}"
status_code: [201, 409]
validate_certs: false
force_basic_auth: yes
timeout: "{{ gitea_health_check_timeout | default(10) }}"
register: repo_create_result
when:
- ((repo_check.status | default(0)) != 200) or (gitea_force_create_repo | default(false) | bool)
- use_basic_auth
no_log: true
- name: Debug repository creation result
ansible.builtin.debug:
msg: "Repository creation - Status: {{ repo_create_result.status | default('undefined') }}, Response: {{ repo_create_result.json | default('no json') }}"
when:
- repo_create_result is defined
- gitea_show_status | default(true) | bool
failed_when: false
- name: Display repository creation result
ansible.builtin.debug:
msg: "Repository {{ repo_owner }}/{{ repo_name }} already exists or was created successfully"
when: repo_check.status | default(0) == 200 or (repo_create_result is defined and repo_create_result.status | default(0) == 201)
- name: Get repository clone URL
ansible.builtin.set_fact:
repo_clone_url: "{{ gitea_url | replace('https://', '') | replace('http://', '') }}/{{ repo_owner }}/{{ repo_name }}.git"
repo_https_url: "https://{{ gitea_admin_username }}:{{ gitea_api_token }}@{{ gitea_url | replace('https://', '') | replace('http://', '') }}/{{ repo_owner }}/{{ repo_name }}.git"
- name: Check if Git repository exists locally
ansible.builtin.stat:
path: "{{ git_repo_path }}/.git"
register: git_repo_exists
when: configure_git_remote | bool
delegate_to: localhost
run_once: true
- name: Configure Git remote (local)
ansible.builtin.command: >
git remote set-url origin {{ repo_clone_url }}
args:
chdir: "{{ git_repo_path }}"
register: git_remote_result
when:
- configure_git_remote | bool
- git_repo_path is defined
- git_repo_exists.stat.exists
delegate_to: localhost
run_once: true
changed_when: git_remote_result.rc == 0
failed_when: false
- name: Add Git remote if it doesn't exist
ansible.builtin.command: >
git remote add origin {{ repo_clone_url }}
args:
chdir: "{{ git_repo_path }}"
register: git_remote_add_result
when:
- configure_git_remote | bool
- git_repo_path is defined
- git_repo_exists.stat.exists
- git_remote_result.rc != 0
delegate_to: localhost
run_once: true
changed_when: git_remote_add_result.rc == 0
failed_when: false
- name: Display success message
ansible.builtin.debug:
msg:
- "========================================"
- "✅ Repository created successfully!"
- "========================================"
- "Repository URL: {{ gitea_url }}/{{ repo_owner }}/{{ repo_name }}"
- "Clone URL: {{ repo_clone_url }}"
- ""
- "Next steps:"
- "1. Push your code: git push -u origin staging"
- "2. Monitor pipeline: {{ gitea_url }}/{{ repo_owner }}/{{ repo_name }}/actions"
- ""
- "Note: If you need to push, you may need to authenticate with:"
- " Username: {{ gitea_admin_username }}"
- " Password: (use vault_gitea_admin_password or create a Personal Access Token)"
- "========================================"
when: gitea_show_status | default(true) | bool
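If an API token already exists, the token-creation call above can be skipped by pre-seeding the vault; a minimal sketch, using the vault file referenced elsewhere in this commit and a placeholder value:
# deployment/ansible/secrets/production.vault.yml (illustrative entry, placeholder value)
vault_git_token: "<existing-gitea-personal-access-token>"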

View File

@@ -0,0 +1,329 @@
---
# Gitea Runner Management Tasks
# Supports both fix (diagnose) and register actions
- name: Check if Gitea runner directory exists
ansible.builtin.stat:
path: "{{ gitea_runner_path }}"
register: runner_dir_exists
- name: Fail if runner directory does not exist
ansible.builtin.fail:
msg: "Gitea runner directory not found at {{ gitea_runner_path }}"
when: not runner_dir_exists.stat.exists
- name: Check if runner container is running
ansible.builtin.shell: |
docker ps --format json | jq -r 'select(.Names == "{{ gitea_runner_container_name }}") | .State'
register: runner_container_state
changed_when: false
failed_when: false
- name: Display runner container status
ansible.builtin.debug:
msg: |
Runner Container Status: {{ runner_container_state.stdout | default('NOT RUNNING') }}
when: gitea_runner_show_status | default(true) | bool
- name: Check if .runner file exists
ansible.builtin.stat:
path: "{{ gitea_runner_path }}/data/.runner"
register: runner_file_exists
- name: Read .runner file content (if exists)
ansible.builtin.slurp:
src: "{{ gitea_runner_path }}/data/.runner"
register: runner_file_content
when: runner_file_exists.stat.exists
changed_when: false
- name: Display .runner file content
ansible.builtin.debug:
msg: |
Runner Registration File Content:
{{ runner_file_content.content | b64decode | default('File not found') }}
when:
- runner_file_exists.stat.exists
- gitea_runner_show_status | default(true) | bool
- name: Check for GitHub URLs in .runner file
ansible.builtin.shell: |
grep -i "github.com" "{{ gitea_runner_path }}/data/.runner" 2>/dev/null || echo "NO_GITHUB_URLS"
register: github_urls_check
when: runner_file_exists.stat.exists
changed_when: false
failed_when: false
- name: Display GitHub URLs check result
ansible.builtin.debug:
msg: |
GitHub URLs in .runner file: {{ github_urls_check.stdout }}
when: gitea_runner_show_status | default(true) | bool
- name: Check runner logs for incorrect URLs
ansible.builtin.shell: |
docker logs {{ gitea_runner_container_name }} --tail=100 2>&1 | grep -E "(github.com|instance|repo)" || echo "NO_MATCHES"
register: runner_logs_check
changed_when: false
failed_when: false
- name: Display runner logs analysis
ansible.builtin.debug:
msg: |
Runner Logs Analysis (last 100 lines):
{{ runner_logs_check.stdout }}
when: gitea_runner_show_status | default(true) | bool
- name: Check .env file for GITEA_INSTANCE_URL
ansible.builtin.shell: |
grep "^GITEA_INSTANCE_URL=" "{{ gitea_runner_path }}/.env" 2>/dev/null || echo "NOT_FOUND"
register: env_instance_url
changed_when: false
failed_when: false
- name: Display GITEA_INSTANCE_URL from .env
ansible.builtin.debug:
msg: |
GITEA_INSTANCE_URL in .env: {{ env_instance_url.stdout }}
when: gitea_runner_show_status | default(true) | bool
- name: Check if .env has correct Gitea URL
ansible.builtin.set_fact:
env_has_correct_url: "{{ env_instance_url.stdout is defined and gitea_instance_url in env_instance_url.stdout }}"
- name: Check if runner needs re-registration (for fix action)
ansible.builtin.set_fact:
runner_needs_reregistration: >-
{%- if not runner_file_exists.stat.exists -%}
true
{%- elif 'github.com' in (github_urls_check.stdout | default('')) -%}
true
{%- elif not env_has_correct_url -%}
true
{%- else -%}
false
{%- endif -%}
when: gitea_runner_action | default('fix') == 'fix'
- name: Display re-registration decision
ansible.builtin.debug:
msg: |
Runner Re-registration Needed: {{ runner_needs_reregistration | bool }}
Reasons:
- Runner file exists: {{ runner_file_exists.stat.exists }}
- Contains GitHub URLs: {{ 'github.com' in (github_urls_check.stdout | default('')) }}
- .env has correct URL: {{ env_has_correct_url | bool }}
when:
- gitea_runner_action | default('fix') == 'fix'
- gitea_runner_show_status | default(true) | bool
- name: Fail if registration token is not provided (for register action)
ansible.builtin.fail:
msg: "gitea_runner_registration_token must be provided via -e 'gitea_runner_registration_token=<token>'"
when:
- gitea_runner_action | default('fix') == 'register'
- gitea_runner_registration_token | string | trim == ''
- name: Stop runner container before re-registration (fix action)
ansible.builtin.shell: |
cd {{ gitea_runner_path }}
docker compose stop {{ gitea_runner_container_name }}
when:
- gitea_runner_action | default('fix') == 'fix'
- runner_needs_reregistration | bool
register: stop_runner
changed_when: stop_runner.rc == 0
- name: Stop runner container if running (register action)
ansible.builtin.shell: |
cd {{ gitea_runner_path }}
docker compose stop {{ gitea_runner_container_name }}
when: gitea_runner_action | default('fix') == 'register'
register: stop_result
changed_when: stop_result.rc == 0
failed_when: false
- name: Backup existing .runner file
ansible.builtin.copy:
src: "{{ gitea_runner_path }}/data/.runner"
dest: "{{ gitea_runner_path }}/data/.runner.backup.{{ ansible_date_time.epoch }}"
remote_src: yes
when:
- runner_file_exists.stat.exists
- (gitea_runner_action | default('fix') == 'register') or (runner_needs_reregistration | bool)
ignore_errors: yes
- name: Remove existing .runner file
ansible.builtin.file:
path: "{{ gitea_runner_path }}/data/.runner"
state: absent
when:
- (gitea_runner_action | default('fix') == 'register') or (runner_needs_reregistration | bool)
- name: Update .env file with correct GITEA_INSTANCE_URL (fix action)
ansible.builtin.lineinfile:
path: "{{ gitea_runner_path }}/.env"
regexp: '^GITEA_INSTANCE_URL='
line: "GITEA_INSTANCE_URL={{ gitea_instance_url }}"
create: yes
when:
- gitea_runner_action | default('fix') == 'fix'
- runner_needs_reregistration | bool
register: env_updated
- name: Update .env file with correct configuration (register action)
ansible.builtin.lineinfile:
path: "{{ gitea_runner_path }}/.env"
regexp: '^{{ item.key }}='
line: "{{ item.key }}={{ item.value }}"
create: yes
loop:
- { key: 'GITEA_INSTANCE_URL', value: '{{ gitea_instance_url }}' }
- { key: 'GITEA_RUNNER_REGISTRATION_TOKEN', value: '{{ gitea_runner_registration_token }}' }
- { key: 'GITEA_RUNNER_NAME', value: '{{ gitea_runner_name }}' }
- { key: 'GITEA_RUNNER_LABELS', value: '{{ gitea_runner_labels }}' }
when: gitea_runner_action | default('fix') == 'register'
no_log: true
- name: Display instructions for manual re-registration (fix action)
ansible.builtin.debug:
msg: |
========================================
Runner Re-registration Required
========================================
The runner needs to be re-registered with the correct Gitea instance URL.
Steps to re-register:
1. Get a new registration token from Gitea:
{{ gitea_instance_url }}/admin/actions/runners
Click "Create New Runner" and copy the token
2. Update .env file with the token:
GITEA_RUNNER_REGISTRATION_TOKEN=<your-token>
3. Re-register the runner:
cd {{ gitea_runner_path }}
./register.sh
Or use Ansible to set the token and register:
ansible-playbook -i inventory/production.yml \
playbooks/register-gitea-runner.yml \
-e "gitea_runner_registration_token=<your-token>"
========================================
when:
- gitea_runner_action | default('fix') == 'fix'
- runner_needs_reregistration | bool
- gitea_runner_show_status | default(true) | bool
- name: Start runner services (register action)
ansible.builtin.shell: |
cd {{ gitea_runner_path }}
docker compose up -d
when: gitea_runner_action | default('fix') == 'register'
register: start_services
changed_when: start_services.rc == 0
- name: Wait for services to be ready (register action)
ansible.builtin.pause:
seconds: "{{ gitea_runner_wait_seconds | default(5) }}"
when: gitea_runner_action | default('fix') == 'register'
- name: Register runner with correct Gitea instance (register action)
ansible.builtin.shell: |
cd {{ gitea_runner_path }}
docker compose exec -T {{ gitea_runner_container_name }} act_runner register \
--instance "{{ gitea_instance_url }}" \
--token "{{ gitea_runner_registration_token }}" \
--name "{{ gitea_runner_name }}" \
--labels "{{ gitea_runner_labels }}"
when: gitea_runner_action | default('fix') == 'register'
register: register_result
no_log: true
changed_when: register_result.rc == 0
- name: Display registration result (register action)
ansible.builtin.debug:
msg: |
Runner Registration Result:
{{ register_result.stdout | default('No output') }}
when:
- gitea_runner_action | default('fix') == 'register'
- register_result.rc == 0
- gitea_runner_show_status | default(true) | bool
- name: Verify .runner file was created (register action)
ansible.builtin.stat:
path: "{{ gitea_runner_path }}/data/.runner"
register: runner_file_created
when: gitea_runner_action | default('fix') == 'register'
- name: Check .runner file for correct instance URL (register action)
ansible.builtin.shell: |
grep -i "{{ gitea_instance_url }}" "{{ gitea_runner_path }}/data/.runner" 2>/dev/null || echo "URL_NOT_FOUND"
register: runner_url_check
when:
- gitea_runner_action | default('fix') == 'register'
- runner_file_created.stat.exists
changed_when: false
- name: Check .runner file for GitHub URLs (register action)
ansible.builtin.shell: |
grep -i "github.com" "{{ gitea_runner_path }}/data/.runner" 2>/dev/null || echo "NO_GITHUB_URLS"
register: runner_github_check
when:
- gitea_runner_action | default('fix') == 'register'
- runner_file_created.stat.exists
changed_when: false
- name: Display final status (fix action)
ansible.builtin.debug:
msg: |
========================================
Gitea Runner Configuration Status
========================================
Runner Directory: {{ gitea_runner_path }}
Container Running: {{ 'YES' if runner_container_state.stdout == 'running' else 'NO' }}
Runner File Exists: {{ 'YES' if runner_file_exists.stat.exists else 'NO' }}
Contains GitHub URLs: {{ 'YES' if 'github.com' in (github_urls_check.stdout | default('')) else 'NO' }}
.env has correct URL: {{ 'YES' if env_has_correct_url else 'NO' }}
Re-registration Needed: {{ 'YES' if runner_needs_reregistration | bool else 'NO' }}
========================================
{% if not runner_needs_reregistration | bool %}
✅ Runner configuration looks correct!
{% else %}
⚠️ Runner needs to be re-registered with correct Gitea URL
{% endif %}
when:
- gitea_runner_action | default('fix') == 'fix'
- gitea_runner_show_status | default(true) | bool
- name: Display final status (register action)
ansible.builtin.debug:
msg: |
========================================
Gitea Runner Registration Status
========================================
Registration: {{ '✅ SUCCESS' if register_result.rc == 0 else '❌ FAILED' }}
Runner File Created: {{ '✅ YES' if runner_file_created.stat.exists else '❌ NO' }}
Contains Correct URL: {{ '✅ YES' if 'URL_NOT_FOUND' not in runner_url_check.stdout else '❌ NO' }}
Contains GitHub URLs: {{ '❌ YES' if 'NO_GITHUB_URLS' not in runner_github_check.stdout else '✅ NO' }}
========================================
{% if register_result.rc == 0 and runner_file_created.stat.exists %}
✅ Runner registered successfully with {{ gitea_instance_url }}!
Check runner status:
{{ gitea_instance_url }}/admin/actions/runners
{% else %}
❌ Registration failed. Check logs:
docker logs {{ gitea_runner_container_name }}
{% endif %}
when:
- gitea_runner_action | default('fix') == 'register'
- gitea_runner_show_status | default(true) | bool
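The fix/register flow above is driven by a handful of runner variables; a rough sketch of plausible values (all illustrative; the label string follows the upstream act_runner "name:docker://image" convention):
# Illustrative runner variables (not the committed defaults)
gitea_runner_path: /home/deploy/deployment/gitea-runner
gitea_runner_container_name: gitea-runner
gitea_instance_url: "https://git.michaelschiemer.de"
gitea_runner_name: production-runner
gitea_runner_labels: "ubuntu-latest:docker://node:20-bullseye"
gitea_runner_action: fix          # or "register", which also requires gitea_runner_registration_token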

View File

@@ -0,0 +1,287 @@
---
# Setup Gitea Initial Configuration
- name: Verify Gitea container exists
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml ps {{ gitea_container_name }} | grep -q "{{ gitea_container_name }}"
register: gitea_exists
changed_when: false
failed_when: false
- name: Fail if Gitea container does not exist
ansible.builtin.fail:
msg: "Gitea container does not exist. Please deploy Gitea stack first using: ansible-playbook -i inventory/production.yml playbooks/setup-infrastructure.yml --tags gitea"
when: gitea_exists.rc != 0
- name: Wait for Gitea to be ready
ansible.builtin.uri:
url: "{{ gitea_url }}/api/healthz"
method: GET
status_code: [200, 404]
validate_certs: false
timeout: "{{ gitea_health_check_timeout | default(10) }}"
register: gitea_health
until: gitea_health.status == 200
retries: "{{ gitea_setup_health_retries | default(30) }}"
delay: "{{ gitea_setup_health_delay | default(5) }}"
ignore_errors: yes
changed_when: false
when: not (gitea_force_update_app_ini | default(false) | bool)
- name: Check if Gitea is already configured
ansible.builtin.uri:
url: "{{ gitea_url }}"
method: GET
status_code: [200, 302, 502]
validate_certs: false
timeout: "{{ gitea_health_check_timeout | default(10) }}"
follow_redirects: none
return_content: yes
register: gitea_main_check
changed_when: false
failed_when: false
- name: Check if app.ini exists in container
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T {{ gitea_container_name }} test -f {{ gitea_app_ini_container_path }}
register: gitea_app_ini_exists
changed_when: false
failed_when: false
- name: Check if INSTALL_LOCK is set
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T {{ gitea_container_name }} grep -q "INSTALL_LOCK = true" {{ gitea_app_ini_container_path }} 2>/dev/null || echo "not_set"
register: gitea_install_lock_check
changed_when: false
failed_when: false
when: gitea_app_ini_exists.rc == 0
- name: Determine if Gitea needs setup
ansible.builtin.set_fact:
gitea_needs_setup: "{{ (gitea_force_update_app_ini | default(false) | bool) or ('installation' in (gitea_main_check.content | default('') | lower) or 'initial configuration' in (gitea_main_check.content | default('') | lower)) or (gitea_app_ini_exists.rc != 0) or (gitea_install_lock_check.stdout | default('') | trim == 'not_set') }}"
gitea_already_configured: "{{ not (gitea_force_update_app_ini | default(false) | bool) and 'installation' not in (gitea_main_check.content | default('') | lower) and 'initial configuration' not in (gitea_main_check.content | default('') | lower) and gitea_app_ini_exists.rc == 0 and gitea_install_lock_check.stdout | default('') | trim != 'not_set' }}"
- name: Display setup status
ansible.builtin.debug:
msg: |
Gitea Setup Status:
- Main page status: {{ gitea_main_check.status }}
- app.ini exists: {{ gitea_app_ini_exists.rc == 0 }}
- INSTALL_LOCK set: {{ gitea_install_lock_check.stdout | default('unknown') }}
- Force update: {{ gitea_force_update_app_ini | default(false) }}
- Already configured: {{ gitea_already_configured }}
- Needs setup: {{ gitea_needs_setup }}
when: gitea_show_status | default(true) | bool
- name: Fail if admin password is not set
ansible.builtin.fail:
msg: |
Gitea admin password is not set in vault.
Please set vault_gitea_admin_password in:
- deployment/ansible/secrets/production.vault.yml
To set it, run:
ansible-vault edit secrets/production.vault.yml --vault-password-file secrets/.vault_pass
Then add:
vault_gitea_admin_password: "your-secure-password"
when:
- gitea_needs_setup | bool
- gitea_admin_password | default('') | trim == ''
- name: Get Gitea database configuration from environment
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T {{ gitea_container_name }} env | grep -E "^GITEA__database__" || true
register: gitea_db_env
changed_when: false
failed_when: false
when: gitea_needs_setup | bool
- name: Parse database configuration
ansible.builtin.set_fact:
gitea_db_type: "{{ (gitea_db_env.stdout | default('') | regex_search('GITEA__database__DB_TYPE=([^\n]+)', '\\1') or ['postgres']) | first }}"
gitea_db_host: "{{ (gitea_db_env.stdout | default('') | regex_search('GITEA__database__HOST=([^\n]+)', '\\1') or ['postgres:5432']) | first }}"
gitea_db_name: "{{ (gitea_db_env.stdout | default('') | regex_search('GITEA__database__NAME=([^\n]+)', '\\1') or ['gitea']) | first }}"
gitea_db_user: "{{ (gitea_db_env.stdout | default('') | regex_search('GITEA__database__USER=([^\n]+)', '\\1') or ['gitea']) | first }}"
gitea_db_passwd: "{{ (gitea_db_env.stdout | default('') | regex_search('GITEA__database__PASSWD=([^\n]+)', '\\1') or ['gitea_password']) | first }}"
when: gitea_needs_setup | bool
- name: Extract database host and port
ansible.builtin.set_fact:
gitea_db_hostname: "{{ gitea_db_host.split(':')[0] }}"
gitea_db_port: "{{ (gitea_db_host.split(':')[1]) | default('5432') }}"
when: gitea_needs_setup | bool
- name: Get Gitea server configuration from environment
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T {{ gitea_container_name }} env | grep -E "^GITEA__server__" || true
register: gitea_server_env
changed_when: false
failed_when: false
when: gitea_needs_setup | bool
- name: Parse server configuration
ansible.builtin.set_fact:
gitea_domain_config: "{{ (gitea_server_env.stdout | default('') | regex_search('GITEA__server__DOMAIN=([^\n]+)', '\\1') or [gitea_domain]) | first }}"
gitea_root_url: "{{ (gitea_server_env.stdout | default('') | regex_search('GITEA__server__ROOT_URL=([^\n]+)', '\\1') or ['https://' + gitea_domain + '/']) | first }}"
gitea_ssh_domain: "{{ (gitea_server_env.stdout | default('') | regex_search('GITEA__server__SSH_DOMAIN=([^\n]+)', '\\1') or [gitea_domain]) | first }}"
gitea_ssh_port: "{{ (gitea_server_env.stdout | default('') | regex_search('GITEA__server__SSH_PORT=([^\n]+)', '\\1') or ['2222']) | first }}"
when: gitea_needs_setup | bool
- name: Get Gitea service configuration from environment
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T {{ gitea_container_name }} env | grep -E "^GITEA__service__" || true
register: gitea_service_env
changed_when: false
failed_when: false
when: gitea_needs_setup | bool
- name: Parse service configuration
ansible.builtin.set_fact:
gitea_disable_registration: "{{ (gitea_service_env.stdout | default('') | regex_search('GITEA__service__DISABLE_REGISTRATION=([^\n]+)', '\\1') or ['true']) | first | lower }}"
when: gitea_needs_setup | bool
- name: Generate app.ini file
ansible.builtin.template:
src: "{{ gitea_app_ini_template | default('../../templates/gitea-app.ini.j2') }}"
dest: "{{ gitea_app_ini_path }}"
mode: '0644'
vars:
gitea_domain: "{{ gitea_domain_config }}"
postgres_db: "{{ gitea_db_name }}"
postgres_user: "{{ gitea_db_user }}"
postgres_password: "{{ gitea_db_passwd }}"
disable_registration: "{{ gitea_disable_registration == 'true' }}"
ssh_port: "{{ gitea_ssh_port | int }}"
ssh_listen_port: 22
when: gitea_needs_setup | bool
- name: Copy app.ini to Gitea container
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml cp {{ gitea_app_ini_path }} {{ gitea_container_name }}:{{ gitea_app_ini_container_path }}
when: gitea_needs_setup | bool
ignore_errors: yes
- name: Wait for container to be ready for exec
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T {{ gitea_container_name }} true
register: container_ready
until: container_ready.rc == 0
retries: "{{ gitea_config_retries | default(30) }}"
delay: "{{ gitea_config_delay | default(2) }}"
when:
- gitea_needs_setup | bool
- not (gitea_force_update_app_ini | default(false) | bool)
changed_when: false
ignore_errors: yes
- name: Set correct permissions on app.ini in container
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T {{ gitea_container_name }} chown 1000:1000 {{ gitea_app_ini_container_path }} && \
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T {{ gitea_container_name }} chmod 644 {{ gitea_app_ini_container_path }}
when: gitea_needs_setup | bool
ignore_errors: yes
- name: Restart Gitea container
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml restart {{ gitea_container_name }}
when: gitea_needs_setup | bool
register: gitea_restart
changed_when: gitea_restart.rc == 0
notify: wait for gitea
- name: Wait for Gitea to be ready after restart
ansible.builtin.uri:
url: "{{ gitea_url }}/api/healthz"
method: GET
status_code: [200]
validate_certs: false
timeout: "{{ gitea_health_check_timeout | default(10) }}"
register: gitea_health_after_restart
until: gitea_health_after_restart.status == 200
retries: "{{ gitea_restart_retries | default(30) }}"
delay: "{{ gitea_restart_delay | default(5) }}"
when:
- not (gitea_force_update_app_ini | default(false) | bool)
- gitea_restart.changed | default(false)
changed_when: false
ignore_errors: yes
- name: Wait for database to be initialized
ansible.builtin.pause:
seconds: "{{ gitea_setup_db_wait | default(10) }}"
when: gitea_needs_setup | bool
- name: Check if admin user already exists
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T {{ gitea_container_name }} \
gitea admin user list --admin | grep -q "{{ gitea_admin_username }}" || echo "not_found"
register: gitea_admin_exists
changed_when: false
failed_when: false
when: gitea_needs_setup | bool
- name: Create admin user
ansible.builtin.shell: |
docker compose -f {{ gitea_stack_path }}/docker-compose.yml exec -T --user git {{ gitea_container_name }} \
gitea admin user create \
--username "{{ gitea_admin_username }}" \
--password "{{ gitea_admin_password }}" \
--email "{{ gitea_admin_email }}" \
--admin \
--must-change-password=false
register: gitea_admin_create_result
when:
- gitea_needs_setup | bool
- gitea_admin_exists.stdout | default('') | trim == 'not_found'
failed_when: gitea_admin_create_result.rc != 0 and 'already exists' not in (gitea_admin_create_result.stderr | default(''))
no_log: true
- name: Verify Gitea is accessible
ansible.builtin.uri:
url: "{{ gitea_url }}"
method: GET
status_code: [200, 302]
validate_certs: false
timeout: "{{ gitea_health_check_timeout | default(10) }}"
follow_redirects: none
register: gitea_access_check
when: gitea_needs_setup | bool
- name: Display success message
ansible.builtin.debug:
msg: |
========================================
✅ Gitea Initial Setup Complete!
========================================
Configuration:
- app.ini created with INSTALL_LOCK = true
- Admin user created: {{ gitea_admin_username }}
- Email: {{ gitea_admin_email }}
Next steps:
1. Access Gitea: {{ gitea_url }}
2. Login with:
- Username: {{ gitea_admin_username }}
- Password: (from vault: vault_gitea_admin_password)
3. Configure Gitea Actions Runner (if needed):
- Go to: {{ gitea_url }}/admin/actions/runners
- Get registration token
- Register runner using: deployment/gitea-runner/register.sh
========================================
when:
- gitea_needs_setup | bool
- gitea_show_status | default(true) | bool
- name: Display already configured message
ansible.builtin.debug:
msg: |
========================================
Gitea is already configured.
========================================
No setup needed. Access Gitea at: {{ gitea_url }}
========================================
when:
- gitea_already_configured | bool
- gitea_show_status | default(true) | bool
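Initial setup only needs the admin credentials; a minimal sketch of the expected vault entry and admin identity (username and email are assumptions, the password is a placeholder):
# deployment/ansible/secrets/production.vault.yml (illustrative)
vault_gitea_admin_password: "<strong-admin-password>"
# Role or inventory level (illustrative)
gitea_admin_username: michael
gitea_admin_email: admin@michaelschiemer.de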

View File

@@ -0,0 +1,21 @@
---
# PostgreSQL Production Stack - Default Variables
# Stack path on target host
postgresql_production_stack_path: "{{ stacks_base_path }}/postgresql-production"
# Wait configuration
postgresql_production_wait_timeout: "{{ wait_timeout | default(60) }}"
postgresql_production_wait_interval: 5
# Database configuration (from vault or defaults)
postgresql_production_db_name: "{{ vault_db_name | default('michaelschiemer') }}"
postgresql_production_db_user: "{{ vault_db_user | default('postgres') }}"
postgresql_production_db_password: "{{ vault_db_password | default('') }}"
# Backup configuration
postgresql_production_backup_retention_days: 7
postgresql_production_backup_schedule: "0 2 * * *"
# Template used to generate the PostgreSQL .env file
postgresql_production_env_template: "{{ role_path }}/templates/postgresql.env.j2"
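These defaults pull everything sensitive from vault; a minimal sketch of the matching entries, using the vault path referenced by the validation task and placeholder values:
# inventory/group_vars/production/vault.yml (illustrative)
vault_db_name: michaelschiemer
vault_db_user: postgres
vault_db_password: "<strong-random-password>"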

View File

@@ -0,0 +1,69 @@
---
- name: Set PostgreSQL Production variables for template
ansible.builtin.set_fact:
postgres_db: "{{ postgresql_production_db_name }}"
postgres_user: "{{ postgresql_production_db_user }}"
postgres_password: "{{ postgresql_production_db_password }}"
backup_retention_days: "{{ postgresql_production_backup_retention_days }}"
backup_schedule: "{{ postgresql_production_backup_schedule }}"
no_log: yes
- name: Validate PostgreSQL Production password is set
ansible.builtin.fail:
msg: |
PostgreSQL Production password is not set!
Please ensure vault_db_password is defined in:
- {{ vault_file | default('inventory/group_vars/production/vault.yml') }}
Or pass it via extra vars:
-e "postgresql_production_db_password=your-password"
when: (postgresql_production_db_password | default('') | string | trim) == ''
- name: Create PostgreSQL Production .env file from vault secrets
ansible.builtin.template:
src: postgresql.env.j2
dest: "{{ postgresql_production_stack_path }}/.env"
mode: '0600'
- name: Deploy PostgreSQL Production stack
community.docker.docker_compose_v2:
project_src: "{{ postgresql_production_stack_path }}"
state: present
pull: always
register: postgresql_production_compose_result
ignore_errors: yes
- name: Show PostgreSQL Production logs if deployment failed
shell: |
docker compose -f {{ postgresql_production_stack_path }}/docker-compose.yml logs --tail=50 postgres-production
register: postgresql_production_logs
changed_when: false
failed_when: false
when: postgresql_production_compose_result.failed | default(false)
- name: Display PostgreSQL Production logs on failure
ansible.builtin.debug:
msg: "{{ postgresql_production_logs.stdout_lines | default([]) }}"
when: postgresql_production_compose_result.failed | default(false)
- name: Check PostgreSQL Production container status
shell: |
docker compose -f {{ postgresql_production_stack_path }}/docker-compose.yml ps postgres-production | grep -Eiq "Up|running|healthy"
register: postgresql_production_state
changed_when: false
until: postgresql_production_state.rc == 0
retries: "{{ ((postgresql_production_wait_timeout | int) + (postgresql_production_wait_interval | int) - 1) // (postgresql_production_wait_interval | int) }}"
delay: "{{ postgresql_production_wait_interval | int }}"
failed_when: postgresql_production_state.rc != 0
when: not ansible_check_mode
- name: Fail if PostgreSQL Production deployment failed
ansible.builtin.fail:
msg: "PostgreSQL Production stack deployment failed. Check logs above for details."
when: postgresql_production_compose_result.failed | default(false)
- name: Record PostgreSQL Production deployment facts
set_fact:
postgresql_production_stack_changed: "{{ postgresql_production_compose_result.changed | default(false) }}"
postgresql_production_log_hint: ""

View File

@@ -0,0 +1,14 @@
# PostgreSQL Stack Configuration
# Managed by Ansible - DO NOT EDIT MANUALLY
# Timezone
TZ=Europe/Berlin
# PostgreSQL Configuration
POSTGRES_DB={{ postgres_db }}
POSTGRES_USER={{ postgres_user }}
POSTGRES_PASSWORD={{ postgres_password }}
# Backup Configuration
BACKUP_RETENTION_DAYS={{ backup_retention_days }}
BACKUP_SCHEDULE={{ backup_schedule }}

View File

@@ -0,0 +1,21 @@
---
# PostgreSQL Staging Stack - Default Variables
# Stack path on target host
postgresql_staging_stack_path: "{{ stacks_base_path }}/postgresql-staging"
# Wait configuration
postgresql_staging_wait_timeout: "{{ wait_timeout | default(60) }}"
postgresql_staging_wait_interval: 5
# Database configuration (from vault or defaults)
postgresql_staging_db_name: "{{ vault_db_name_staging | default('michaelschiemer_staging') }}"
postgresql_staging_db_user: "{{ vault_db_user | default('postgres') }}"
postgresql_staging_db_password: "{{ vault_db_password_staging | default('') }}"
# Backup configuration (shorter retention for staging)
postgresql_staging_backup_retention_days: 3
postgresql_staging_backup_schedule: "0 3 * * *"
# Template used to generate the PostgreSQL .env file
postgresql_staging_env_template: "{{ role_path }}/templates/postgresql.env.j2"
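The staging counterparts follow the same pattern; a minimal sketch (placeholders only):
# inventory/group_vars/staging/vault.yml (illustrative)
vault_db_name_staging: michaelschiemer_staging
vault_db_user: postgres
vault_db_password_staging: "<strong-random-password>"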

View File

@@ -0,0 +1,69 @@
---
- name: Set PostgreSQL Staging variables for template
ansible.builtin.set_fact:
postgres_db: "{{ postgresql_staging_db_name }}"
postgres_user: "{{ postgresql_staging_db_user }}"
postgres_password: "{{ postgresql_staging_db_password }}"
backup_retention_days: "{{ postgresql_staging_backup_retention_days }}"
backup_schedule: "{{ postgresql_staging_backup_schedule }}"
no_log: yes
- name: Validate PostgreSQL Staging password is set
ansible.builtin.fail:
msg: |
PostgreSQL Staging password is not set!
Please ensure vault_db_password_staging is defined in:
- {{ vault_file | default('inventory/group_vars/staging/vault.yml') }}
Or pass it via extra vars:
-e "postgresql_staging_db_password=your-password"
when: (postgresql_staging_db_password | default('') | string | trim) == ''
- name: Create PostgreSQL Staging .env file from vault secrets
ansible.builtin.template:
src: postgresql.env.j2
dest: "{{ postgresql_staging_stack_path }}/.env"
mode: '0600'
- name: Deploy PostgreSQL Staging stack
community.docker.docker_compose_v2:
project_src: "{{ postgresql_staging_stack_path }}"
state: present
pull: always
register: postgresql_staging_compose_result
ignore_errors: yes
- name: Show PostgreSQL Staging logs if deployment failed
shell: |
docker compose -f {{ postgresql_staging_stack_path }}/docker-compose.yml logs --tail=50 postgres-staging
register: postgresql_staging_logs
changed_when: false
failed_when: false
when: postgresql_staging_compose_result.failed | default(false)
- name: Display PostgreSQL Staging logs on failure
ansible.builtin.debug:
msg: "{{ postgresql_staging_logs.stdout_lines | default([]) }}"
when: postgresql_staging_compose_result.failed | default(false)
- name: Check PostgreSQL Staging container status
shell: |
docker compose -f {{ postgresql_staging_stack_path }}/docker-compose.yml ps postgres-staging | grep -Eiq "Up|running|healthy"
register: postgresql_staging_state
changed_when: false
until: postgresql_staging_state.rc == 0
retries: "{{ ((postgresql_staging_wait_timeout | int) + (postgresql_staging_wait_interval | int) - 1) // (postgresql_staging_wait_interval | int) }}"
delay: "{{ postgresql_staging_wait_interval | int }}"
failed_when: postgresql_staging_state.rc != 0
when: not ansible_check_mode
- name: Fail if PostgreSQL Staging deployment failed
ansible.builtin.fail:
msg: "PostgreSQL Staging stack deployment failed. Check logs above for details."
when: postgresql_staging_compose_result.failed | default(false)
- name: Record PostgreSQL Staging deployment facts
set_fact:
postgresql_staging_stack_changed: "{{ postgresql_staging_compose_result.changed | default(false) }}"
postgresql_staging_log_hint: ""

View File

@@ -0,0 +1,14 @@
# PostgreSQL Stack Configuration
# Managed by Ansible - DO NOT EDIT MANUALLY
# Timezone
TZ=Europe/Berlin
# PostgreSQL Configuration
POSTGRES_DB={{ postgres_db }}
POSTGRES_USER={{ postgres_user }}
POSTGRES_PASSWORD={{ postgres_password }}
# Backup Configuration
BACKUP_RETENTION_DAYS={{ backup_retention_days }}
BACKUP_SCHEDULE={{ backup_schedule }}

View File

@@ -1,4 +1,10 @@
---
- name: Create PostgreSQL .env file from vault secrets
ansible.builtin.template:
src: postgresql.env.j2
dest: "{{ postgresql_stack_path }}/.env"
mode: '0600'
- name: Deploy PostgreSQL stack
community.docker.docker_compose_v2:
project_src: "{{ postgresql_stack_path }}"

View File

@@ -0,0 +1,14 @@
# PostgreSQL Stack Configuration
# Managed by Ansible - DO NOT EDIT MANUALLY
# Timezone
TZ=Europe/Berlin
# PostgreSQL Configuration
POSTGRES_DB=michaelschiemer
POSTGRES_USER=postgres
POSTGRES_PASSWORD={{ vault_db_password }}
# Backup Configuration
BACKUP_RETENTION_DAYS=7
BACKUP_SCHEDULE=0 2 * * *

View File

@@ -0,0 +1,9 @@
---
# Redis Role Default Variables
# Redis Stack Path
redis_stack_path: "{{ stacks_base_path }}/redis"
# Wait Configuration
redis_wait_timeout: 60
redis_wait_interval: 5

View File

@@ -0,0 +1,49 @@
---
- name: Create Redis .env file from vault secrets
ansible.builtin.template:
src: redis.env.j2
dest: "{{ redis_stack_path }}/.env"
mode: '0600'
tags:
- redis
- config
- name: Deploy Redis stack
community.docker.docker_compose_v2:
project_src: "{{ redis_stack_path }}"
state: present
pull: always
register: redis_compose_result
tags:
- redis
- deploy
- name: Wait for Redis to be healthy
ansible.builtin.shell: |
docker compose -f {{ redis_stack_path }}/docker-compose.yml ps redis-stack | grep -Eiq "Up|running"
register: redis_state
changed_when: false
until: redis_state.rc == 0
retries: "{{ ((redis_wait_timeout | int) + (redis_wait_interval | int) - 1) // (redis_wait_interval | int) }}"
delay: "{{ redis_wait_interval | int }}"
failed_when: redis_state.rc != 0
tags:
- redis
- healthcheck
- name: Test Redis connection
ansible.builtin.shell: |
docker exec redis-stack redis-cli ping
register: redis_ping
changed_when: false
failed_when: redis_ping.stdout != "PONG"
tags:
- redis
- healthcheck
- name: Set Redis deployment facts
ansible.builtin.set_fact:
redis_stack_changed: "{{ redis_compose_result.changed }}"
redis_stack_healthy: "{{ redis_state.rc == 0 }}"
tags:
- redis

View File

@@ -0,0 +1,8 @@
# Redis Stack Configuration
# Managed by Ansible - DO NOT EDIT MANUALLY
# Timezone
TZ=Europe/Berlin
# Redis Password (REQUIRED in production)
REDIS_PASSWORD={{ vault_redis_password }}

View File

@@ -45,6 +45,44 @@
}}
no_log: true
- name: Generate REGISTRY_HTTP_SECRET if not set
set_fact:
registry_http_secret: "{{ lookup('password', '/dev/null length=64 chars=hexdigits') }}"
no_log: true
- name: Check if Registry .env file exists
ansible.builtin.stat:
path: "{{ registry_stack_path }}/.env"
register: registry_env_file
- name: Read existing REGISTRY_HTTP_SECRET from .env if exists
ansible.builtin.shell: |
grep '^REGISTRY_HTTP_SECRET=' "{{ registry_stack_path }}/.env" 2>/dev/null | cut -d'=' -f2- || echo ''
register: existing_registry_secret
changed_when: false
failed_when: false
when: registry_env_file.stat.exists
no_log: true
- name: Use existing REGISTRY_HTTP_SECRET if available
set_fact:
registry_http_secret: "{{ existing_registry_secret.stdout | default(registry_http_secret) }}"
when:
- registry_env_file.stat.exists
- existing_registry_secret.stdout | default('') | string | trim != ''
no_log: true
- name: Create or update Registry .env file
ansible.builtin.lineinfile:
path: "{{ registry_stack_path }}/.env"
regexp: '^REGISTRY_HTTP_SECRET='
line: "REGISTRY_HTTP_SECRET={{ registry_http_secret }}"
create: yes
mode: '0600'
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
no_log: true
- name: Create Registry htpasswd file if missing
shell: |
docker run --rm --entrypoint htpasswd httpd:2 -Bbn {{ registry_username }} {{ registry_password }} > {{ registry_auth_path }}/htpasswd
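For context, a minimal sketch of how the generated REGISTRY_HTTP_SECRET and htpasswd file are typically consumed by the registry container; service name, image tag and mount paths are assumptions:
# Hypothetical excerpt of the registry docker-compose.yml (illustrative only)
services:
  registry:
    image: registry:2
    environment:
      REGISTRY_HTTP_SECRET: ${REGISTRY_HTTP_SECRET}
      REGISTRY_AUTH: htpasswd
      REGISTRY_AUTH_HTPASSWD_REALM: "Registry Realm"
      REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd
    volumes:
      - ./auth:/auth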

View File

@@ -0,0 +1,8 @@
---
# Handlers for Traefik Role
- name: wait for traefik
ansible.builtin.wait_for:
timeout: "{{ traefik_restart_wait_timeout | default(30) }}"
changed_when: false

View File

@@ -0,0 +1,82 @@
---
- name: Check for existing Traefik containers
shell: docker ps -a --filter "name=traefik" --format "{{ '{{' }}.ID{{ '}}' }}"
register: existing_traefik_containers
changed_when: false
failed_when: false
- name: Stop and remove existing Traefik containers
shell: docker stop {{ item }} && docker rm {{ item }}
loop: "{{ existing_traefik_containers.stdout_lines }}"
when: existing_traefik_containers.stdout_lines | length > 0
ignore_errors: yes
- name: Check if ports 80 and 443 are in use
shell: |
if sudo ss -tlnp 'sport = :80' 2>/dev/null | grep -q LISTEN; then
echo "port_80_in_use"
fi
if sudo ss -tlnp 'sport = :443' 2>/dev/null | grep -q LISTEN; then
echo "port_443_in_use"
fi
register: port_check
changed_when: false
failed_when: false
- name: Display port status
debug:
msg:
- "Port 80 status: {{ 'IN USE' if 'port_80_in_use' in port_check.stdout else 'FREE' }}"
- "Port 443 status: {{ 'IN USE' if 'port_443_in_use' in port_check.stdout else 'FREE' }}"
- "Note: docker-proxy listening on ports is normal when Traefik container is running"
- name: Warn if ports are blocked by non-docker processes
debug:
msg: "WARNING: Ports 80/443 appear to be in use. This may prevent Traefik from starting. Check with: sudo ss -tlnp 'sport = :80'"
when: ('port_80_in_use' in port_check.stdout or 'port_443_in_use' in port_check.stdout) and existing_traefik_containers.stdout_lines | length == 0
- name: Check if acme.json exists
stat:
path: "{{ traefik_stack_path }}/acme.json"
register: acme_json_stat
- name: Remove acme.json if it's a directory (should be a file)
shell: |
if [ -d "{{ traefik_stack_path }}/acme.json" ]; then
rm -rf "{{ traefik_stack_path }}/acme.json"
fi
become: yes
when: acme_json_stat.stat.exists and acme_json_stat.stat.isdir
- name: Ensure Traefik acme.json exists and has correct permissions
file:
path: "{{ traefik_stack_path }}/acme.json"
state: touch
mode: '0600'
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
become: yes
when: not acme_json_stat.stat.exists or (acme_json_stat.stat.exists and acme_json_stat.stat.isdir)
- name: Deploy Traefik stack
community.docker.docker_compose_v2:
project_src: "{{ traefik_stack_path }}"
state: present
pull: always
register: traefik_compose_result
- name: Check Traefik container status
shell: |
docker compose -f {{ traefik_stack_path }}/docker-compose.yml ps traefik | grep -Eiq "Up|running"
register: traefik_state
changed_when: false
until: traefik_state.rc == 0
retries: "{{ ((traefik_wait_timeout | int) + (traefik_wait_interval | int) - 1) // (traefik_wait_interval | int) }}"
delay: "{{ traefik_wait_interval | int }}"
failed_when: traefik_state.rc != 0
when: not ansible_check_mode
- name: Record Traefik deployment facts
set_fact:
traefik_stack_changed: "{{ traefik_compose_result.changed | default(false) }}"
traefik_log_hint: ""
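The Traefik task files in this role assume the usual role variables; a rough sketch (values illustrative, not the committed defaults):
# Illustrative Traefik role variables consumed by these tasks
traefik_stack_path: "{{ stacks_base_path }}/traefik"
traefik_container_name: traefik
traefik_wait_timeout: 60
traefik_wait_interval: 5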

View File

@@ -0,0 +1,135 @@
---
# Check Traefik Logs and ACME Challenge Status
- name: Check if Traefik stack directory exists
ansible.builtin.stat:
path: "{{ traefik_stack_path }}"
register: traefik_stack_exists
- name: Fail if Traefik stack directory does not exist
ansible.builtin.fail:
msg: "Traefik stack directory not found at {{ traefik_stack_path }}"
when: not traefik_stack_exists.stat.exists
- name: Get recent Traefik logs
ansible.builtin.shell: |
cd {{ traefik_stack_path }}
docker compose logs {{ traefik_container_name }} --tail={{ traefik_logs_tail | default(100) }} 2>&1
register: traefik_logs
changed_when: false
failed_when: false
- name: Check for ACME challenge errors in container logs
ansible.builtin.shell: |
cd {{ traefik_stack_path }}
docker compose logs {{ traefik_container_name }} 2>&1 | grep -iE "acme.*challenge|Cannot retrieve.*ACME" | tail -{{ traefik_logs_error_tail | default(20) }} || echo "No ACME challenge errors found in recent logs"
register: acme_errors
changed_when: false
failed_when: false
- name: Check for ACME challenge errors in log file
ansible.builtin.shell: |
tail -n {{ traefik_logs_tail | default(100) }} {{ traefik_stack_path }}/logs/traefik.log 2>/dev/null | grep -iE "acme.*challenge|Cannot retrieve.*ACME" | tail -{{ traefik_logs_error_tail | default(20) }} || echo "No ACME challenge errors found in log file"
register: acme_errors_file
changed_when: false
failed_when: false
ignore_errors: yes
- name: Get logs from last N minutes
ansible.builtin.shell: |
cd {{ traefik_stack_path }}
docker compose logs {{ traefik_container_name }} --since {{ traefik_logs_since_minutes | default(10) }}m 2>&1 | tail -{{ traefik_logs_recent_tail | default(50) }}
register: recent_logs
changed_when: false
failed_when: false
when: traefik_logs_since_minutes is defined
- name: Count ACME challenge errors in last hour
ansible.builtin.shell: |
cd {{ traefik_stack_path }}
# grep -c prints "0" itself when nothing matches (and exits 1), so use "|| true" instead of echoing a second "0"
docker compose logs {{ traefik_container_name }} --since 1h 2>&1 | grep -c "Cannot retrieve.*ACME challenge" || true
register: acme_error_count
changed_when: false
failed_when: false
- name: Display ACME challenge error summary
ansible.builtin.debug:
msg: |
========================================
Traefik ACME Challenge Status
========================================
ACME Errors (last hour): {{ acme_error_count.stdout }}
========================================
{% if acme_error_count.stdout | int > 0 %}
⚠️ ACME challenge errors still occurring
{% else %}
✅ No ACME challenge errors in the last hour!
{% endif %}
when: traefik_show_status | default(true) | bool
- name: Display ACME challenge errors from container logs
ansible.builtin.debug:
msg: |
========================================
ACME Challenge Errors (Container Logs):
========================================
{{ acme_errors.stdout }}
========================================
when: traefik_show_status | default(true) | bool
- name: Display ACME challenge errors from log file
ansible.builtin.debug:
msg: |
========================================
ACME Challenge Errors (Log File):
========================================
{{ acme_errors_file.stdout }}
========================================
when: traefik_show_status | default(true) | bool
- name: Display recent Traefik logs
ansible.builtin.debug:
msg: |
========================================
Recent Traefik Logs (last {{ traefik_logs_since_minutes | default(10) }} minutes):
========================================
{{ recent_logs.stdout | default('No recent logs') }}
========================================
when:
- traefik_logs_since_minutes is defined
- traefik_show_status | default(true) | bool
- name: Display all Traefik logs
ansible.builtin.debug:
msg: |
========================================
Traefik Container Logs (last {{ traefik_logs_tail | default(100) }} lines):
========================================
{{ traefik_logs.stdout | default('No logs available') }}
========================================
when:
- traefik_show_all_logs | default(false) | bool
- traefik_show_status | default(true) | bool
- name: Display final summary
ansible.builtin.debug:
msg: |
========================================
Summary
========================================
{% if acme_error_count.stdout | int == 0 %}
✅ SUCCESS: No ACME challenge errors in the last hour!
The Traefik configuration fix appears to be working. Monitor the logs
for the next certificate renewal attempt (usually hourly) to confirm.
{% else %}
⚠️ WARNING: {{ acme_error_count.stdout }} ACME challenge errors found in the last hour.
The errors may be from before the configuration fix was applied.
Monitor the logs for the next certificate renewal attempt to see if
the errors have stopped.
{% endif %}
========================================
when: traefik_show_status | default(true) | bool

View File

@@ -1,23 +0,0 @@
---
- name: Deploy Traefik stack
community.docker.docker_compose_v2:
project_src: "{{ traefik_stack_path }}"
state: present
pull: always
register: traefik_compose_result
- name: Check Traefik container status
shell: |
docker compose -f {{ traefik_stack_path }}/docker-compose.yml ps traefik | grep -Eiq "Up|running"
register: traefik_state
changed_when: false
until: traefik_state.rc == 0
retries: "{{ ((traefik_wait_timeout | int) + (traefik_wait_interval | int) - 1) // (traefik_wait_interval | int) }}"
delay: "{{ traefik_wait_interval | int }}"
failed_when: traefik_state.rc != 0
when: not ansible_check_mode
- name: Record Traefik deployment facts
set_fact:
traefik_stack_changed: "{{ traefik_compose_result.changed | default(false) }}"
traefik_log_hint: ""

View File

@@ -0,0 +1,113 @@
---
# Restart and Recreate Traefik Container Tasks
# Supports both restart (force-recreate) and full recreate (down + up)
- name: Check if Traefik stack directory exists
ansible.builtin.stat:
path: "{{ traefik_stack_path }}"
register: traefik_stack_exists
- name: Fail if Traefik stack directory does not exist
ansible.builtin.fail:
msg: "Traefik stack directory not found at {{ traefik_stack_path }}"
when: not traefik_stack_exists.stat.exists
- name: Check Traefik container status before restart
ansible.builtin.shell: |
cd {{ traefik_stack_path }}
docker compose ps {{ traefik_container_name }} --format json
register: traefik_status_before
changed_when: false
failed_when: false
- name: Display Traefik status before restart
ansible.builtin.debug:
msg: |
================================================================================
Traefik Container Status (Before Restart):
{{ traefik_status_before.stdout | default('Container not found or Docker not running') }}
================================================================================
when: traefik_show_status | default(true) | bool
- name: Recreate Traefik container to apply new configuration
ansible.builtin.shell: |
cd {{ traefik_stack_path }}
docker compose up -d --force-recreate {{ traefik_container_name }}
register: traefik_restart
changed_when: traefik_restart.rc == 0
when: traefik_restart_action | default('restart') == 'restart'
notify: wait for traefik
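# Note: passing a service name to `docker compose down` needs a recent Compose v2 release;
# on older versions, `docker compose stop` followed by `docker compose rm -f` for the service
# achieves the same effect for the full recreate path.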
- name: Stop and remove Traefik container (full recreate)
ansible.builtin.shell: |
cd {{ traefik_stack_path }}
docker compose down {{ traefik_container_name }}
register: traefik_down
changed_when: traefik_down.rc == 0
when: traefik_restart_action | default('restart') == 'recreate'
- name: Create new Traefik container with updated configuration (full recreate)
ansible.builtin.shell: |
cd {{ traefik_stack_path }}
docker compose up -d {{ traefik_container_name }}
register: traefik_up
changed_when: traefik_up.rc == 0
when: traefik_restart_action | default('restart') == 'recreate'
notify: wait for traefik
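# ansible.builtin.wait_for with only a timeout simply pauses for that many seconds,
# giving Traefik time to initialise before the status and health checks below.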
- name: Wait for Traefik to be ready
ansible.builtin.wait_for:
timeout: "{{ traefik_restart_wait_timeout | default(30) }}"
changed_when: false
- name: Check Traefik container status after restart
ansible.builtin.shell: |
cd {{ traefik_stack_path }}
docker compose ps {{ traefik_container_name }} --format json
register: traefik_status_after
changed_when: false
failed_when: false
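# `traefik healthcheck --ping` requires Traefik's ping endpoint to be enabled; the
# fallback string plus ignore_errors keeps the play going and the result is only
# reported in the summary below.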
- name: Check Traefik health endpoint
ansible.builtin.shell: |
cd {{ traefik_stack_path }}
docker compose exec -T {{ traefik_container_name }} traefik healthcheck --ping 2>&1 || echo "HEALTH_CHECK_FAILED"
register: traefik_health
ignore_errors: yes
changed_when: false
when: traefik_check_health | default(true) | bool
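# `docker compose ps --format json` emits a JSON array on older Compose v2 releases and
# newline-delimited objects on newer ones, so the jq filter has to cope with both shapes.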
- name: Check if ACME challenge router is in labels (for recreate action)
ansible.builtin.shell: |
cd {{ traefik_stack_path }}
docker compose ps {{ traefik_container_name }} --format json | jq -r 'if type == "array" then .[0].Labels else .Labels end' | grep -i 'acme-challenge' || echo "NO_ACME_ROUTER"
register: acme_router_check
changed_when: false
failed_when: false
when: traefik_restart_action | default('restart') == 'recreate'
- name: Display final status
ansible.builtin.debug:
msg: |
========================================
Traefik Restart Summary
========================================
Action: {{ traefik_restart_action | default('restart') | upper }}
Container Status: {% if 'State":"running' in (traefik_status_after.stdout | default('')) %}✅ RUNNING{% else %}❌ NOT RUNNING{% endif %}
{% if traefik_check_health | default(true) | bool %}
Health Check: {% if 'HEALTH_CHECK_FAILED' not in (traefik_health.stdout | default('')) %}✅ HEALTHY{% else %}❌ UNHEALTHY or TIMEOUT{% endif %}
{% endif %}
{% if traefik_restart_action | default('restart') == 'recreate' %}
ACME Challenge Router: {% if 'NO_ACME_ROUTER' in acme_router_check.stdout %}✅ REMOVED (correct!){% else %}⚠️ Still present in labels{% endif %}
{% endif %}
Restart Action: {% if (traefik_restart.changed | default(false)) or (traefik_up.changed | default(false)) %}🔄 Container restarted{% else %}No restart needed{% endif %}
========================================
{% if 'State":"running' in (traefik_status_after.stdout | default('')) %}
✅ Traefik is running!
{% else %}
❌ Traefik is not running. Check logs for details:
docker logs {{ traefik_container_name }}
{% endif %}
========================================
when: traefik_show_status | default(true) | bool